You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

498 lines
14KB

  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "buffer.h"
  19. #include "common.h"
  20. #include "hwcontext.h"
  21. #include "hwcontext_internal.h"
  22. #include "hwcontext_cuda_internal.h"
  23. #if CONFIG_VULKAN
  24. #include "hwcontext_vulkan.h"
  25. #endif
  26. #include "cuda_check.h"
  27. #include "mem.h"
  28. #include "pixdesc.h"
  29. #include "pixfmt.h"
  30. #include "imgutils.h"
/* Pitch/size alignment (bytes) used when sizing internal pool buffers and
 * filling frame plane pointers (see cuda_frames_init / cuda_get_buffer). */
#define CUDA_FRAME_ALIGNMENT 256

typedef struct CUDAFramesContext {
    /* Chroma subsampling shifts of the frames' sw_format, cached once in
     * cuda_frames_init and reused by cuda_transfer_data to compute the
     * height of each chroma plane. */
    int shift_width, shift_height;
} CUDAFramesContext;

/* Software pixel formats that may back an AV_PIX_FMT_CUDA hardware frame. */
static const enum AVPixelFormat supported_formats[] = {
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_P010,
    AV_PIX_FMT_P016,
    AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_0RGB32,
    AV_PIX_FMT_0BGR32,
#if CONFIG_VULKAN
    AV_PIX_FMT_VULKAN,
#endif
};

/* Checks a CUDA driver API call, logging failures and converting the
 * CUresult to a negative AVERROR code. NOTE: expands to code that expects
 * locals named 'device_ctx' and 'cu' to be in scope at the call site. */
#define CHECK_CU(x) FF_CUDA_CHECK_DL(device_ctx, cu, x)
  50. static int cuda_frames_get_constraints(AVHWDeviceContext *ctx,
  51. const void *hwconfig,
  52. AVHWFramesConstraints *constraints)
  53. {
  54. int i;
  55. constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
  56. sizeof(*constraints->valid_sw_formats));
  57. if (!constraints->valid_sw_formats)
  58. return AVERROR(ENOMEM);
  59. for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
  60. constraints->valid_sw_formats[i] = supported_formats[i];
  61. constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;
  62. constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
  63. if (!constraints->valid_hw_formats)
  64. return AVERROR(ENOMEM);
  65. constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
  66. constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
  67. return 0;
  68. }
/**
 * AVBuffer destructor for pool-allocated device memory: releases the
 * CUdeviceptr stored in 'data' under the owning device's CUDA context.
 * Errors are only logged (via CHECK_CU) since a buffer free cannot fail.
 */
static void cuda_buffer_free(void *opaque, uint8_t *data)
{
    AVHWFramesContext *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;  /* CHECK_CU needs 'cu' and 'device_ctx' */
    CUcontext dummy;

    /* Make the device's context current so the free targets the right
     * address space, then restore whatever context was active before. */
    CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));

    CHECK_CU(cu->cuMemFree((CUdeviceptr)data));

    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
}
  80. static AVBufferRef *cuda_pool_alloc(void *opaque, int size)
  81. {
  82. AVHWFramesContext *ctx = opaque;
  83. AVHWDeviceContext *device_ctx = ctx->device_ctx;
  84. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  85. CudaFunctions *cu = hwctx->internal->cuda_dl;
  86. AVBufferRef *ret = NULL;
  87. CUcontext dummy = NULL;
  88. CUdeviceptr data;
  89. int err;
  90. err = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
  91. if (err < 0)
  92. return NULL;
  93. err = CHECK_CU(cu->cuMemAlloc(&data, size));
  94. if (err < 0)
  95. goto fail;
  96. ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
  97. if (!ret) {
  98. CHECK_CU(cu->cuMemFree(data));
  99. goto fail;
  100. }
  101. fail:
  102. CHECK_CU(cu->cuCtxPopCurrent(&dummy));
  103. return ret;
  104. }
  105. static int cuda_frames_init(AVHWFramesContext *ctx)
  106. {
  107. CUDAFramesContext *priv = ctx->internal->priv;
  108. int i;
  109. for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
  110. if (ctx->sw_format == supported_formats[i])
  111. break;
  112. }
  113. if (i == FF_ARRAY_ELEMS(supported_formats)) {
  114. av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
  115. av_get_pix_fmt_name(ctx->sw_format));
  116. return AVERROR(ENOSYS);
  117. }
  118. av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);
  119. if (!ctx->pool) {
  120. int size = av_image_get_buffer_size(ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
  121. if (size < 0)
  122. return size;
  123. ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
  124. if (!ctx->internal->pool_internal)
  125. return AVERROR(ENOMEM);
  126. }
  127. return 0;
  128. }
/**
 * Fill 'frame' with a buffer from the frames pool and lay out the plane
 * pointers/linesizes for the context's software format inside it.
 *
 * Note frame->buf[0] is intentionally kept on the error path after
 * av_image_fill_arrays fails; the caller unrefs the frame.
 */
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    int res;

    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    /* Compute standard plane pointers/linesizes into the single buffer. */
    res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
                               ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
    if (res < 0)
        return res;

    // YUV420P is a special case.
    // Nvenc expects the U/V planes in swapped order from how ffmpeg expects them, also chroma is half-aligned
    if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
        /* Halve the chroma pitch, then swap U/V: the first chroma plane in
         * memory (old data[1]) becomes V, and U starts one half-height
         * chroma plane after it. Order of these assignments matters. */
        frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
        frame->data[2] = frame->data[1];
        frame->data[1] = frame->data[2] + frame->linesize[2] * ctx->height / 2;
    }

    frame->format = AV_PIX_FMT_CUDA;
    frame->width = ctx->width;
    frame->height = ctx->height;

    return 0;
}
  151. static int cuda_transfer_get_formats(AVHWFramesContext *ctx,
  152. enum AVHWFrameTransferDirection dir,
  153. enum AVPixelFormat **formats)
  154. {
  155. enum AVPixelFormat *fmts;
  156. fmts = av_malloc_array(2, sizeof(*fmts));
  157. if (!fmts)
  158. return AVERROR(ENOMEM);
  159. fmts[0] = ctx->sw_format;
  160. fmts[1] = AV_PIX_FMT_NONE;
  161. *formats = fmts;
  162. return 0;
  163. }
  164. static int cuda_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
  165. const AVFrame *src)
  166. {
  167. CUDAFramesContext *priv = ctx->internal->priv;
  168. AVHWDeviceContext *device_ctx = ctx->device_ctx;
  169. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  170. CudaFunctions *cu = hwctx->internal->cuda_dl;
  171. CUcontext dummy;
  172. int i, ret;
  173. ret = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
  174. if (ret < 0)
  175. return ret;
  176. for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
  177. CUDA_MEMCPY2D cpy = {
  178. .srcPitch = src->linesize[i],
  179. .dstPitch = dst->linesize[i],
  180. .WidthInBytes = FFMIN(src->linesize[i], dst->linesize[i]),
  181. .Height = src->height >> ((i == 0 || i == 3) ? 0 : priv->shift_height),
  182. };
  183. if (src->hw_frames_ctx) {
  184. cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
  185. cpy.srcDevice = (CUdeviceptr)src->data[i];
  186. } else {
  187. cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
  188. cpy.srcHost = src->data[i];
  189. }
  190. if (dst->hw_frames_ctx) {
  191. cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
  192. cpy.dstDevice = (CUdeviceptr)dst->data[i];
  193. } else {
  194. cpy.dstMemoryType = CU_MEMORYTYPE_HOST;
  195. cpy.dstHost = dst->data[i];
  196. }
  197. ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, hwctx->stream));
  198. if (ret < 0)
  199. goto exit;
  200. }
  201. if (!dst->hw_frames_ctx) {
  202. ret = CHECK_CU(cu->cuStreamSynchronize(hwctx->stream));
  203. if (ret < 0)
  204. goto exit;
  205. }
  206. exit:
  207. CHECK_CU(cu->cuCtxPopCurrent(&dummy));
  208. return 0;
  209. }
/**
 * Tear down the device context: release/destroy the CUDA context if we
 * created it ourselves, unload the driver function table, and free the
 * internal state. Safe to call when initialization only partially
 * succeeded (hwctx->internal may be NULL, cuda_ctx may be unset).
 */
static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;

    if (hwctx->internal) {
        CudaFunctions *cu = hwctx->internal->cuda_dl;  /* CHECK_CU needs 'cu' */

        /* Only dispose of a context we allocated; a user-supplied
         * cuda_ctx (is_allocated == 0) is left untouched. */
        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            /* A retained primary context must be released, not destroyed. */
            if (hwctx->internal->flags & AV_CUDA_USE_PRIMARY_CONTEXT)
                CHECK_CU(cu->cuDevicePrimaryCtxRelease(hwctx->internal->cuda_device));
            else
                CHECK_CU(cu->cuCtxDestroy(hwctx->cuda_ctx));

            hwctx->cuda_ctx = NULL;
        }

        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
  226. static int cuda_device_init(AVHWDeviceContext *ctx)
  227. {
  228. AVCUDADeviceContext *hwctx = ctx->hwctx;
  229. int ret;
  230. if (!hwctx->internal) {
  231. hwctx->internal = av_mallocz(sizeof(*hwctx->internal));
  232. if (!hwctx->internal)
  233. return AVERROR(ENOMEM);
  234. }
  235. if (!hwctx->internal->cuda_dl) {
  236. ret = cuda_load_functions(&hwctx->internal->cuda_dl, ctx);
  237. if (ret < 0) {
  238. av_log(ctx, AV_LOG_ERROR, "Could not dynamically load CUDA\n");
  239. goto error;
  240. }
  241. }
  242. return 0;
  243. error:
  244. cuda_device_uninit(ctx);
  245. return ret;
  246. }
/**
 * Create or retain the CUDA context for the device, honoring
 * AV_CUDA_USE_PRIMARY_CONTEXT.
 *
 * Primary-context path: refuse an already-active primary context whose
 * flags differ from CU_CTX_SCHED_BLOCKING_SYNC, set the flags if the
 * context is inactive, then retain it. Owned-context path: create a fresh
 * context (which makes it current) and immediately pop it so the caller's
 * current context is preserved. In both cases the context is marked as
 * allocated-by-us so cuda_device_uninit knows to dispose of it.
 */
static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    CUcontext dummy;
    int ret, dev_active = 0;
    unsigned int dev_flags = 0;

    const unsigned int desired_flags = CU_CTX_SCHED_BLOCKING_SYNC;

    cu = hwctx->internal->cuda_dl;  /* CHECK_CU needs 'cu' and 'device_ctx' */

    hwctx->internal->flags = flags;

    if (flags & AV_CUDA_USE_PRIMARY_CONTEXT) {
        ret = CHECK_CU(cu->cuDevicePrimaryCtxGetState(hwctx->internal->cuda_device,
                       &dev_flags, &dev_active));
        if (ret < 0)
            return ret;

        /* Flags of an active primary context cannot be changed. */
        if (dev_active && dev_flags != desired_flags) {
            av_log(device_ctx, AV_LOG_ERROR, "Primary context already active with incompatible flags.\n");
            return AVERROR(ENOTSUP);
        } else if (dev_flags != desired_flags) {
            /* Inactive: set the scheduling flags before retaining. */
            ret = CHECK_CU(cu->cuDevicePrimaryCtxSetFlags(hwctx->internal->cuda_device,
                           desired_flags));
            if (ret < 0)
                return ret;
        }

        ret = CHECK_CU(cu->cuDevicePrimaryCtxRetain(&hwctx->cuda_ctx,
                                                    hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;
    } else {
        ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
                                       hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;

        /* cuCtxCreate leaves the new context current; restore the
         * caller's context immediately. */
        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    hwctx->internal->is_allocated = 1;

    // Setting stream to NULL will make functions automatically use the default CUstream
    hwctx->stream = NULL;

    return 0;
}
  286. static int cuda_device_create(AVHWDeviceContext *device_ctx,
  287. const char *device,
  288. AVDictionary *opts, int flags)
  289. {
  290. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  291. CudaFunctions *cu;
  292. int ret, device_idx = 0;
  293. if (device)
  294. device_idx = strtol(device, NULL, 0);
  295. if (cuda_device_init(device_ctx) < 0)
  296. goto error;
  297. cu = hwctx->internal->cuda_dl;
  298. ret = CHECK_CU(cu->cuInit(0));
  299. if (ret < 0)
  300. goto error;
  301. ret = CHECK_CU(cu->cuDeviceGet(&hwctx->internal->cuda_device, device_idx));
  302. if (ret < 0)
  303. goto error;
  304. ret = cuda_context_init(device_ctx, flags);
  305. if (ret < 0)
  306. goto error;
  307. return 0;
  308. error:
  309. cuda_device_uninit(device_ctx);
  310. return AVERROR_UNKNOWN;
  311. }
/**
 * Derive a CUDA device from another hardware device context by matching
 * device UUIDs. Currently only Vulkan sources are supported (and only when
 * built with CONFIG_VULKAN): the Vulkan physical device's UUID is queried
 * and compared against every CUDA device's UUID until a match is found.
 *
 * Returns 0 on success, AVERROR(ENOSYS) for unsupported source types, and
 * AVERROR_UNKNOWN for all other failures.
 */
static int cuda_device_derive(AVHWDeviceContext *device_ctx,
                              AVHWDeviceContext *src_ctx,
                              int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    const char *src_uuid = NULL;
    int ret, i, device_count;

#if CONFIG_VULKAN
    VkPhysicalDeviceIDProperties vk_idp = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
    };
#endif

    switch (src_ctx->type) {
#if CONFIG_VULKAN
    case AV_HWDEVICE_TYPE_VULKAN: {
        AVVulkanDeviceContext *vkctx = src_ctx->hwctx;
        /* Chain the ID properties struct so the UUID gets filled in. */
        VkPhysicalDeviceProperties2 vk_dev_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &vk_idp,
        };
        vkGetPhysicalDeviceProperties2(vkctx->phys_dev, &vk_dev_props);
        src_uuid = vk_idp.deviceUUID;
        break;
    }
#endif
    default:
        return AVERROR(ENOSYS);
    }

    if (!src_uuid) {
        av_log(device_ctx, AV_LOG_ERROR,
               "Failed to get UUID of source device.\n");
        goto error;
    }

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;  /* CHECK_CU needs 'cu' and 'device_ctx' */

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGetCount(&device_count));
    if (ret < 0)
        goto error;

    /* -1 marks "no match yet"; also checked after the loop below. */
    hwctx->internal->cuda_device = -1;
    for (i = 0; i < device_count; i++) {
        CUdevice dev;
        CUuuid uuid;

        ret = CHECK_CU(cu->cuDeviceGet(&dev, i));
        if (ret < 0)
            goto error;

        ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev));
        if (ret < 0)
            goto error;

        /* UUIDs are raw 16-byte blobs on both APIs; compare bytewise. */
        if (memcmp(src_uuid, uuid.bytes, sizeof (uuid.bytes)) == 0) {
            hwctx->internal->cuda_device = dev;
            break;
        }
    }

    if (hwctx->internal->cuda_device == -1) {
        av_log(device_ctx, AV_LOG_ERROR, "Could not derive CUDA device.\n");
        goto error;
    }

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
/* Hardware context type descriptor registering CUDA with the generic
 * hwcontext layer. Note transfer_data_to and transfer_data_from share one
 * implementation: cuda_transfer_data picks the copy direction from which
 * side of the transfer carries a hw_frames_ctx. */
const HWContextType ff_hwcontext_type_cuda = {
    .type                   = AV_HWDEVICE_TYPE_CUDA,
    .name                   = "CUDA",

    .device_hwctx_size      = sizeof(AVCUDADeviceContext),
    .frames_priv_size       = sizeof(CUDAFramesContext),

    .device_create          = cuda_device_create,
    .device_derive          = cuda_device_derive,
    .device_init            = cuda_device_init,
    .device_uninit          = cuda_device_uninit,
    .frames_get_constraints = cuda_frames_get_constraints,
    .frames_init            = cuda_frames_init,
    .frames_get_buffer      = cuda_get_buffer,
    .transfer_get_formats   = cuda_transfer_get_formats,
    .transfer_data_to       = cuda_transfer_data,
    .transfer_data_from     = cuda_transfer_data,

    .pix_fmts               = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE },
};