You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

502 lines
14KB

  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "buffer.h"
  19. #include "common.h"
  20. #include "hwcontext.h"
  21. #include "hwcontext_internal.h"
  22. #include "hwcontext_cuda_internal.h"
  23. #if CONFIG_VULKAN
  24. #include "hwcontext_vulkan.h"
  25. #endif
  26. #include "cuda_check.h"
  27. #include "mem.h"
  28. #include "pixdesc.h"
  29. #include "pixfmt.h"
  30. #include "imgutils.h"
/* Byte alignment used for each plane's pitch when laying CUDA frames out
 * in a single device allocation. */
#define CUDA_FRAME_ALIGNMENT 256

/* Private per-frames-context state. */
typedef struct CUDAFramesContext {
    // Chroma subsampling shifts of sw_format; cached by cuda_frames_init()
    // and used by cuda_transfer_data() to compute chroma plane heights.
    int shift_width, shift_height;
} CUDAFramesContext;
/* Software pixel formats that can back an AV_PIX_FMT_CUDA frames context
 * (i.e. be uploaded to / downloaded from device memory). Checked linearly
 * by cuda_frames_init() and advertised by cuda_frames_get_constraints(). */
static const enum AVPixelFormat supported_formats[] = {
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_P010,
    AV_PIX_FMT_P016,
    AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_0RGB32,
    AV_PIX_FMT_0BGR32,
#if CONFIG_VULKAN
    AV_PIX_FMT_VULKAN,
#endif
};
  49. #define CHECK_CU(x) FF_CUDA_CHECK_DL(device_ctx, cu, x)
  50. static int cuda_frames_get_constraints(AVHWDeviceContext *ctx,
  51. const void *hwconfig,
  52. AVHWFramesConstraints *constraints)
  53. {
  54. int i;
  55. constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
  56. sizeof(*constraints->valid_sw_formats));
  57. if (!constraints->valid_sw_formats)
  58. return AVERROR(ENOMEM);
  59. for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
  60. constraints->valid_sw_formats[i] = supported_formats[i];
  61. constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;
  62. constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
  63. if (!constraints->valid_hw_formats)
  64. return AVERROR(ENOMEM);
  65. constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
  66. constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
  67. return 0;
  68. }
/**
 * AVBuffer free callback for pool-allocated CUDA frame buffers.
 *
 * opaque is the AVHWFramesContext the buffer belongs to; data is the raw
 * CUdeviceptr (stored as uint8_t*) returned by cuMemAlloc. The device
 * context must be made current before cuMemFree, hence the push/pop pair.
 */
static void cuda_buffer_free(void *opaque, uint8_t *data)
{
    AVHWFramesContext *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;  // name required by CHECK_CU
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;

    CUcontext dummy;

    CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));

    CHECK_CU(cu->cuMemFree((CUdeviceptr)data));

    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
}
  80. static AVBufferRef *cuda_pool_alloc(void *opaque, int size)
  81. {
  82. AVHWFramesContext *ctx = opaque;
  83. AVHWDeviceContext *device_ctx = ctx->device_ctx;
  84. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  85. CudaFunctions *cu = hwctx->internal->cuda_dl;
  86. AVBufferRef *ret = NULL;
  87. CUcontext dummy = NULL;
  88. CUdeviceptr data;
  89. int err;
  90. err = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
  91. if (err < 0)
  92. return NULL;
  93. err = CHECK_CU(cu->cuMemAlloc(&data, size));
  94. if (err < 0)
  95. goto fail;
  96. ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
  97. if (!ret) {
  98. CHECK_CU(cu->cuMemFree(data));
  99. goto fail;
  100. }
  101. fail:
  102. CHECK_CU(cu->cuCtxPopCurrent(&dummy));
  103. return ret;
  104. }
  105. static int cuda_frames_init(AVHWFramesContext *ctx)
  106. {
  107. CUDAFramesContext *priv = ctx->internal->priv;
  108. int i;
  109. for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
  110. if (ctx->sw_format == supported_formats[i])
  111. break;
  112. }
  113. if (i == FF_ARRAY_ELEMS(supported_formats)) {
  114. av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
  115. av_get_pix_fmt_name(ctx->sw_format));
  116. return AVERROR(ENOSYS);
  117. }
  118. av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);
  119. if (!ctx->pool) {
  120. int size = av_image_get_buffer_size(ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
  121. if (size < 0)
  122. return size;
  123. ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
  124. if (!ctx->internal->pool_internal)
  125. return AVERROR(ENOMEM);
  126. }
  127. return 0;
  128. }
/**
 * Get a CUDA frame from the context's pool and set up its plane pointers
 * and linesizes within the single device allocation.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    int res;

    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // Lay the planes out exactly as a system-memory image would be, with
    // CUDA_FRAME_ALIGNMENT-byte line alignment.
    res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
                               ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
    if (res < 0)
        return res;

    // YUV420P is a special case.
    // Nvenc expects the U/V planes in swapped order from how ffmpeg expects them, also chroma is half-aligned
    // NOTE: statement order below matters — data[1] is saved into data[2]
    // before being overwritten with the pointer past the (new) V plane.
    if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
        frame->data[2] = frame->data[1];
        frame->data[1] = frame->data[2] + frame->linesize[2] * ctx->height / 2;
    }

    frame->format = AV_PIX_FMT_CUDA;
    frame->width = ctx->width;
    frame->height = ctx->height;

    return 0;
}
  151. static int cuda_transfer_get_formats(AVHWFramesContext *ctx,
  152. enum AVHWFrameTransferDirection dir,
  153. enum AVPixelFormat **formats)
  154. {
  155. enum AVPixelFormat *fmts;
  156. fmts = av_malloc_array(2, sizeof(*fmts));
  157. if (!fmts)
  158. return AVERROR(ENOMEM);
  159. fmts[0] = ctx->sw_format;
  160. fmts[1] = AV_PIX_FMT_NONE;
  161. *formats = fmts;
  162. return 0;
  163. }
  164. static int cuda_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
  165. const AVFrame *src)
  166. {
  167. CUDAFramesContext *priv = ctx->internal->priv;
  168. AVHWDeviceContext *device_ctx = ctx->device_ctx;
  169. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  170. CudaFunctions *cu = hwctx->internal->cuda_dl;
  171. CUcontext dummy;
  172. int i, ret;
  173. if ((src->hw_frames_ctx && ((AVHWFramesContext*)src->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA) ||
  174. (dst->hw_frames_ctx && ((AVHWFramesContext*)dst->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA))
  175. return AVERROR(ENOSYS);
  176. ret = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
  177. if (ret < 0)
  178. return ret;
  179. for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
  180. CUDA_MEMCPY2D cpy = {
  181. .srcPitch = src->linesize[i],
  182. .dstPitch = dst->linesize[i],
  183. .WidthInBytes = FFMIN(src->linesize[i], dst->linesize[i]),
  184. .Height = src->height >> ((i == 0 || i == 3) ? 0 : priv->shift_height),
  185. };
  186. if (src->hw_frames_ctx) {
  187. cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
  188. cpy.srcDevice = (CUdeviceptr)src->data[i];
  189. } else {
  190. cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
  191. cpy.srcHost = src->data[i];
  192. }
  193. if (dst->hw_frames_ctx) {
  194. cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
  195. cpy.dstDevice = (CUdeviceptr)dst->data[i];
  196. } else {
  197. cpy.dstMemoryType = CU_MEMORYTYPE_HOST;
  198. cpy.dstHost = dst->data[i];
  199. }
  200. ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, hwctx->stream));
  201. if (ret < 0)
  202. goto exit;
  203. }
  204. if (!dst->hw_frames_ctx) {
  205. ret = CHECK_CU(cu->cuStreamSynchronize(hwctx->stream));
  206. if (ret < 0)
  207. goto exit;
  208. }
  209. exit:
  210. CHECK_CU(cu->cuCtxPopCurrent(&dummy));
  211. return 0;
  212. }
/**
 * Free all CUDA device context state: the CUDA context (only if this code
 * created it), the dynamically loaded driver function table, and the
 * internal struct. Safe to call with hwctx->internal == NULL.
 */
static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;

    if (hwctx->internal) {
        CudaFunctions *cu = hwctx->internal->cuda_dl;

        // Only tear down contexts we allocated ourselves; user-supplied
        // cuda_ctx values are left untouched.
        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            if (hwctx->internal->flags & AV_CUDA_USE_PRIMARY_CONTEXT)
                // Primary contexts are refcounted: release, don't destroy.
                CHECK_CU(cu->cuDevicePrimaryCtxRelease(hwctx->internal->cuda_device));
            else
                CHECK_CU(cu->cuCtxDestroy(hwctx->cuda_ctx));

            hwctx->cuda_ctx = NULL;
        }

        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
  229. static int cuda_device_init(AVHWDeviceContext *ctx)
  230. {
  231. AVCUDADeviceContext *hwctx = ctx->hwctx;
  232. int ret;
  233. if (!hwctx->internal) {
  234. hwctx->internal = av_mallocz(sizeof(*hwctx->internal));
  235. if (!hwctx->internal)
  236. return AVERROR(ENOMEM);
  237. }
  238. if (!hwctx->internal->cuda_dl) {
  239. ret = cuda_load_functions(&hwctx->internal->cuda_dl, ctx);
  240. if (ret < 0) {
  241. av_log(ctx, AV_LOG_ERROR, "Could not dynamically load CUDA\n");
  242. goto error;
  243. }
  244. }
  245. return 0;
  246. error:
  247. cuda_device_uninit(ctx);
  248. return ret;
  249. }
/**
 * Create or retain the CUDA context for the device in hwctx->internal.
 *
 * With AV_CUDA_USE_PRIMARY_CONTEXT the device's primary context is retained
 * (after verifying/setting its scheduling flags); otherwise a dedicated
 * context is created. On success hwctx->cuda_ctx is set, is_allocated is
 * marked so cuda_device_uninit() knows to tear it down, and hwctx->stream
 * is the default stream (NULL).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    CUcontext dummy;
    int ret, dev_active = 0;
    unsigned int dev_flags = 0;

    const unsigned int desired_flags = CU_CTX_SCHED_BLOCKING_SYNC;

    cu = hwctx->internal->cuda_dl;

    hwctx->internal->flags = flags;

    if (flags & AV_CUDA_USE_PRIMARY_CONTEXT) {
        ret = CHECK_CU(cu->cuDevicePrimaryCtxGetState(hwctx->internal->cuda_device,
                       &dev_flags, &dev_active));
        if (ret < 0)
            return ret;

        // An already-active primary context's flags cannot be changed; if
        // they differ from what we need we must fail rather than misbehave.
        if (dev_active && dev_flags != desired_flags) {
            av_log(device_ctx, AV_LOG_ERROR, "Primary context already active with incompatible flags.\n");
            return AVERROR(ENOTSUP);
        } else if (dev_flags != desired_flags) {
            // Inactive context with wrong flags: set them before retaining.
            ret = CHECK_CU(cu->cuDevicePrimaryCtxSetFlags(hwctx->internal->cuda_device,
                           desired_flags));
            if (ret < 0)
                return ret;
        }

        ret = CHECK_CU(cu->cuDevicePrimaryCtxRetain(&hwctx->cuda_ctx,
                       hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;
    } else {
        ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
                       hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;

        // cuCtxCreate leaves the new context current; pop it so callers
        // push/pop explicitly around every CUDA call.
        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    hwctx->internal->is_allocated = 1;

    // Setting stream to NULL will make functions automatically use the default CUstream
    hwctx->stream = NULL;

    return 0;
}
  289. static int cuda_device_create(AVHWDeviceContext *device_ctx,
  290. const char *device,
  291. AVDictionary *opts, int flags)
  292. {
  293. AVCUDADeviceContext *hwctx = device_ctx->hwctx;
  294. CudaFunctions *cu;
  295. int ret, device_idx = 0;
  296. if (device)
  297. device_idx = strtol(device, NULL, 0);
  298. if (cuda_device_init(device_ctx) < 0)
  299. goto error;
  300. cu = hwctx->internal->cuda_dl;
  301. ret = CHECK_CU(cu->cuInit(0));
  302. if (ret < 0)
  303. goto error;
  304. ret = CHECK_CU(cu->cuDeviceGet(&hwctx->internal->cuda_device, device_idx));
  305. if (ret < 0)
  306. goto error;
  307. ret = cuda_context_init(device_ctx, flags);
  308. if (ret < 0)
  309. goto error;
  310. return 0;
  311. error:
  312. cuda_device_uninit(device_ctx);
  313. return AVERROR_UNKNOWN;
  314. }
/**
 * Derive a CUDA device context from another hardware device context.
 *
 * Currently only Vulkan sources are supported (when CONFIG_VULKAN): the
 * source device's UUID is queried via VK_KHR physical-device-ID properties
 * and matched against the UUIDs of all visible CUDA devices.
 *
 * Returns 0 on success, AVERROR(ENOSYS) for unsupported source types, or
 * AVERROR_UNKNOWN on any other failure.
 */
static int cuda_device_derive(AVHWDeviceContext *device_ctx,
                              AVHWDeviceContext *src_ctx, AVDictionary *opts,
                              int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    const char *src_uuid = NULL;
    int ret, i, device_count;

#if CONFIG_VULKAN
    VkPhysicalDeviceIDProperties vk_idp = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
    };
#endif

    switch (src_ctx->type) {
#if CONFIG_VULKAN
    case AV_HWDEVICE_TYPE_VULKAN: {
        AVVulkanDeviceContext *vkctx = src_ctx->hwctx;
        // Chain the ID properties struct to fetch the device UUID.
        VkPhysicalDeviceProperties2 vk_dev_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &vk_idp,
        };
        vkGetPhysicalDeviceProperties2(vkctx->phys_dev, &vk_dev_props);
        src_uuid = vk_idp.deviceUUID;
        break;
    }
#endif
    default:
        return AVERROR(ENOSYS);
    }

    if (!src_uuid) {
        av_log(device_ctx, AV_LOG_ERROR,
               "Failed to get UUID of source device.\n");
        goto error;
    }

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGetCount(&device_count));
    if (ret < 0)
        goto error;

    // Scan all CUDA devices for one whose UUID matches the source device.
    hwctx->internal->cuda_device = -1;
    for (i = 0; i < device_count; i++) {
        CUdevice dev;
        CUuuid uuid;

        ret = CHECK_CU(cu->cuDeviceGet(&dev, i));
        if (ret < 0)
            goto error;

        ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev));
        if (ret < 0)
            goto error;

        if (memcmp(src_uuid, uuid.bytes, sizeof (uuid.bytes)) == 0) {
            hwctx->internal->cuda_device = dev;
            break;
        }
    }

    if (hwctx->internal->cuda_device == -1) {
        av_log(device_ctx, AV_LOG_ERROR, "Could not derive CUDA device.\n");
        goto error;
    }

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
/* Registration table tying the CUDA callbacks above into the generic
 * AVHWDeviceContext/AVHWFramesContext machinery. Note the same function
 * serves both transfer directions. */
const HWContextType ff_hwcontext_type_cuda = {
    .type                 = AV_HWDEVICE_TYPE_CUDA,
    .name                 = "CUDA",

    .device_hwctx_size    = sizeof(AVCUDADeviceContext),
    .frames_priv_size     = sizeof(CUDAFramesContext),

    .device_create        = cuda_device_create,
    .device_derive        = cuda_device_derive,
    .device_init          = cuda_device_init,
    .device_uninit        = cuda_device_uninit,
    .frames_get_constraints = cuda_frames_get_constraints,
    .frames_init          = cuda_frames_init,
    .frames_get_buffer    = cuda_get_buffer,
    .transfer_get_formats = cuda_transfer_get_formats,
    .transfer_data_to     = cuda_transfer_data,
    .transfer_data_from   = cuda_transfer_data,

    .pix_fmts             = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE },
};
  400. };