/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_cuda_internal.h"
#if CONFIG_VULKAN
#include "hwcontext_vulkan.h"
#endif
#include "cuda_check.h"
#include "mem.h"
#include "pixdesc.h"
#include "pixfmt.h"
#include "imgutils.h"
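
/* Per-AVHWFramesContext private data: the chroma subsampling shifts of the
 * software pixel format and the device's texture alignment requirement. */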
typedef struct CUDAFramesContext {
    int shift_width, shift_height;
    int tex_alignment;
} CUDAFramesContext;
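
/* Software pixel formats that can back an AV_PIX_FMT_CUDA frame. */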
static const enum AVPixelFormat supported_formats[] = {
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVA420P,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_P010,
    AV_PIX_FMT_P016,
    AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_0RGB32,
    AV_PIX_FMT_0BGR32,
#if CONFIG_VULKAN
    AV_PIX_FMT_VULKAN,
#endif
};
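
/* Evaluates a CUDA driver call and routes any error report through the
 * surrounding device_ctx, yielding an AVERROR code on failure. */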
#define CHECK_CU(x) FF_CUDA_CHECK_DL(device_ctx, cu, x)
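
/* Reports the valid hw/sw format pairs: the hardware side is always
 * AV_PIX_FMT_CUDA, the software side is the supported_formats table. */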
static int cuda_frames_get_constraints(AVHWDeviceContext *ctx,
                                       const void *hwconfig,
                                       AVHWFramesConstraints *constraints)
{
    int i;

    constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
                                                    sizeof(*constraints->valid_sw_formats));
    if (!constraints->valid_sw_formats)
        return AVERROR(ENOMEM);

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
        constraints->valid_sw_formats[i] = supported_formats[i];
    constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;

    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
    if (!constraints->valid_hw_formats)
        return AVERROR(ENOMEM);

    constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;

    return 0;
}
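
/* AVBuffer free callback: releases the device allocation made by
 * cuda_pool_alloc under the owning CUDA context. */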
static void cuda_buffer_free(void *opaque, uint8_t *data)
{
    AVHWFramesContext *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;

    CUcontext dummy;

    CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
    CHECK_CU(cu->cuMemFree((CUdeviceptr)data));
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
}
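
/* Buffer pool allocator: cuMemAlloc's a block of the requested size and
 * wraps it in an AVBufferRef. Note that the success path falls through into
 * the "fail" label on purpose; the label only restores the previous CUDA
 * context before returning whatever ret holds. */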
static AVBufferRef *cuda_pool_alloc(void *opaque, buffer_size_t size)
{
    AVHWFramesContext *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;

    AVBufferRef *ret = NULL;
    CUcontext dummy = NULL;
    CUdeviceptr data;
    int err;

    err = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
    if (err < 0)
        return NULL;

    err = CHECK_CU(cu->cuMemAlloc(&data, size));
    if (err < 0)
        goto fail;

    ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
    if (!ret) {
        CHECK_CU(cu->cuMemFree(data));
        goto fail;
    }

fail:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    return ret;
}
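
/* Validates the requested sw_format, queries the device's texture alignment
 * (used as the plane linesize/size alignment below), and sets up the
 * internal buffer pool when the caller did not supply one. */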
static int cuda_frames_init(AVHWFramesContext *ctx)
{
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CUDAFramesContext *priv = ctx->internal->priv;
    CudaFunctions *cu = hwctx->internal->cuda_dl;
    int err, i;

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i])
            break;
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(ENOSYS);
    }

    err = CHECK_CU(cu->cuDeviceGetAttribute(&priv->tex_alignment,
                                            14 /* CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT */,
                                            hwctx->internal->cuda_device));
    if (err < 0)
        return err;

    av_log(ctx, AV_LOG_DEBUG, "CUDA texture alignment: %d\n", priv->tex_alignment);

    // YUV420P is a special case.
    // Since nvenc expects the U/V planes to have half the linesize of the Y plane,
    // the alignment has to be doubled to ensure the U/V planes still end up aligned.
    if (ctx->sw_format == AV_PIX_FMT_YUV420P)
        priv->tex_alignment *= 2;

    av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);

    if (!ctx->pool) {
        int size = av_image_get_buffer_size(ctx->sw_format, ctx->width, ctx->height, priv->tex_alignment);
        if (size < 0)
            return size;

        ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
        if (!ctx->internal->pool_internal)
            return AVERROR(ENOMEM);
    }

    return 0;
}
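
/* Acquires a buffer from the pool and fills the AVFrame's plane pointers
 * and linesizes according to the frames context's sw_format. */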
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    int res;

    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
                               ctx->sw_format, ctx->width, ctx->height, priv->tex_alignment);
    if (res < 0)
        return res;

    // YUV420P is a special case.
    // Nvenc expects the U/V planes in swapped order from how ffmpeg expects them,
    // and the chroma planes end up with only half the luma alignment (hence the
    // doubled alignment in cuda_frames_init).
    if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
        frame->data[2] = frame->data[1];
        frame->data[1] = frame->data[2] + frame->linesize[2] * (ctx->height / 2);
    }

    frame->format = AV_PIX_FMT_CUDA;
    frame->width  = ctx->width;
    frame->height = ctx->height;

    return 0;
}
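
/* Uploads and downloads are only supported to/from the frames context's own
 * sw_format, so that is the single entry reported here. */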
static int cuda_transfer_get_formats(AVHWFramesContext *ctx,
                                     enum AVHWFrameTransferDirection dir,
                                     enum AVPixelFormat **formats)
{
    enum AVPixelFormat *fmts;

    fmts = av_malloc_array(2, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    fmts[0] = ctx->sw_format;
    fmts[1] = AV_PIX_FMT_NONE;

    *formats = fmts;

    return 0;
}
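
/* Handles both transfer directions: each plane is copied with an async 2D
 * memcpy on the context's stream, and a download (device to host) is
 * synchronized before returning so the host data is ready to use. */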
static int cuda_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
                              const AVFrame *src)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;

    CUcontext dummy;
    int i, ret;

    if ((src->hw_frames_ctx && ((AVHWFramesContext*)src->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA) ||
        (dst->hw_frames_ctx && ((AVHWFramesContext*)dst->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA))
        return AVERROR(ENOSYS);

    ret = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
    if (ret < 0)
        return ret;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcPitch     = src->linesize[i],
            .dstPitch     = dst->linesize[i],
            .WidthInBytes = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height       = src->height >> ((i == 0 || i == 3) ? 0 : priv->shift_height),
        };

        if (src->hw_frames_ctx) {
            cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
            cpy.srcDevice     = (CUdeviceptr)src->data[i];
        } else {
            cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
            cpy.srcHost       = src->data[i];
        }

        if (dst->hw_frames_ctx) {
            cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
            cpy.dstDevice     = (CUdeviceptr)dst->data[i];
        } else {
            cpy.dstMemoryType = CU_MEMORYTYPE_HOST;
            cpy.dstHost       = dst->data[i];
        }

        ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, hwctx->stream));
        if (ret < 0)
            goto exit;
    }

    if (!dst->hw_frames_ctx) {
        ret = CHECK_CU(cu->cuStreamSynchronize(hwctx->stream));
        if (ret < 0)
            goto exit;
    }

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));

    // ret is 0 on success, or the first error from the copy/sync above.
    return ret;
}
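
/* Tears down what cuda_device_init/cuda_context_init set up; safe to call
 * on a partially initialized context. */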
static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;

    if (hwctx->internal) {
        CudaFunctions *cu = hwctx->internal->cuda_dl;

        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            if (hwctx->internal->flags & AV_CUDA_USE_PRIMARY_CONTEXT)
                CHECK_CU(cu->cuDevicePrimaryCtxRelease(hwctx->internal->cuda_device));
            else
                CHECK_CU(cu->cuCtxDestroy(hwctx->cuda_ctx));

            hwctx->cuda_ctx = NULL;
        }

        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
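
/* Allocates the internal state and dynamically loads the CUDA driver API
 * entry points via cuda_load_functions. */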
static int cuda_device_init(AVHWDeviceContext *ctx)
{
    AVCUDADeviceContext *hwctx = ctx->hwctx;
    int ret;

    if (!hwctx->internal) {
        hwctx->internal = av_mallocz(sizeof(*hwctx->internal));
        if (!hwctx->internal)
            return AVERROR(ENOMEM);
    }

    if (!hwctx->internal->cuda_dl) {
        ret = cuda_load_functions(&hwctx->internal->cuda_dl, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not dynamically load CUDA\n");
            goto error;
        }
    }

    return 0;

error:
    cuda_device_uninit(ctx);
    return ret;
}
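
/* Creates the CUDA context for the selected device, either by retaining the
 * device's primary context (AV_CUDA_USE_PRIMARY_CONTEXT) or by creating a
 * dedicated one, in both cases with blocking-sync scheduling. */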
static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    CUcontext dummy;
    int ret, dev_active = 0;
    unsigned int dev_flags = 0;

    const unsigned int desired_flags = CU_CTX_SCHED_BLOCKING_SYNC;

    cu = hwctx->internal->cuda_dl;

    hwctx->internal->flags = flags;

    if (flags & AV_CUDA_USE_PRIMARY_CONTEXT) {
        ret = CHECK_CU(cu->cuDevicePrimaryCtxGetState(hwctx->internal->cuda_device,
                                                      &dev_flags, &dev_active));
        if (ret < 0)
            return ret;

        if (dev_active && dev_flags != desired_flags) {
            av_log(device_ctx, AV_LOG_ERROR, "Primary context already active with incompatible flags.\n");
            return AVERROR(ENOTSUP);
        } else if (dev_flags != desired_flags) {
            ret = CHECK_CU(cu->cuDevicePrimaryCtxSetFlags(hwctx->internal->cuda_device,
                                                          desired_flags));
            if (ret < 0)
                return ret;
        }

        ret = CHECK_CU(cu->cuDevicePrimaryCtxRetain(&hwctx->cuda_ctx,
                                                    hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;
    } else {
        ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
                                       hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;

        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    hwctx->internal->is_allocated = 1;

    // Setting stream to NULL will make functions automatically use the default CUstream
    hwctx->stream = NULL;

    return 0;
}
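
/* Standard device creation path: "device" is parsed as a CUDA device index
 * (defaulting to 0). */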
static int cuda_device_create(AVHWDeviceContext *device_ctx,
                              const char *device,
                              AVDictionary *opts, int flags)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    int ret, device_idx = 0;

    if (device)
        device_idx = strtol(device, NULL, 0);

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGet(&hwctx->internal->cuda_device, device_idx));
    if (ret < 0)
        goto error;

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
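
/* Derives a CUDA device from another hardware device context (currently
 * only Vulkan) by matching device UUIDs across the two APIs. */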
static int cuda_device_derive(AVHWDeviceContext *device_ctx,
                              AVHWDeviceContext *src_ctx, AVDictionary *opts,
                              int flags)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    const char *src_uuid = NULL;
    int ret, i, device_count;

#if CONFIG_VULKAN
    VkPhysicalDeviceIDProperties vk_idp = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
    };
#endif

    switch (src_ctx->type) {
#if CONFIG_VULKAN
    case AV_HWDEVICE_TYPE_VULKAN: {
        AVVulkanDeviceContext *vkctx = src_ctx->hwctx;
        VkPhysicalDeviceProperties2 vk_dev_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &vk_idp,
        };
        vkGetPhysicalDeviceProperties2(vkctx->phys_dev, &vk_dev_props);
        src_uuid = vk_idp.deviceUUID;
        break;
    }
#endif
    default:
        return AVERROR(ENOSYS);
    }

    if (!src_uuid) {
        av_log(device_ctx, AV_LOG_ERROR,
               "Failed to get UUID of source device.\n");
        goto error;
    }

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGetCount(&device_count));
    if (ret < 0)
        goto error;

    hwctx->internal->cuda_device = -1;
    for (i = 0; i < device_count; i++) {
        CUdevice dev;
        CUuuid uuid;

        ret = CHECK_CU(cu->cuDeviceGet(&dev, i));
        if (ret < 0)
            goto error;

        ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev));
        if (ret < 0)
            goto error;

        if (memcmp(src_uuid, uuid.bytes, sizeof(uuid.bytes)) == 0) {
            hwctx->internal->cuda_device = dev;
            break;
        }
    }

    if (hwctx->internal->cuda_device == -1) {
        av_log(device_ctx, AV_LOG_ERROR, "Could not derive CUDA device.\n");
        goto error;
    }

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
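
/* Exported vtable tying the callbacks above into lavu's hwcontext machinery. */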
const HWContextType ff_hwcontext_type_cuda = {
    .type                   = AV_HWDEVICE_TYPE_CUDA,
    .name                   = "CUDA",

    .device_hwctx_size      = sizeof(AVCUDADeviceContext),
    .frames_priv_size       = sizeof(CUDAFramesContext),

    .device_create          = cuda_device_create,
    .device_derive          = cuda_device_derive,
    .device_init            = cuda_device_init,
    .device_uninit          = cuda_device_uninit,
    .frames_get_constraints = cuda_frames_get_constraints,
    .frames_init            = cuda_frames_init,
    .frames_get_buffer      = cuda_get_buffer,
    .transfer_get_formats   = cuda_transfer_get_formats,
    .transfer_data_to       = cuda_transfer_data,
    .transfer_data_from     = cuda_transfer_data,

    .pix_fmts               = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE },
};
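
/*
 * A minimal usage sketch (not part of this file): creating a CUDA device
 * context and a frames context from user code through the public lavu API,
 * which ends up in the callbacks above. Error handling is omitted and the
 * 1920x1080 NV12 parameters are arbitrary.
 *
 *     AVBufferRef *dev_ref = NULL, *frames_ref = NULL;
 *     av_hwdevice_ctx_create(&dev_ref, AV_HWDEVICE_TYPE_CUDA, "0", NULL, 0);
 *     frames_ref = av_hwframe_ctx_alloc(dev_ref);
 *     AVHWFramesContext *fc = (AVHWFramesContext*)frames_ref->data;
 *     fc->format    = AV_PIX_FMT_CUDA;
 *     fc->sw_format = AV_PIX_FMT_NV12;
 *     fc->width     = 1920;
 *     fc->height    = 1080;
 *     av_hwframe_ctx_init(frames_ref);
 */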