/*
 * Nvidia CUVID decoder
 * Copyright (c) 2016 Timo Rothenpieler <timo@rothenpieler.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "compat/cuda/dynlink_loader.h"

#include "libavutil/buffer.h"
#include "libavutil/mathematics.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/cuda_check.h"
#include "libavutil/fifo.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "decode.h"
#include "hwconfig.h"
#include "nvdec.h"
#include "internal.h"

#if !NVDECAPI_CHECK_VERSION(9, 0)
#define cudaVideoSurfaceFormat_YUV444 2
#define cudaVideoSurfaceFormat_YUV444_16Bit 3
#endif
typedef struct CuvidContext
{
    AVClass *avclass;

    CUvideodecoder cudecoder;
    CUvideoparser cuparser;

    char *cu_gpu;
    int nb_surfaces;
    int drop_second_field;
    char *crop_expr;
    char *resize_expr;

    struct {
        int left;
        int top;
        int right;
        int bottom;
    } crop;

    struct {
        int width;
        int height;
    } resize;

    AVBufferRef *hwdevice;
    AVBufferRef *hwframe;

    AVFifoBuffer *frame_queue;

    int deint_mode;
    int deint_mode_current;
    int64_t prev_pts;
    int progressive_sequence;

    int internal_error;
    int decoder_flushing;

    int *key_frame;

    cudaVideoCodec codec_type;
    cudaVideoChromaFormat chroma_format;

    CUVIDDECODECAPS caps8, caps10, caps12;

    CUVIDPARSERPARAMS cuparseinfo;
    CUVIDEOFORMATEX *cuparse_ext;

    CudaFunctions *cudl;
    CuvidFunctions *cvdl;
} CuvidContext;

typedef struct CuvidParsedFrame
{
    CUVIDPARSERDISPINFO dispinfo;
    int second_field;
    int is_deinterlacing;
} CuvidParsedFrame;

#define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, ctx->cudl, x)
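
/*
 * Parser sequence callback: invoked by cuvidParseVideoData whenever a new
 * sequence header is encountered. It negotiates the output pixel format via
 * ff_get_format(), propagates stream properties (SAR, color metadata,
 * framerate) to the AVCodecContext and (re)creates the CUVID decoder when
 * the video parameters change. Returning 0 signals failure to the parser;
 * the actual error code is stored in ctx->internal_error.
 */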
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;
    int chroma_444;

    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    chroma_444 = format->chroma_format == cudaVideoChromaFormat_444;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    ctx->progressive_sequence = format->progressive_sequence;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    case AV_PIX_FMT_YUV444P:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_YUV444;
        break;
    case AV_PIX_FMT_YUV444P16:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_YUV444_16Bit;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
               av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}

static int CUDAAPI cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS* picparams)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;

    av_log(avctx, AV_LOG_TRACE, "pfnDecodePicture\n");

    ctx->key_frame[picparams->CurrPicIdx] = picparams->intra_pic_flag;

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams));
    if (ctx->internal_error < 0)
        return 0;

    return 1;
}

static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINFO* dispinfo)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    CuvidParsedFrame parsed_frame = { { 0 } };

    parsed_frame.dispinfo = *dispinfo;
    ctx->internal_error = 0;

    // For some reason, dispinfo->progressive_frame is sometimes wrong.
    parsed_frame.dispinfo.progressive_frame = ctx->progressive_sequence;

    if (ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave) {
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
    } else {
        parsed_frame.is_deinterlacing = 1;
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        if (!ctx->drop_second_field) {
            parsed_frame.second_field = 1;
            av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        }
    }

    return 1;
}
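
/*
 * Backpressure check: counts the frames already queued for output plus the
 * frames the parser may still be holding back (ulMaxDisplayDelay, doubled
 * when bob/adaptive deinterlacing emits two fields per picture and the
 * second field is not dropped) and compares the total against nb_surfaces.
 * cuvid_decode_packet() returns EAGAIN while this reports full.
 */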
static int cuvid_is_buffer_full(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    int delay = ctx->cuparseinfo.ulMaxDisplayDelay;
    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        delay *= 2;

    return (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + delay >= ctx->nb_surfaces;
}
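
/*
 * Feeds one packet to the CUVID parser. CUVID expects timestamps on a fixed
 * 10 MHz clock, so incoming PTS values are rescaled from pkt_timebase to
 * (AVRational){1, 10000000} here and converted back in cuvid_output_frame().
 * An empty packet switches the decoder into flushing mode by sending
 * CUVID_PKT_ENDOFSTREAM.
 */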
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");

    if (is_flush && avpkt && avpkt->size)
        return AVERROR_EOF;

    if (cuvid_is_buffer_full(avctx) && avpkt && avpkt->size)
        return AVERROR(EAGAIN);

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0) {
        return ret;
    }

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));
    if (ret < 0)
        goto error;

    // cuvidParseVideoData doesn't return an error just because stuff failed...
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        ret = ctx->internal_error;
        goto error;
    }

error:
    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else if (ret < 0)
        return ret;
    else if (is_flush)
        return AVERROR_EOF;
    else
        return 0;
}
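
/*
 * Pulls one decoded frame from the display queue, feeding more input first
 * if there is room. Two output paths exist: with AV_PIX_FMT_CUDA the mapped
 * decoder surface is copied plane by plane into a frame from the CUDA
 * hwframe pool via cuMemcpy2DAsync; for the system-memory formats (NV12,
 * P010/P016, YUV444P/YUV444P16) a temporary CUDA frame wrapping the mapped
 * surface is downloaded with av_hwframe_transfer_data(). Timestamps are
 * converted back from the 10 MHz CUVID clock, and the second field of a
 * deinterlaced pair gets an interpolated PTS.
 */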
static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_output_frame\n");

    if (ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, NULL);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!cuvid_is_buffer_full(avctx)) {
        AVPacket pkt = {0};
        ret = ff_decode_get_packet(avctx, &pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
        ret = cuvid_decode_packet(avctx, &pkt);
        av_packet_unref(&pkt);
        // cuvid_is_buffer_full() should avoid this.
        if (ret == AVERROR(EAGAIN))
            ret = AVERROR_EXTERNAL;
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        return ret;

    if (av_fifo_size(ctx->frame_queue)) {
        const AVPixFmtDescriptor *pixdesc;
        CuvidParsedFrame parsed_frame;
        CUVIDPROCPARAMS params;
        unsigned int pitch = 0;
        int offset = 0;
        int i;

        av_fifo_generic_read(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);

        memset(&params, 0, sizeof(params));
        params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
        params.second_field = parsed_frame.second_field;
        params.top_field_first = parsed_frame.dispinfo.top_field_first;

        ret = CHECK_CU(ctx->cvdl->cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index, &mapped_frame, &pitch, &params));
        if (ret < 0)
            goto error;

        if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
            ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n");
                goto error;
            }

            ret = ff_decode_frame_props(avctx, frame);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n");
                goto error;
            }

            pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

            for (i = 0; i < pixdesc->nb_components; i++) {
                int height = avctx->height >> (i ? pixdesc->log2_chroma_h : 0);
                CUDA_MEMCPY2D cpy = {
                    .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                    .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                    .srcDevice     = mapped_frame,
                    .dstDevice     = (CUdeviceptr)frame->data[i],
                    .srcPitch      = pitch,
                    .dstPitch      = frame->linesize[i],
                    .srcY          = offset,
                    .WidthInBytes  = FFMIN(pitch, frame->linesize[i]),
                    .Height        = height,
                };

                ret = CHECK_CU(ctx->cudl->cuMemcpy2DAsync(&cpy, device_hwctx->stream));
                if (ret < 0)
                    goto error;

                offset += height;
            }
        } else if (avctx->pix_fmt == AV_PIX_FMT_NV12      ||
                   avctx->pix_fmt == AV_PIX_FMT_P010      ||
                   avctx->pix_fmt == AV_PIX_FMT_P016      ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P   ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P16) {
            unsigned int offset = 0;
            AVFrame *tmp_frame = av_frame_alloc();
            if (!tmp_frame) {
                av_log(avctx, AV_LOG_ERROR, "av_frame_alloc failed\n");
                ret = AVERROR(ENOMEM);
                goto error;
            }

            pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

            tmp_frame->format        = AV_PIX_FMT_CUDA;
            tmp_frame->hw_frames_ctx = av_buffer_ref(ctx->hwframe);
            tmp_frame->width         = avctx->width;
            tmp_frame->height        = avctx->height;

            /*
             * Note that the following logic would not work for three plane
             * YUV420 because the pitch value is different for the chroma
             * planes.
             */
            for (i = 0; i < pixdesc->nb_components; i++) {
                tmp_frame->data[i]     = (uint8_t*)mapped_frame + offset;
                tmp_frame->linesize[i] = pitch;
                offset += pitch * (avctx->height >> (i ? pixdesc->log2_chroma_h : 0));
            }

            ret = ff_get_buffer(avctx, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_get_buffer failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }

            ret = av_hwframe_transfer_data(frame, tmp_frame, 0);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }
            av_frame_free(&tmp_frame);
        } else {
            ret = AVERROR_BUG;
            goto error;
        }

        frame->key_frame = ctx->key_frame[parsed_frame.dispinfo.picture_index];
        frame->width = avctx->width;
        frame->height = avctx->height;
        if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
            frame->pts = av_rescale_q(parsed_frame.dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase);
        else
            frame->pts = parsed_frame.dispinfo.timestamp;

        if (parsed_frame.second_field) {
            if (ctx->prev_pts == INT64_MIN) {
                ctx->prev_pts = frame->pts;
                frame->pts += (avctx->pkt_timebase.den * avctx->framerate.den) / (avctx->pkt_timebase.num * avctx->framerate.num);
            } else {
                int pts_diff = (frame->pts - ctx->prev_pts) / 2;
                ctx->prev_pts = frame->pts;
                frame->pts += pts_diff;
            }
        }

        /* CUVID's opaque reordering breaks the internal pkt logic.
         * So set pkt_pts and clear all the other pkt_ fields.
         */
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pkt_pos = -1;
        frame->pkt_duration = 0;
        frame->pkt_size = -1;

        frame->interlaced_frame = !parsed_frame.is_deinterlacing && !parsed_frame.dispinfo.progressive_frame;

        if (frame->interlaced_frame)
            frame->top_field_first = parsed_frame.dispinfo.top_field_first;
    } else if (ctx->decoder_flushing) {
        ret = AVERROR_EOF;
    } else {
        ret = AVERROR(EAGAIN);
    }

error:
    if (mapped_frame)
        eret = CHECK_CU(ctx->cvdl->cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame));

    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else
        return ret;
}
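
/*
 * Legacy decode() entry point, kept alongside receive_frame(): it chains
 * cuvid_decode_packet() and cuvid_output_frame(). Deinterlacing is rejected
 * here since this API can return at most one frame per packet, while
 * bob/adaptive modes may produce two.
 */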
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    int ret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_frame\n");

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave) {
        av_log(avctx, AV_LOG_ERROR, "Deinterlacing is not supported via the old API\n");
        return AVERROR(EINVAL);
    }

    if (!ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = cuvid_output_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        *got_frame = 0;
    } else if (ret < 0) {
        return ret;
    } else {
        *got_frame = 1;
    }

    return 0;
}

static av_cold int cuvid_decode_end(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    av_fifo_freep(&ctx->frame_queue);

    if (ctx->cuparser)
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);

    if (ctx->cudecoder)
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);

    ctx->cudl = NULL;

    av_buffer_unref(&ctx->hwframe);
    av_buffer_unref(&ctx->hwdevice);

    av_freep(&ctx->key_frame);
    av_freep(&ctx->cuparse_ext);

    cuvid_free_functions(&ctx->cvdl);

    return 0;
}
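
/*
 * Queries cuvidGetDecoderCaps (when available) for the 8-, 10- and 12-bit
 * variants of the probed codec at 4:2:0 chroma and validates the probed
 * dimensions against the reported limits, including the macroblock count
 * (width * height / 256). Drivers older than 378.66 (Windows) / 378.13
 * (Linux) lack the query and are assumed to support 8 and 10 bit.
 */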
static int cuvid_test_capabilities(AVCodecContext *avctx,
                                   const CUVIDPARSERPARAMS *cuparseinfo,
                                   int probed_width,
                                   int probed_height,
                                   int bit_depth)
{
    CuvidContext *ctx = avctx->priv_data;
    CUVIDDECODECAPS *caps;
    int res8 = 0, res10 = 0, res12 = 0;

    if (!ctx->cvdl->cuvidGetDecoderCaps) {
        av_log(avctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
        av_log(avctx, AV_LOG_WARNING, "The minimum required version is "
#if defined(_WIN32) || defined(__CYGWIN__)
            "378.66"
#else
            "378.13"
#endif
            ". Continuing blind.\n");
        ctx->caps8.bIsSupported = ctx->caps10.bIsSupported = 1;
        // 12 bit was not supported before the capability check was introduced, so disable it.
        ctx->caps12.bIsSupported = 0;
        return 0;
    }

    ctx->caps8.eCodecType = ctx->caps10.eCodecType = ctx->caps12.eCodecType
        = cuparseinfo->CodecType;
    ctx->caps8.eChromaFormat = ctx->caps10.eChromaFormat = ctx->caps12.eChromaFormat
        = cudaVideoChromaFormat_420;

    ctx->caps8.nBitDepthMinus8 = 0;
    ctx->caps10.nBitDepthMinus8 = 2;
    ctx->caps12.nBitDepthMinus8 = 4;

    res8 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps8));
    res10 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps10));
    res12 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps12));

    av_log(avctx, AV_LOG_VERBOSE, "CUVID capabilities for %s:\n", avctx->codec->name);
    av_log(avctx, AV_LOG_VERBOSE, "8 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps8.bIsSupported, ctx->caps8.nMinWidth, ctx->caps8.nMaxWidth, ctx->caps8.nMinHeight, ctx->caps8.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "10 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps10.bIsSupported, ctx->caps10.nMinWidth, ctx->caps10.nMaxWidth, ctx->caps10.nMinHeight, ctx->caps10.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "12 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps12.bIsSupported, ctx->caps12.nMinWidth, ctx->caps12.nMaxWidth, ctx->caps12.nMinHeight, ctx->caps12.nMaxHeight);

    switch (bit_depth) {
    case 10:
        caps = &ctx->caps10;
        if (res10 < 0)
            return res10;
        break;
    case 12:
        caps = &ctx->caps12;
        if (res12 < 0)
            return res12;
        break;
    default:
        caps = &ctx->caps8;
        if (res8 < 0)
            return res8;
    }
    if (!ctx->caps8.bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Codec %s is not supported.\n", avctx->codec->name);
        return AVERROR(EINVAL);
    }
    if (!caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Bit depth %d is not supported.\n", bit_depth);
        return AVERROR(EINVAL);
    }
    if (probed_width > caps->nMaxWidth || probed_width < caps->nMinWidth) {
        av_log(avctx, AV_LOG_ERROR, "Video width %d not within range from %d to %d\n",
               probed_width, caps->nMinWidth, caps->nMaxWidth);
        return AVERROR(EINVAL);
    }
    if (probed_height > caps->nMaxHeight || probed_height < caps->nMinHeight) {
        av_log(avctx, AV_LOG_ERROR, "Video height %d not within range from %d to %d\n",
               probed_height, caps->nMinHeight, caps->nMaxHeight);
        return AVERROR(EINVAL);
    }
    if ((probed_width * probed_height) / 256 > caps->nMaxMBCount) {
        av_log(avctx, AV_LOG_ERROR, "Video macroblock count %d exceeds maximum of %d\n",
               (int)(probed_width * probed_height) / 256, caps->nMaxMBCount);
        return AVERROR(EINVAL);
    }

    return 0;
}
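
/*
 * Decoder init: negotiates an initial pixel format, parses the crop/resize
 * option strings, loads the nvcuvid functions, sets up (or adopts) the CUDA
 * device and frame contexts, verifies decoder capabilities against the
 * probed stream parameters and finally creates the CUVID parser, feeding it
 * any extradata as an initial sequence header packet.
 */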
static av_cold int cuvid_decode_init(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVCUDADeviceContext *device_hwctx;
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *hwframe_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt;
    CUcontext cuda_ctx = NULL;
    CUcontext dummy;
    uint8_t *extradata = NULL;
    int extradata_size = 0;
    int ret = 0;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NV12,
                                       AV_PIX_FMT_NONE };

    int probed_width = avctx->coded_width ? avctx->coded_width : 1280;
    int probed_height = avctx->coded_height ? avctx->coded_height : 720;
    int probed_bit_depth = 8;

    const AVPixFmtDescriptor *probe_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (probe_desc && probe_desc->nb_components)
        probed_bit_depth = probe_desc->comp[0].depth;

    // Accelerated transcoding scenarios with 'ffmpeg' require that the
    // pix_fmt be set to AV_PIX_FMT_CUDA early. The sw_pix_fmt, and the
    // pix_fmt for non-accelerated transcoding, do not need to be correct
    // but need to be set to something. We arbitrarily pick NV12.
    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", ret);
        return ret;
    }
    avctx->pix_fmt = ret;

    if (ctx->resize_expr && sscanf(ctx->resize_expr, "%dx%d",
                                   &ctx->resize.width, &ctx->resize.height) != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid resize expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    if (ctx->crop_expr && sscanf(ctx->crop_expr, "%dx%dx%dx%d",
                                 &ctx->crop.top, &ctx->crop.bottom,
                                 &ctx->crop.left, &ctx->crop.right) != 4) {
        av_log(avctx, AV_LOG_ERROR, "Invalid cropping expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    ret = cuvid_load_functions(&ctx->cvdl, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed loading nvcuvid.\n");
        goto error;
    }

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    if (avctx->hw_frames_ctx) {
        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;

        ctx->hwdevice = av_buffer_ref(hwframe_ctx->device_ref);
        if (!ctx->hwdevice) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    } else {
        if (avctx->hw_device_ctx) {
            ctx->hwdevice = av_buffer_ref(avctx->hw_device_ctx);
            if (!ctx->hwdevice) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
        } else {
            ret = av_hwdevice_ctx_create(&ctx->hwdevice, AV_HWDEVICE_TYPE_CUDA, ctx->cu_gpu, NULL, 0);
            if (ret < 0)
                goto error;
        }

        ctx->hwframe = av_hwframe_ctx_alloc(ctx->hwdevice);
        if (!ctx->hwframe) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    device_ctx = hwframe_ctx->device_ctx;
    device_hwctx = device_ctx->hwctx;

    cuda_ctx = device_hwctx->cuda_ctx;
    ctx->cudl = device_hwctx->internal->cuda_dl;

    memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
    memset(&seq_pkt, 0, sizeof(seq_pkt));

    switch (avctx->codec->id) {
#if CONFIG_H264_CUVID_DECODER
    case AV_CODEC_ID_H264:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
        break;
#endif
#if CONFIG_HEVC_CUVID_DECODER
    case AV_CODEC_ID_HEVC:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
        break;
#endif
#if CONFIG_MJPEG_CUVID_DECODER
    case AV_CODEC_ID_MJPEG:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
        break;
#endif
#if CONFIG_MPEG1_CUVID_DECODER
    case AV_CODEC_ID_MPEG1VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
        break;
#endif
#if CONFIG_MPEG2_CUVID_DECODER
    case AV_CODEC_ID_MPEG2VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
        break;
#endif
#if CONFIG_MPEG4_CUVID_DECODER
    case AV_CODEC_ID_MPEG4:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
        break;
#endif
#if CONFIG_VP8_CUVID_DECODER
    case AV_CODEC_ID_VP8:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
        break;
#endif
#if CONFIG_VP9_CUVID_DECODER
    case AV_CODEC_ID_VP9:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
        break;
#endif
#if CONFIG_VC1_CUVID_DECODER
    case AV_CODEC_ID_VC1:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
        break;
#endif
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid CUVID codec!\n");
        return AVERROR_BUG;
    }

    if (avctx->codec->bsfs) {
        const AVCodecParameters *par = avctx->internal->bsf->par_out;
        extradata = par->extradata;
        extradata_size = par->extradata_size;
    } else if (avctx->extradata_size > 0) {
        extradata = avctx->extradata;
        extradata_size = avctx->extradata_size;
    }

    ctx->cuparse_ext = av_mallocz(sizeof(*ctx->cuparse_ext)
            + FFMAX(extradata_size - (int)sizeof(ctx->cuparse_ext->raw_seqhdr_data), 0));
    if (!ctx->cuparse_ext) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    ctx->cuparse_ext->format.seqhdr_data_length = extradata_size;
    if (extradata_size > 0)
        memcpy(ctx->cuparse_ext->raw_seqhdr_data, extradata, extradata_size);

    ctx->cuparseinfo.pExtVideoInfo = ctx->cuparse_ext;

    ctx->key_frame = av_mallocz(ctx->nb_surfaces * sizeof(int));
    if (!ctx->key_frame) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
    ctx->cuparseinfo.ulMaxDisplayDelay = 4;
    ctx->cuparseinfo.pUserData = avctx;
    ctx->cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
    ctx->cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
    ctx->cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    ret = cuvid_test_capabilities(avctx, &ctx->cuparseinfo,
                                  probed_width,
                                  probed_height,
                                  probed_bit_depth);
    if (ret < 0)
        goto error;

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;

    if (!avctx->pkt_timebase.num || !avctx->pkt_timebase.den)
        av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");

    return 0;

error:
    cuvid_decode_end(avctx);
    return ret;
}
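
/*
 * Flush: drops all queued output, destroys and recreates both the parser and
 * the decoder, and resends the stored sequence header so decoding can resume
 * cleanly after a seek.
 */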
static void cuvid_flush(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
    int ret;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    av_fifo_freep(&ctx->frame_queue);

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        av_log(avctx, AV_LOG_ERROR, "Failed to recreate frame queue on flush\n");
        return;
    }

    if (ctx->cudecoder) {
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
        ctx->cudecoder = NULL;
    }

    if (ctx->cuparser) {
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
        ctx->cuparser = NULL;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext->raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext->format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;
    ctx->decoder_flushing = 0;

    return;
error:
    av_log(avctx, AV_LOG_ERROR, "CUDA reinit on flush failed\n");
}
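
/*
 * Per-decoder AVOptions. The crop and resize options map to the cropping and
 * scaling done inside the decoder; an illustrative command line (assuming a
 * build with h264_cuvid and h264_nvenc enabled) might look like:
 *
 *   ffmpeg -hwaccel cuvid -c:v h264_cuvid -crop 16x16x0x0 -resize 1280x720 \
 *          -i input.mp4 -c:v h264_nvenc output.mp4
 */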
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "deint",    "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT,   { .i64 = cudaVideoDeinterlaceMode_Weave    }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
    { "weave",    "Weave deinterlacing (do nothing)",        0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave    }, 0, 0, VD, "deint" },
    { "bob",      "Bob deinterlacing",                       0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob      }, 0, 0, VD, "deint" },
    { "adaptive", "Adaptive deinterlacing",                  0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
    { "gpu",      "GPU to be used for decoding", OFFSET(cu_gpu), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "surfaces", "Maximum surfaces to be used for decoding", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, VD },
    { "drop_second_field", "Drop second field when deinterlacing", OFFSET(drop_second_field), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "crop",     "Crop (top)x(bottom)x(left)x(right)", OFFSET(crop_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "resize",   "Resize (width)x(height)", OFFSET(resize_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { NULL }
};

static const AVCodecHWConfigInternal *cuvid_hw_configs[] = {
    &(const AVCodecHWConfigInternal) {
        .public = {
            .pix_fmt     = AV_PIX_FMT_CUDA,
            .methods     = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX |
                           AV_CODEC_HW_CONFIG_METHOD_INTERNAL,
            .device_type = AV_HWDEVICE_TYPE_CUDA
        },
        .hwaccel = NULL,
    },
    NULL
};
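
/*
 * Boilerplate generator: for each enabled codec this expands to an AVClass
 * with the shared option table and an AVCodec named <codec>_cuvid that wires
 * up both the legacy decode() path and the receive_frame() path, optionally
 * chaining a bitstream filter (e.g. h264_mp4toannexb) to convert the input
 * to Annex B before it reaches the parser.
 */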
#define DEFINE_CUVID_CODEC(x, X, bsf_name) \
    static const AVClass x##_cuvid_class = { \
        .class_name = #x "_cuvid", \
        .item_name = av_default_item_name, \
        .option = options, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    AVCodec ff_##x##_cuvid_decoder = { \
        .name           = #x "_cuvid", \
        .long_name      = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = AV_CODEC_ID_##X, \
        .priv_data_size = sizeof(CuvidContext), \
        .priv_class     = &x##_cuvid_class, \
        .init           = cuvid_decode_init, \
        .close          = cuvid_decode_end, \
        .decode         = cuvid_decode_frame, \
        .receive_frame  = cuvid_output_frame, \
        .flush          = cuvid_flush, \
        .bsfs           = bsf_name, \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
                                                        AV_PIX_FMT_NV12, \
                                                        AV_PIX_FMT_P010, \
                                                        AV_PIX_FMT_P016, \
                                                        AV_PIX_FMT_NONE }, \
        .hw_configs     = cuvid_hw_configs, \
        .wrapper_name   = "cuvid", \
    };

#if CONFIG_HEVC_CUVID_DECODER
DEFINE_CUVID_CODEC(hevc, HEVC, "hevc_mp4toannexb")
#endif

#if CONFIG_H264_CUVID_DECODER
DEFINE_CUVID_CODEC(h264, H264, "h264_mp4toannexb")
#endif

#if CONFIG_MJPEG_CUVID_DECODER
DEFINE_CUVID_CODEC(mjpeg, MJPEG, NULL)
#endif

#if CONFIG_MPEG1_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg1, MPEG1VIDEO, NULL)
#endif

#if CONFIG_MPEG2_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg2, MPEG2VIDEO, NULL)
#endif

#if CONFIG_MPEG4_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg4, MPEG4, NULL)
#endif

#if CONFIG_VP8_CUVID_DECODER
DEFINE_CUVID_CODEC(vp8, VP8, NULL)
#endif

#if CONFIG_VP9_CUVID_DECODER
DEFINE_CUVID_CODEC(vp9, VP9, NULL)
#endif

#if CONFIG_VC1_CUVID_DECODER
DEFINE_CUVID_CODEC(vc1, VC1, NULL)
#endif