/*
 * Nvidia CUVID decoder
 * Copyright (c) 2016 Timo Rothenpieler <timo@rothenpieler.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "compat/cuda/dynlink_loader.h"

#include "libavutil/buffer.h"
#include "libavutil/mathematics.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/fifo.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "decode.h"
#include "internal.h"

typedef struct CuvidContext
{
    AVClass *avclass;

    CUvideodecoder cudecoder;
    CUvideoparser cuparser;

    char *cu_gpu;
    int nb_surfaces;
    int drop_second_field;
    char *crop_expr;
    char *resize_expr;

    struct {
        int left;
        int top;
        int right;
        int bottom;
    } crop;

    struct {
        int width;
        int height;
    } resize;

    AVBufferRef *hwdevice;
    AVBufferRef *hwframe;

    AVBSFContext *bsf;

    AVFifoBuffer *frame_queue;

    int deint_mode;
    int deint_mode_current;
    int64_t prev_pts;

    int internal_error;
    int decoder_flushing;

    cudaVideoCodec codec_type;
    cudaVideoChromaFormat chroma_format;

    CUVIDDECODECAPS caps8, caps10, caps12;

    CUVIDPARSERPARAMS cuparseinfo;
    CUVIDEOFORMATEX cuparse_ext;

    CudaFunctions *cudl;
    CuvidFunctions *cvdl;
} CuvidContext;

typedef struct CuvidParsedFrame
{
    CUVIDPARSERDISPINFO dispinfo;
    int second_field;
    int is_deinterlacing;
} CuvidParsedFrame;

static int check_cu(AVCodecContext *avctx, CUresult err, const char *func)
{
    CuvidContext *ctx = avctx->priv_data;
    const char *err_name;
    const char *err_string;

    av_log(avctx, AV_LOG_TRACE, "Calling %s\n", func);

    if (err == CUDA_SUCCESS)
        return 0;

    ctx->cudl->cuGetErrorName(err, &err_name);
    ctx->cudl->cuGetErrorString(err, &err_string);

    av_log(avctx, AV_LOG_ERROR, "%s failed", func);
    if (err_name && err_string)
        av_log(avctx, AV_LOG_ERROR, " -> %s: %s", err_name, err_string);
    av_log(avctx, AV_LOG_ERROR, "\n");

    return AVERROR_EXTERNAL;
}

#define CHECK_CU(x) check_cu(avctx, (x), #x)
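
/* Parser callback, invoked whenever a sequence header is encountered.
 * It propagates the stream geometry and color properties to the
 * AVCodecContext, validates them against the probed capabilities and
 * (re)creates the CUVID decoder when the configuration changed.
 * Errors are reported through ctx->internal_error and by returning 0
 * to the parser. */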
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;
    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Output formats other than NV12, P010 or P016 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
static int CUDAAPI cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS* picparams)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;

    av_log(avctx, AV_LOG_TRACE, "pfnDecodePicture\n");

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams));
    if (ctx->internal_error < 0)
        return 0;

    return 1;
}
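
/* Parser callback, invoked when a decoded picture is ready for display.
 * The picture is only queued here; mapping and copying happens later in
 * cuvid_output_frame(). With bob/adaptive deinterlacing each picture is
 * queued twice (once per output field) unless drop_second_field is set. */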
static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINFO* dispinfo)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    CuvidParsedFrame parsed_frame = { { 0 } };

    parsed_frame.dispinfo = *dispinfo;
    ctx->internal_error = 0;

    if (ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave) {
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
    } else {
        parsed_frame.is_deinterlacing = 1;
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        if (!ctx->drop_second_field) {
            parsed_frame.second_field = 1;
            av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        }
    }

    return 1;
}
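
/* Treat the queue as full while fewer than two surface slots remain free,
 * since a single displayed picture can enqueue two entries when deinterlacing. */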
static int cuvid_is_buffer_full(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    return (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + 2 > ctx->nb_surfaces;
}
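
/* Feed one packet to the CUVID parser. H.264/HEVC input is first converted
 * to Annex B through the mp4toannexb bitstream filter, and packet timestamps
 * are rescaled to the parser's 1/10000000 (10 MHz) timebase. An empty packet
 * signals end of stream and puts the decoder into flushing mode. */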
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    AVPacket filter_packet = { 0 };
    AVPacket filtered_packet = { 0 };
    int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");

    if (is_flush && avpkt && avpkt->size)
        return AVERROR_EOF;

    if (cuvid_is_buffer_full(avctx) && avpkt && avpkt->size)
        return AVERROR(EAGAIN);

    if (ctx->bsf && avpkt && avpkt->size) {
        if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
            return ret;
        }

        if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
            av_packet_unref(&filter_packet);
            return ret;
        }

        if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
            return ret;
        }

        avpkt = &filtered_packet;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0) {
        av_packet_unref(&filtered_packet);
        return ret;
    }

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));

    av_packet_unref(&filtered_packet);

    if (ret < 0)
        goto error;

    // cuvidParseVideoData doesn't return an error just because stuff failed...
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        ret = ctx->internal_error;
        goto error;
    }

error:
    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else if (ret < 0)
        return ret;
    else if (is_flush)
        return AVERROR_EOF;
    else
        return 0;
}
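
/* receive_frame entry point: pull packets from the decoder core while the
 * surface queue has room, then map the oldest queued picture and either copy
 * it into an AV_PIX_FMT_CUDA hardware frame from the hwframe pool or download
 * it into a system-memory NV12/P010/P016 frame. */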
static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_output_frame\n");

    if (ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, NULL);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!cuvid_is_buffer_full(avctx)) {
        AVPacket pkt = {0};
        ret = ff_decode_get_packet(avctx, &pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
        ret = cuvid_decode_packet(avctx, &pkt);
        av_packet_unref(&pkt);
        // cuvid_is_buffer_full() should avoid this.
        if (ret == AVERROR(EAGAIN))
            ret = AVERROR_EXTERNAL;
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        return ret;

    if (av_fifo_size(ctx->frame_queue)) {
        CuvidParsedFrame parsed_frame;
        CUVIDPROCPARAMS params;
        unsigned int pitch = 0;
        int offset = 0;
        int i;

        av_fifo_generic_read(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);

        memset(&params, 0, sizeof(params));
        params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
        params.second_field = parsed_frame.second_field;
        params.top_field_first = parsed_frame.dispinfo.top_field_first;

        ret = CHECK_CU(ctx->cvdl->cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index, &mapped_frame, &pitch, &params));
        if (ret < 0)
            goto error;

        if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
            ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n");
                goto error;
            }

            ret = ff_decode_frame_props(avctx, frame);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n");
                goto error;
            }

            for (i = 0; i < 2; i++) {
                CUDA_MEMCPY2D cpy = {
                    .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                    .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                    .srcDevice     = mapped_frame,
                    .dstDevice     = (CUdeviceptr)frame->data[i],
                    .srcPitch      = pitch,
                    .dstPitch      = frame->linesize[i],
                    .srcY          = offset,
                    .WidthInBytes  = FFMIN(pitch, frame->linesize[i]),
                    .Height        = avctx->height >> (i ? 1 : 0),
                };

                ret = CHECK_CU(ctx->cudl->cuMemcpy2D(&cpy));
                if (ret < 0)
                    goto error;

                offset += avctx->height;
            }
        } else if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
                   avctx->pix_fmt == AV_PIX_FMT_P010 ||
                   avctx->pix_fmt == AV_PIX_FMT_P016) {
            AVFrame *tmp_frame = av_frame_alloc();
            if (!tmp_frame) {
                av_log(avctx, AV_LOG_ERROR, "av_frame_alloc failed\n");
                ret = AVERROR(ENOMEM);
                goto error;
            }

            tmp_frame->format        = AV_PIX_FMT_CUDA;
            tmp_frame->hw_frames_ctx = av_buffer_ref(ctx->hwframe);
            tmp_frame->data[0]       = (uint8_t*)mapped_frame;
            tmp_frame->linesize[0]   = pitch;
            tmp_frame->data[1]       = (uint8_t*)(mapped_frame + avctx->height * pitch);
            tmp_frame->linesize[1]   = pitch;
            tmp_frame->width         = avctx->width;
            tmp_frame->height        = avctx->height;

            ret = ff_get_buffer(avctx, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_get_buffer failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }

            ret = av_hwframe_transfer_data(frame, tmp_frame, 0);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }
            av_frame_free(&tmp_frame);
        } else {
            ret = AVERROR_BUG;
            goto error;
        }

        frame->width = avctx->width;
        frame->height = avctx->height;
        if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
            frame->pts = av_rescale_q(parsed_frame.dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase);
        else
            frame->pts = parsed_frame.dispinfo.timestamp;

        if (parsed_frame.second_field) {
            if (ctx->prev_pts == INT64_MIN) {
                ctx->prev_pts = frame->pts;
                frame->pts += (avctx->pkt_timebase.den * avctx->framerate.den) / (avctx->pkt_timebase.num * avctx->framerate.num);
            } else {
                int pts_diff = (frame->pts - ctx->prev_pts) / 2;
                ctx->prev_pts = frame->pts;
                frame->pts += pts_diff;
            }
        }
        /* CUVID's opaque reordering breaks the internal pkt logic.
         * So set pkt_pts and clear all the other pkt_ fields.
         */
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pkt_pos = -1;
        frame->pkt_duration = 0;
        frame->pkt_size = -1;

        frame->interlaced_frame = !parsed_frame.is_deinterlacing && !parsed_frame.dispinfo.progressive_frame;

        if (frame->interlaced_frame)
            frame->top_field_first = parsed_frame.dispinfo.top_field_first;
    } else if (ctx->decoder_flushing) {
        ret = AVERROR_EOF;
    } else {
        ret = AVERROR(EAGAIN);
    }

error:
    if (mapped_frame)
        eret = CHECK_CU(ctx->cvdl->cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame));

    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else
        return ret;
}
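
/* Legacy decode() entry point for the old send-packet/get-frame API. It
 * forwards to cuvid_decode_packet()/cuvid_output_frame(); frame-doubling
 * deinterlacing cannot be expressed through this path, so it is rejected
 * up front. */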
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    int ret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_frame\n");

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave) {
        av_log(avctx, AV_LOG_ERROR, "Deinterlacing is not supported via the old API\n");
        return AVERROR(EINVAL);
    }

    if (!ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = cuvid_output_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        *got_frame = 0;
    } else if (ret < 0) {
        return ret;
    } else {
        *got_frame = 1;
    }

    return 0;
}
static av_cold int cuvid_decode_end(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    av_fifo_freep(&ctx->frame_queue);

    if (ctx->bsf)
        av_bsf_free(&ctx->bsf);

    if (ctx->cuparser)
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);

    if (ctx->cudecoder)
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);

    ctx->cudl = NULL;

    av_buffer_unref(&ctx->hwframe);
    av_buffer_unref(&ctx->hwdevice);

    cuvid_free_functions(&ctx->cvdl);

    return 0;
}
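
/* Query the driver's decode capabilities for 8-, 10- and 12-bit 4:2:0 content
 * of the selected codec and verify that the probed resolution and bit depth
 * fall within the supported range. On drivers too old to expose
 * cuvidGetDecoderCaps, 8- and 10-bit are assumed to work and 12-bit is
 * disabled. */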
static int cuvid_test_capabilities(AVCodecContext *avctx,
                                   const CUVIDPARSERPARAMS *cuparseinfo,
                                   int probed_width,
                                   int probed_height,
                                   int bit_depth)
{
    CuvidContext *ctx = avctx->priv_data;
    CUVIDDECODECAPS *caps;
    int res8 = 0, res10 = 0, res12 = 0;

    if (!ctx->cvdl->cuvidGetDecoderCaps) {
        av_log(avctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
        av_log(avctx, AV_LOG_WARNING, "The minimum required version is "
#if defined(_WIN32) || defined(__CYGWIN__)
            "378.66"
#else
            "378.13"
#endif
            ". Continuing blind.\n");
        ctx->caps8.bIsSupported = ctx->caps10.bIsSupported = 1;
        // 12 bit was not supported before the capability check was introduced, so disable it.
        ctx->caps12.bIsSupported = 0;
        return 0;
    }

    ctx->caps8.eCodecType = ctx->caps10.eCodecType = ctx->caps12.eCodecType
        = cuparseinfo->CodecType;
    ctx->caps8.eChromaFormat = ctx->caps10.eChromaFormat = ctx->caps12.eChromaFormat
        = cudaVideoChromaFormat_420;

    ctx->caps8.nBitDepthMinus8 = 0;
    ctx->caps10.nBitDepthMinus8 = 2;
    ctx->caps12.nBitDepthMinus8 = 4;

    res8 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps8));
    res10 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps10));
    res12 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps12));

    av_log(avctx, AV_LOG_VERBOSE, "CUVID capabilities for %s:\n", avctx->codec->name);
    av_log(avctx, AV_LOG_VERBOSE, "8 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps8.bIsSupported, ctx->caps8.nMinWidth, ctx->caps8.nMaxWidth, ctx->caps8.nMinHeight, ctx->caps8.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "10 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps10.bIsSupported, ctx->caps10.nMinWidth, ctx->caps10.nMaxWidth, ctx->caps10.nMinHeight, ctx->caps10.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "12 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps12.bIsSupported, ctx->caps12.nMinWidth, ctx->caps12.nMaxWidth, ctx->caps12.nMinHeight, ctx->caps12.nMaxHeight);

    switch (bit_depth) {
    case 10:
        caps = &ctx->caps10;
        if (res10 < 0)
            return res10;
        break;
    case 12:
        caps = &ctx->caps12;
        if (res12 < 0)
            return res12;
        break;
    default:
        caps = &ctx->caps8;
        if (res8 < 0)
            return res8;
    }

    if (!ctx->caps8.bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Codec %s is not supported.\n", avctx->codec->name);
        return AVERROR(EINVAL);
    }

    if (!caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Bit depth %d is not supported.\n", bit_depth);
        return AVERROR(EINVAL);
    }

    if (probed_width > caps->nMaxWidth || probed_width < caps->nMinWidth) {
        av_log(avctx, AV_LOG_ERROR, "Video width %d not within range from %d to %d\n",
               probed_width, caps->nMinWidth, caps->nMaxWidth);
        return AVERROR(EINVAL);
    }

    if (probed_height > caps->nMaxHeight || probed_height < caps->nMinHeight) {
        av_log(avctx, AV_LOG_ERROR, "Video height %d not within range from %d to %d\n",
               probed_height, caps->nMinHeight, caps->nMaxHeight);
        return AVERROR(EINVAL);
    }

    return 0;
}
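
/* Decoder init: pick the surface format, acquire (or create) a CUDA device
 * and frames context, probe the driver capabilities, set up the optional
 * mp4toannexb bitstream filter and create the CUVID parser. The parser is
 * primed with the stream's sequence header so the decoder can be created
 * before the first packet arrives. */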
static av_cold int cuvid_decode_init(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVCUDADeviceContext *device_hwctx;
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *hwframe_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt;
    CUcontext cuda_ctx = NULL;
    CUcontext dummy;
    const AVBitStreamFilter *bsf;
    int ret = 0;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NV12,
                                       AV_PIX_FMT_NONE };

    int probed_width = avctx->coded_width ? avctx->coded_width : 1280;
    int probed_height = avctx->coded_height ? avctx->coded_height : 720;
    int probed_bit_depth = 8;

    const AVPixFmtDescriptor *probe_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (probe_desc && probe_desc->nb_components)
        probed_bit_depth = probe_desc->comp[0].depth;

    // Accelerated transcoding scenarios with 'ffmpeg' require that the
    // pix_fmt be set to AV_PIX_FMT_CUDA early. The sw_pix_fmt, and the
    // pix_fmt for non-accelerated transcoding, do not need to be correct
    // but need to be set to something. We arbitrarily pick NV12.
    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", ret);
        return ret;
    }
    avctx->pix_fmt = ret;

    if (ctx->resize_expr && sscanf(ctx->resize_expr, "%dx%d",
                                   &ctx->resize.width, &ctx->resize.height) != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid resize expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    if (ctx->crop_expr && sscanf(ctx->crop_expr, "%dx%dx%dx%d",
                                 &ctx->crop.top, &ctx->crop.bottom,
                                 &ctx->crop.left, &ctx->crop.right) != 4) {
        av_log(avctx, AV_LOG_ERROR, "Invalid cropping expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    ret = cuvid_load_functions(&ctx->cvdl);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed loading nvcuvid.\n");
        goto error;
    }

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    if (avctx->hw_frames_ctx) {
        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;

        ctx->hwdevice = av_buffer_ref(hwframe_ctx->device_ref);
        if (!ctx->hwdevice) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    } else {
        if (avctx->hw_device_ctx) {
            ctx->hwdevice = av_buffer_ref(avctx->hw_device_ctx);
            if (!ctx->hwdevice) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
        } else {
            ret = av_hwdevice_ctx_create(&ctx->hwdevice, AV_HWDEVICE_TYPE_CUDA, ctx->cu_gpu, NULL, 0);
            if (ret < 0)
                goto error;
        }

        ctx->hwframe = av_hwframe_ctx_alloc(ctx->hwdevice);
        if (!ctx->hwframe) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    device_ctx = hwframe_ctx->device_ctx;
    device_hwctx = device_ctx->hwctx;

    cuda_ctx = device_hwctx->cuda_ctx;
    ctx->cudl = device_hwctx->internal->cuda_dl;

    memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
    memset(&ctx->cuparse_ext, 0, sizeof(ctx->cuparse_ext));
    memset(&seq_pkt, 0, sizeof(seq_pkt));

    ctx->cuparseinfo.pExtVideoInfo = &ctx->cuparse_ext;

    switch (avctx->codec->id) {
#if CONFIG_H264_CUVID_DECODER
    case AV_CODEC_ID_H264:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
        break;
#endif
#if CONFIG_HEVC_CUVID_DECODER
    case AV_CODEC_ID_HEVC:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
        break;
#endif
#if CONFIG_MJPEG_CUVID_DECODER
    case AV_CODEC_ID_MJPEG:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
        break;
#endif
#if CONFIG_MPEG1_CUVID_DECODER
    case AV_CODEC_ID_MPEG1VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
        break;
#endif
#if CONFIG_MPEG2_CUVID_DECODER
    case AV_CODEC_ID_MPEG2VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
        break;
#endif
#if CONFIG_MPEG4_CUVID_DECODER
    case AV_CODEC_ID_MPEG4:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
        break;
#endif
#if CONFIG_VP8_CUVID_DECODER
    case AV_CODEC_ID_VP8:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
        break;
#endif
#if CONFIG_VP9_CUVID_DECODER
    case AV_CODEC_ID_VP9:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
        break;
#endif
#if CONFIG_VC1_CUVID_DECODER
    case AV_CODEC_ID_VC1:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
        break;
#endif
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid CUVID codec!\n");
        return AVERROR_BUG;
    }

    if (avctx->codec->id == AV_CODEC_ID_H264 || avctx->codec->id == AV_CODEC_ID_HEVC) {
        if (avctx->codec->id == AV_CODEC_ID_H264)
            bsf = av_bsf_get_by_name("h264_mp4toannexb");
        else
            bsf = av_bsf_get_by_name("hevc_mp4toannexb");

        if (!bsf) {
            ret = AVERROR_BSF_NOT_FOUND;
            goto error;
        }
        if (ret = av_bsf_alloc(bsf, &ctx->bsf)) {
            goto error;
        }
        if (((ret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx)) < 0) || ((ret = av_bsf_init(ctx->bsf)) < 0)) {
            av_bsf_free(&ctx->bsf);
            goto error;
        }

        ctx->cuparse_ext.format.seqhdr_data_length = ctx->bsf->par_out->extradata_size;
        memcpy(ctx->cuparse_ext.raw_seqhdr_data,
               ctx->bsf->par_out->extradata,
               FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), ctx->bsf->par_out->extradata_size));
    } else if (avctx->extradata_size > 0) {
        ctx->cuparse_ext.format.seqhdr_data_length = avctx->extradata_size;
        memcpy(ctx->cuparse_ext.raw_seqhdr_data,
               avctx->extradata,
               FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), avctx->extradata_size));
    }

    ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
    ctx->cuparseinfo.ulMaxDisplayDelay = 4;
    ctx->cuparseinfo.pUserData = avctx;
    ctx->cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
    ctx->cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
    ctx->cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    ret = cuvid_test_capabilities(avctx, &ctx->cuparseinfo,
                                  probed_width,
                                  probed_height,
                                  probed_bit_depth);
    if (ret < 0)
        goto error;

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;

    if (!avctx->pkt_timebase.num || !avctx->pkt_timebase.den)
        av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");

    return 0;

error:
    cuvid_decode_end(avctx);
    return ret;
}
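
/* Flush: drop all queued frames and rebuild the parser from the stored
 * sequence header so decoding can restart cleanly, e.g. after a seek. */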
static void cuvid_flush(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
    int ret;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    av_fifo_freep(&ctx->frame_queue);

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        av_log(avctx, AV_LOG_ERROR, "Failed to recreate frame queue on flush\n");
        return;
    }

    if (ctx->cudecoder) {
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
        ctx->cudecoder = NULL;
    }

    if (ctx->cuparser) {
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
        ctx->cuparser = NULL;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;
    ctx->decoder_flushing = 0;

    return;

error:
    av_log(avctx, AV_LOG_ERROR, "CUDA reinit on flush failed\n");
}
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "deint",    "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT,   { .i64 = cudaVideoDeinterlaceMode_Weave    }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
    { "weave",    "Weave deinterlacing (do nothing)",        0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave    }, 0, 0, VD, "deint" },
    { "bob",      "Bob deinterlacing",                       0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob      }, 0, 0, VD, "deint" },
    { "adaptive", "Adaptive deinterlacing",                  0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
    { "gpu",      "GPU to be used for decoding", OFFSET(cu_gpu), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "surfaces", "Maximum surfaces to be used for decoding", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, VD },
    { "drop_second_field", "Drop second field when deinterlacing", OFFSET(drop_second_field), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "crop",     "Crop (top)x(bottom)x(left)x(right)", OFFSET(crop_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "resize",   "Resize (width)x(height)", OFFSET(resize_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { NULL }
};
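
/* For every enabled codec this macro instantiates an AVClass carrying the
 * shared options, an AVHWAccel entry and the AVCodec itself, wired to the
 * init/decode/receive_frame/flush/close callbacks above. */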
#define DEFINE_CUVID_CODEC(x, X) \
    static const AVClass x##_cuvid_class = { \
        .class_name = #x "_cuvid", \
        .item_name = av_default_item_name, \
        .option = options, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    AVHWAccel ff_##x##_cuvid_hwaccel = { \
        .name           = #x "_cuvid", \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = AV_CODEC_ID_##X, \
        .pix_fmt        = AV_PIX_FMT_CUDA, \
        .decoder_class  = &x##_cuvid_class, \
    }; \
    AVCodec ff_##x##_cuvid_decoder = { \
        .name           = #x "_cuvid", \
        .long_name      = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = AV_CODEC_ID_##X, \
        .priv_data_size = sizeof(CuvidContext), \
        .priv_class     = &x##_cuvid_class, \
        .init           = cuvid_decode_init, \
        .close          = cuvid_decode_end, \
        .decode         = cuvid_decode_frame, \
        .receive_frame  = cuvid_output_frame, \
        .flush          = cuvid_flush, \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
        .caps_internal  = FF_CODEC_CAP_HWACCEL_REQUIRE_CLASS, \
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
                                                        AV_PIX_FMT_NV12, \
                                                        AV_PIX_FMT_P010, \
                                                        AV_PIX_FMT_P016, \
                                                        AV_PIX_FMT_NONE }, \
    };
#if CONFIG_HEVC_CUVID_DECODER
DEFINE_CUVID_CODEC(hevc, HEVC)
#endif

#if CONFIG_H264_CUVID_DECODER
DEFINE_CUVID_CODEC(h264, H264)
#endif

#if CONFIG_MJPEG_CUVID_DECODER
DEFINE_CUVID_CODEC(mjpeg, MJPEG)
#endif

#if CONFIG_MPEG1_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg1, MPEG1VIDEO)
#endif

#if CONFIG_MPEG2_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg2, MPEG2VIDEO)
#endif

#if CONFIG_MPEG4_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg4, MPEG4)
#endif

#if CONFIG_VP8_CUVID_DECODER
DEFINE_CUVID_CODEC(vp8, VP8)
#endif

#if CONFIG_VP9_CUVID_DECODER
DEFINE_CUVID_CODEC(vp9, VP9)
#endif

#if CONFIG_VC1_CUVID_DECODER
DEFINE_CUVID_CODEC(vc1, VC1)
#endif