/*
 * Nvidia CUVID decoder
 * Copyright (c) 2016 Timo Rothenpieler <timo@rothenpieler.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "compat/cuda/dynlink_loader.h"

#include "libavutil/buffer.h"
#include "libavutil/mathematics.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_cuda_internal.h"
#include "libavutil/cuda_check.h"
#include "libavutil/fifo.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "decode.h"
#include "hwaccel.h"
#include "nvdec.h"
#include "internal.h"

#if !NVDECAPI_CHECK_VERSION(9, 0)
#define cudaVideoSurfaceFormat_YUV444 2
#define cudaVideoSurfaceFormat_YUV444_16Bit 3
#endif
typedef struct CuvidContext
{
    AVClass *avclass;

    CUvideodecoder cudecoder;
    CUvideoparser cuparser;

    char *cu_gpu;
    int nb_surfaces;
    int drop_second_field;
    char *crop_expr;
    char *resize_expr;

    struct {
        int left;
        int top;
        int right;
        int bottom;
    } crop;

    struct {
        int width;
        int height;
    } resize;

    AVBufferRef *hwdevice;
    AVBufferRef *hwframe;

    AVBSFContext *bsf;

    AVFifoBuffer *frame_queue;

    int deint_mode;
    int deint_mode_current;
    int64_t prev_pts;

    int internal_error;
    int decoder_flushing;

    int *key_frame;

    cudaVideoCodec codec_type;
    cudaVideoChromaFormat chroma_format;

    CUVIDDECODECAPS caps8, caps10, caps12;

    CUVIDPARSERPARAMS cuparseinfo;
    CUVIDEOFORMATEX cuparse_ext;

    CudaFunctions *cudl;
    CuvidFunctions *cvdl;
} CuvidContext;

typedef struct CuvidParsedFrame
{
    CUVIDPARSERDISPINFO dispinfo;
    int second_field;
    int is_deinterlacing;
} CuvidParsedFrame;

#define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, ctx->cudl, x)
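
/*
 * Parser callbacks. cuvid_handle_video_sequence() runs whenever the CUVID
 * parser sees a (new) sequence header: it negotiates the output pixel format,
 * validates or initializes the AVHWFramesContext and (re)creates the hardware
 * decoder. Since the parser only sees a success/failure return value, real
 * error codes are passed back through ctx->internal_error.
 */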
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;
    int chroma_444;

    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    chroma_444 = format->chroma_format == cudaVideoChromaFormat_444;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = chroma_444 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    case AV_PIX_FMT_YUV444P:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_YUV444;
        break;
    case AV_PIX_FMT_YUV444P16:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_YUV444_16Bit;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
               av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
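
/*
 * Called by the parser once the bitstream data for a picture is complete.
 * The intra flag is remembered per surface index so it can be attached to the
 * output frame later, then the picture is submitted to the hardware decoder.
 */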
static int CUDAAPI cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS* picparams)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;

    av_log(avctx, AV_LOG_TRACE, "pfnDecodePicture\n");

    ctx->key_frame[picparams->CurrPicIdx] = picparams->intra_pic_flag;

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams));
    if (ctx->internal_error < 0)
        return 0;

    return 1;
}
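
/*
 * Called by the parser in display order. Pictures are queued in the FIFO;
 * when deinterlacing, each picture is queued twice (once per output field)
 * unless drop_second_field is set.
 */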
static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINFO* dispinfo)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    CuvidParsedFrame parsed_frame = { { 0 } };

    parsed_frame.dispinfo = *dispinfo;
    ctx->internal_error = 0;

    if (ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave) {
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
    } else {
        parsed_frame.is_deinterlacing = 1;
        av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        if (!ctx->drop_second_field) {
            parsed_frame.second_field = 1;
            av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
        }
    }

    return 1;
}
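
/*
 * Returns nonzero when the frames already queued plus the parser's display
 * delay (doubled when deinterlacing emits two fields per picture) reach the
 * number of decode surfaces, i.e. when no further input should be accepted.
 */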
static int cuvid_is_buffer_full(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    int delay = ctx->cuparseinfo.ulMaxDisplayDelay;
    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        delay *= 2;

    return (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + delay >= ctx->nb_surfaces;
}
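
/*
 * Feed one packet to the CUVID parser. Packets are run through the optional
 * mp4-to-annexb bitstream filter first, and timestamps are rescaled to the
 * 10 MHz clock used by CUVID. An empty packet signals end of stream and puts
 * the decoder into flushing mode.
 */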
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    AVPacket filter_packet = { 0 };
    AVPacket filtered_packet = { 0 };
    int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");

    if (is_flush && avpkt && avpkt->size)
        return AVERROR_EOF;

    if (cuvid_is_buffer_full(avctx) && avpkt && avpkt->size)
        return AVERROR(EAGAIN);

    if (ctx->bsf && avpkt && avpkt->size) {
        if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
            return ret;
        }

        if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
            av_packet_unref(&filter_packet);
            return ret;
        }

        if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
            return ret;
        }

        avpkt = &filtered_packet;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0) {
        av_packet_unref(&filtered_packet);
        return ret;
    }

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));

    av_packet_unref(&filtered_packet);

    if (ret < 0)
        goto error;

    // cuvidParseVideoData doesn't return an error just because stuff failed...
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        ret = ctx->internal_error;
        goto error;
    }

error:
    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else if (ret < 0)
        return ret;
    else if (is_flush)
        return AVERROR_EOF;
    else
        return 0;
}
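
/*
 * receive_frame entry point: pull input packets while there is room, then pop
 * the oldest picture from the display FIFO, map the decoded surface and either
 * copy it into a CUDA hardware frame or download it to system memory via a
 * temporary CUDA-mapped frame and av_hwframe_transfer_data().
 */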
static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_output_frame\n");

    if (ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, NULL);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!cuvid_is_buffer_full(avctx)) {
        AVPacket pkt = {0};
        ret = ff_decode_get_packet(avctx, &pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
        ret = cuvid_decode_packet(avctx, &pkt);
        av_packet_unref(&pkt);
        // cuvid_is_buffer_full() should avoid this.
        if (ret == AVERROR(EAGAIN))
            ret = AVERROR_EXTERNAL;
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        return ret;

    if (av_fifo_size(ctx->frame_queue)) {
        const AVPixFmtDescriptor *pixdesc;
        CuvidParsedFrame parsed_frame;
        CUVIDPROCPARAMS params;
        unsigned int pitch = 0;
        int offset = 0;
        int i;

        av_fifo_generic_read(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);

        memset(&params, 0, sizeof(params));
        params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
        params.second_field = parsed_frame.second_field;
        params.top_field_first = parsed_frame.dispinfo.top_field_first;

        ret = CHECK_CU(ctx->cvdl->cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index, &mapped_frame, &pitch, &params));
        if (ret < 0)
            goto error;

        if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
            ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n");
                goto error;
            }

            ret = ff_decode_frame_props(avctx, frame);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n");
                goto error;
            }

            pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

            for (i = 0; i < pixdesc->nb_components; i++) {
                int height = avctx->height >> (i ? pixdesc->log2_chroma_h : 0);
                CUDA_MEMCPY2D cpy = {
                    .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                    .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                    .srcDevice     = mapped_frame,
                    .dstDevice     = (CUdeviceptr)frame->data[i],
                    .srcPitch      = pitch,
                    .dstPitch      = frame->linesize[i],
                    .srcY          = offset,
                    .WidthInBytes  = FFMIN(pitch, frame->linesize[i]),
                    .Height        = height,
                };

                ret = CHECK_CU(ctx->cudl->cuMemcpy2DAsync(&cpy, device_hwctx->stream));
                if (ret < 0)
                    goto error;

                offset += height;
            }

            ret = CHECK_CU(ctx->cudl->cuStreamSynchronize(device_hwctx->stream));
            if (ret < 0)
                goto error;
        } else if (avctx->pix_fmt == AV_PIX_FMT_NV12      ||
                   avctx->pix_fmt == AV_PIX_FMT_P010      ||
                   avctx->pix_fmt == AV_PIX_FMT_P016      ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P   ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P16) {
            unsigned int offset = 0;
            AVFrame *tmp_frame = av_frame_alloc();
            if (!tmp_frame) {
                av_log(avctx, AV_LOG_ERROR, "av_frame_alloc failed\n");
                ret = AVERROR(ENOMEM);
                goto error;
            }

            pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

            tmp_frame->format        = AV_PIX_FMT_CUDA;
            tmp_frame->hw_frames_ctx = av_buffer_ref(ctx->hwframe);
            tmp_frame->width         = avctx->width;
            tmp_frame->height        = avctx->height;

            /*
             * Note that the following logic would not work for three plane
             * YUV420 because the pitch value is different for the chroma
             * planes.
             */
            for (i = 0; i < pixdesc->nb_components; i++) {
                tmp_frame->data[i]     = (uint8_t*)mapped_frame + offset;
                tmp_frame->linesize[i] = pitch;
                offset += pitch * (avctx->height >> (i ? pixdesc->log2_chroma_h : 0));
            }

            ret = ff_get_buffer(avctx, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_get_buffer failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }

            ret = av_hwframe_transfer_data(frame, tmp_frame, 0);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed\n");
                av_frame_free(&tmp_frame);
                goto error;
            }
            av_frame_free(&tmp_frame);
        } else {
            ret = AVERROR_BUG;
            goto error;
        }

        frame->key_frame = ctx->key_frame[parsed_frame.dispinfo.picture_index];
        frame->width = avctx->width;
        frame->height = avctx->height;
        if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
            frame->pts = av_rescale_q(parsed_frame.dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase);
        else
            frame->pts = parsed_frame.dispinfo.timestamp;

        if (parsed_frame.second_field) {
            if (ctx->prev_pts == INT64_MIN) {
                ctx->prev_pts = frame->pts;
                frame->pts += (avctx->pkt_timebase.den * avctx->framerate.den) / (avctx->pkt_timebase.num * avctx->framerate.num);
            } else {
                int pts_diff = (frame->pts - ctx->prev_pts) / 2;
                ctx->prev_pts = frame->pts;
                frame->pts += pts_diff;
            }
        }

        /* CUVID's opaque reordering breaks the internal pkt logic.
         * So set pkt_pts and clear all the other pkt_ fields.
         */
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pkt_pos = -1;
        frame->pkt_duration = 0;
        frame->pkt_size = -1;

        frame->interlaced_frame = !parsed_frame.is_deinterlacing && !parsed_frame.dispinfo.progressive_frame;

        if (frame->interlaced_frame)
            frame->top_field_first = parsed_frame.dispinfo.top_field_first;
    } else if (ctx->decoder_flushing) {
        ret = AVERROR_EOF;
    } else {
        ret = AVERROR(EAGAIN);
    }

error:
    if (mapped_frame)
        eret = CHECK_CU(ctx->cvdl->cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame));

    eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else
        return ret;
}
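
/*
 * Legacy decode() entry point. Deinterlacing is rejected here because bob and
 * adaptive modes can emit two frames per input packet, which this
 * one-in/one-out API cannot return.
 */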
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    int ret = 0;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_frame\n");

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave) {
        av_log(avctx, AV_LOG_ERROR, "Deinterlacing is not supported via the old API\n");
        return AVERROR(EINVAL);
    }

    if (!ctx->decoder_flushing) {
        ret = cuvid_decode_packet(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = cuvid_output_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        *got_frame = 0;
    } else if (ret < 0) {
        return ret;
    } else {
        *got_frame = 1;
    }

    return 0;
}

static av_cold int cuvid_decode_end(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;

    av_fifo_freep(&ctx->frame_queue);

    if (ctx->bsf)
        av_bsf_free(&ctx->bsf);

    if (ctx->cuparser)
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);

    if (ctx->cudecoder)
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);

    ctx->cudl = NULL;

    av_buffer_unref(&ctx->hwframe);
    av_buffer_unref(&ctx->hwdevice);

    av_freep(&ctx->key_frame);

    cuvid_free_functions(&ctx->cvdl);

    return 0;
}
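
/*
 * Probe decoder capabilities for 8-, 10- and 12-bit 4:2:0 content so that
 * unsupported codecs, bit depths or resolutions fail at init time. Drivers too
 * old to expose cuvidGetDecoderCaps() are assumed to handle 8 and 10 bit only.
 */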
static int cuvid_test_capabilities(AVCodecContext *avctx,
                                   const CUVIDPARSERPARAMS *cuparseinfo,
                                   int probed_width,
                                   int probed_height,
                                   int bit_depth)
{
    CuvidContext *ctx = avctx->priv_data;
    CUVIDDECODECAPS *caps;
    int res8 = 0, res10 = 0, res12 = 0;

    if (!ctx->cvdl->cuvidGetDecoderCaps) {
        av_log(avctx, AV_LOG_WARNING, "Used Nvidia driver is too old to perform a capability check.\n");
        av_log(avctx, AV_LOG_WARNING, "The minimum required version is "
#if defined(_WIN32) || defined(__CYGWIN__)
            "378.66"
#else
            "378.13"
#endif
            ". Continuing blind.\n");
        ctx->caps8.bIsSupported = ctx->caps10.bIsSupported = 1;
        // 12 bit was not supported before the capability check was introduced, so disable it.
        ctx->caps12.bIsSupported = 0;
        return 0;
    }

    ctx->caps8.eCodecType = ctx->caps10.eCodecType = ctx->caps12.eCodecType
        = cuparseinfo->CodecType;
    ctx->caps8.eChromaFormat = ctx->caps10.eChromaFormat = ctx->caps12.eChromaFormat
        = cudaVideoChromaFormat_420;

    ctx->caps8.nBitDepthMinus8 = 0;
    ctx->caps10.nBitDepthMinus8 = 2;
    ctx->caps12.nBitDepthMinus8 = 4;

    res8 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps8));
    res10 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps10));
    res12 = CHECK_CU(ctx->cvdl->cuvidGetDecoderCaps(&ctx->caps12));

    av_log(avctx, AV_LOG_VERBOSE, "CUVID capabilities for %s:\n", avctx->codec->name);
    av_log(avctx, AV_LOG_VERBOSE, "8 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps8.bIsSupported, ctx->caps8.nMinWidth, ctx->caps8.nMaxWidth, ctx->caps8.nMinHeight, ctx->caps8.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "10 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps10.bIsSupported, ctx->caps10.nMinWidth, ctx->caps10.nMaxWidth, ctx->caps10.nMinHeight, ctx->caps10.nMaxHeight);
    av_log(avctx, AV_LOG_VERBOSE, "12 bit: supported: %d, min_width: %d, max_width: %d, min_height: %d, max_height: %d\n",
           ctx->caps12.bIsSupported, ctx->caps12.nMinWidth, ctx->caps12.nMaxWidth, ctx->caps12.nMinHeight, ctx->caps12.nMaxHeight);

    switch (bit_depth) {
    case 10:
        caps = &ctx->caps10;
        if (res10 < 0)
            return res10;
        break;
    case 12:
        caps = &ctx->caps12;
        if (res12 < 0)
            return res12;
        break;
    default:
        caps = &ctx->caps8;
        if (res8 < 0)
            return res8;
    }

    if (!ctx->caps8.bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Codec %s is not supported.\n", avctx->codec->name);
        return AVERROR(EINVAL);
    }

    if (!caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "Bit depth %d is not supported.\n", bit_depth);
        return AVERROR(EINVAL);
    }

    if (probed_width > caps->nMaxWidth || probed_width < caps->nMinWidth) {
        av_log(avctx, AV_LOG_ERROR, "Video width %d not within range from %d to %d\n",
               probed_width, caps->nMinWidth, caps->nMaxWidth);
        return AVERROR(EINVAL);
    }

    if (probed_height > caps->nMaxHeight || probed_height < caps->nMinHeight) {
        av_log(avctx, AV_LOG_ERROR, "Video height %d not within range from %d to %d\n",
               probed_height, caps->nMinHeight, caps->nMaxHeight);
        return AVERROR(EINVAL);
    }

    return 0;
}
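
/*
 * Decoder init: negotiate the output format, set up (or adopt) the CUDA device
 * and frames contexts, load the nvcuvid functions, configure the optional
 * mp4-to-annexb bitstream filter and create the CUVID parser. The hardware
 * decoder itself is created lazily in the sequence callback.
 */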
static av_cold int cuvid_decode_init(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVCUDADeviceContext *device_hwctx;
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *hwframe_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt;
    CUcontext cuda_ctx = NULL;
    CUcontext dummy;
    const AVBitStreamFilter *bsf;
    int ret = 0;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NV12,
                                       AV_PIX_FMT_NONE };

    int probed_width = avctx->coded_width ? avctx->coded_width : 1280;
    int probed_height = avctx->coded_height ? avctx->coded_height : 720;
    int probed_bit_depth = 8;

    const AVPixFmtDescriptor *probe_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (probe_desc && probe_desc->nb_components)
        probed_bit_depth = probe_desc->comp[0].depth;

    // Accelerated transcoding scenarios with 'ffmpeg' require that the
    // pix_fmt be set to AV_PIX_FMT_CUDA early. The sw_pix_fmt, and the
    // pix_fmt for non-accelerated transcoding, do not need to be correct
    // but need to be set to something. We arbitrarily pick NV12.
    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", ret);
        return ret;
    }
    avctx->pix_fmt = ret;

    if (ctx->resize_expr && sscanf(ctx->resize_expr, "%dx%d",
                                   &ctx->resize.width, &ctx->resize.height) != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid resize expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    if (ctx->crop_expr && sscanf(ctx->crop_expr, "%dx%dx%dx%d",
                                 &ctx->crop.top, &ctx->crop.bottom,
                                 &ctx->crop.left, &ctx->crop.right) != 4) {
        av_log(avctx, AV_LOG_ERROR, "Invalid cropping expressions\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    ret = cuvid_load_functions(&ctx->cvdl, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed loading nvcuvid.\n");
        goto error;
    }

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    if (avctx->hw_frames_ctx) {
        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;

        ctx->hwdevice = av_buffer_ref(hwframe_ctx->device_ref);
        if (!ctx->hwdevice) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    } else {
        if (avctx->hw_device_ctx) {
            ctx->hwdevice = av_buffer_ref(avctx->hw_device_ctx);
            if (!ctx->hwdevice) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
        } else {
            ret = av_hwdevice_ctx_create(&ctx->hwdevice, AV_HWDEVICE_TYPE_CUDA, ctx->cu_gpu, NULL, 0);
            if (ret < 0)
                goto error;
        }

        ctx->hwframe = av_hwframe_ctx_alloc(ctx->hwdevice);
        if (!ctx->hwframe) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
            ret = AVERROR(ENOMEM);
            goto error;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    device_ctx = hwframe_ctx->device_ctx;
    device_hwctx = device_ctx->hwctx;

    cuda_ctx = device_hwctx->cuda_ctx;
    ctx->cudl = device_hwctx->internal->cuda_dl;

    memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
    memset(&ctx->cuparse_ext, 0, sizeof(ctx->cuparse_ext));
    memset(&seq_pkt, 0, sizeof(seq_pkt));

    ctx->cuparseinfo.pExtVideoInfo = &ctx->cuparse_ext;

    switch (avctx->codec->id) {
#if CONFIG_H264_CUVID_DECODER
    case AV_CODEC_ID_H264:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
        break;
#endif
#if CONFIG_HEVC_CUVID_DECODER
    case AV_CODEC_ID_HEVC:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
        break;
#endif
#if CONFIG_MJPEG_CUVID_DECODER
    case AV_CODEC_ID_MJPEG:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
        break;
#endif
#if CONFIG_MPEG1_CUVID_DECODER
    case AV_CODEC_ID_MPEG1VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
        break;
#endif
#if CONFIG_MPEG2_CUVID_DECODER
    case AV_CODEC_ID_MPEG2VIDEO:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
        break;
#endif
#if CONFIG_MPEG4_CUVID_DECODER
    case AV_CODEC_ID_MPEG4:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
        break;
#endif
#if CONFIG_VP8_CUVID_DECODER
    case AV_CODEC_ID_VP8:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
        break;
#endif
#if CONFIG_VP9_CUVID_DECODER
    case AV_CODEC_ID_VP9:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
        break;
#endif
#if CONFIG_VC1_CUVID_DECODER
    case AV_CODEC_ID_VC1:
        ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
        break;
#endif
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid CUVID codec!\n");
        return AVERROR_BUG;
    }

    if (avctx->codec->id == AV_CODEC_ID_H264 || avctx->codec->id == AV_CODEC_ID_HEVC) {
        if (avctx->codec->id == AV_CODEC_ID_H264)
            bsf = av_bsf_get_by_name("h264_mp4toannexb");
        else
            bsf = av_bsf_get_by_name("hevc_mp4toannexb");

        if (!bsf) {
            ret = AVERROR_BSF_NOT_FOUND;
            goto error;
        }
        if (ret = av_bsf_alloc(bsf, &ctx->bsf)) {
            goto error;
        }
        if (((ret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx)) < 0) || ((ret = av_bsf_init(ctx->bsf)) < 0)) {
            av_bsf_free(&ctx->bsf);
            goto error;
        }

        ctx->cuparse_ext.format.seqhdr_data_length = ctx->bsf->par_out->extradata_size;
        memcpy(ctx->cuparse_ext.raw_seqhdr_data,
               ctx->bsf->par_out->extradata,
               FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), ctx->bsf->par_out->extradata_size));
    } else if (avctx->extradata_size > 0) {
        ctx->cuparse_ext.format.seqhdr_data_length = avctx->extradata_size;
        memcpy(ctx->cuparse_ext.raw_seqhdr_data,
               avctx->extradata,
               FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), avctx->extradata_size));
    }

    ctx->key_frame = av_mallocz(ctx->nb_surfaces * sizeof(int));
    if (!ctx->key_frame) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
    ctx->cuparseinfo.ulMaxDisplayDelay = 4;
    ctx->cuparseinfo.pUserData = avctx;
    ctx->cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
    ctx->cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
    ctx->cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    ret = cuvid_test_capabilities(avctx, &ctx->cuparseinfo,
                                  probed_width,
                                  probed_height,
                                  probed_bit_depth);
    if (ret < 0)
        goto error;

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;

    if (!avctx->pkt_timebase.num || !avctx->pkt_timebase.den)
        av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");

    return 0;

error:
    cuvid_decode_end(avctx);
    return ret;
}
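
/*
 * Flush: drop all queued output, recreate the parser (the sequence callback
 * will recreate the decoder) and replay the stored sequence header so that
 * decoding can resume, e.g. after a seek.
 */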
static void cuvid_flush(AVCodecContext *avctx)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
    int ret;

    ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto error;

    av_fifo_freep(&ctx->frame_queue);

    ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
    if (!ctx->frame_queue) {
        av_log(avctx, AV_LOG_ERROR, "Failed to recreate frame queue on flush\n");
        return;
    }

    if (ctx->cudecoder) {
        ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
        ctx->cudecoder = NULL;
    }

    if (ctx->cuparser) {
        ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
        ctx->cuparser = NULL;
    }

    ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
    if (ret < 0)
        goto error;

    seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
    seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;

    if (seq_pkt.payload && seq_pkt.payload_size) {
        ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
        if (ret < 0)
            goto error;
    }

    ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
    if (ret < 0)
        goto error;

    ctx->prev_pts = INT64_MIN;
    ctx->decoder_flushing = 0;

    return;
error:
    av_log(avctx, AV_LOG_ERROR, "CUDA reinit on flush failed\n");
}
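
/*
 * Options shared by all *_cuvid decoders: "deint" selects the CUVID
 * deinterlacer, "surfaces" sizes the decode surface pool, and "crop"/"resize"
 * are applied by the hardware decoder via the decoder creation parameters.
 */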
#define OFFSET(x) offsetof(CuvidContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "deint",    "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT,   { .i64 = cudaVideoDeinterlaceMode_Weave    }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
    { "weave",    "Weave deinterlacing (do nothing)",        0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave    }, 0, 0, VD, "deint" },
    { "bob",      "Bob deinterlacing",                       0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob      }, 0, 0, VD, "deint" },
    { "adaptive", "Adaptive deinterlacing",                  0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
    { "gpu",      "GPU to be used for decoding", OFFSET(cu_gpu), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "surfaces", "Maximum surfaces to be used for decoding", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, VD },
    { "drop_second_field", "Drop second field when deinterlacing", OFFSET(drop_second_field), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "crop",     "Crop (top)x(bottom)x(left)x(right)", OFFSET(crop_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { "resize",   "Resize (width)x(height)", OFFSET(resize_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
    { NULL }
};

static const AVCodecHWConfigInternal *cuvid_hw_configs[] = {
    &(const AVCodecHWConfigInternal) {
        .public = {
            .pix_fmt     = AV_PIX_FMT_CUDA,
            .methods     = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX |
                           AV_CODEC_HW_CONFIG_METHOD_INTERNAL,
            .device_type = AV_HWDEVICE_TYPE_CUDA
        },
        .hwaccel = NULL,
    },
    NULL
};
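
/*
 * Every enabled codec gets its own AVCodec stamped out from this macro; they
 * all share the CuvidContext, the option table and the callbacks above.
 */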
#define DEFINE_CUVID_CODEC(x, X) \
    static const AVClass x##_cuvid_class = { \
        .class_name = #x "_cuvid", \
        .item_name = av_default_item_name, \
        .option = options, \
        .version = LIBAVUTIL_VERSION_INT, \
    }; \
    AVCodec ff_##x##_cuvid_decoder = { \
        .name           = #x "_cuvid", \
        .long_name      = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = AV_CODEC_ID_##X, \
        .priv_data_size = sizeof(CuvidContext), \
        .priv_class     = &x##_cuvid_class, \
        .init           = cuvid_decode_init, \
        .close          = cuvid_decode_end, \
        .decode         = cuvid_decode_frame, \
        .receive_frame  = cuvid_output_frame, \
        .flush          = cuvid_flush, \
        .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
                                                        AV_PIX_FMT_NV12, \
                                                        AV_PIX_FMT_P010, \
                                                        AV_PIX_FMT_P016, \
                                                        AV_PIX_FMT_NONE }, \
        .hw_configs     = cuvid_hw_configs, \
        .wrapper_name   = "cuvid", \
    };

#if CONFIG_HEVC_CUVID_DECODER
DEFINE_CUVID_CODEC(hevc, HEVC)
#endif

#if CONFIG_H264_CUVID_DECODER
DEFINE_CUVID_CODEC(h264, H264)
#endif

#if CONFIG_MJPEG_CUVID_DECODER
DEFINE_CUVID_CODEC(mjpeg, MJPEG)
#endif

#if CONFIG_MPEG1_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg1, MPEG1VIDEO)
#endif

#if CONFIG_MPEG2_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg2, MPEG2VIDEO)
#endif

#if CONFIG_MPEG4_CUVID_DECODER
DEFINE_CUVID_CODEC(mpeg4, MPEG4)
#endif

#if CONFIG_VP8_CUVID_DECODER
DEFINE_CUVID_CODEC(vp8, VP8)
#endif

#if CONFIG_VP9_CUVID_DECODER
DEFINE_CUVID_CODEC(vp9, VP9)
#endif

#if CONFIG_VC1_CUVID_DECODER
DEFINE_CUVID_CODEC(vc1, VC1)
#endif