/*
 * librav1e encoder
 *
 * Copyright (c) 2019 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <rav1e.h>

#include "libavutil/internal.h"
#include "libavutil/avassert.h"
#include "libavutil/base64.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "internal.h"

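/* Private state stored in AVCodecContext.priv_data: the rav1e encoder handle,
 * an optional extract_extradata BSF, buffered two-pass stats, and the
 * user-visible options declared at the bottom of this file. */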
typedef struct librav1eContext {
    const AVClass *class;

    RaContext *ctx;
    AVBSFContext *bsf;

    uint8_t *pass_data;
    size_t pass_pos;
    int pass_size;

    AVDictionary *rav1e_opts;
    int quantizer;
    int speed;
    int tiles;
    int tile_rows;
    int tile_cols;
} librav1eContext;

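/* Helpers mapping FFmpeg pixel format, color range and chroma location enums
 * onto the corresponding rav1e C API types. */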
static inline RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
{
    switch (pix_fmt) {
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUVJ444P:
        return RA_PIXEL_RANGE_FULL;
    }

    switch (range) {
    case AVCOL_RANGE_JPEG:
        return RA_PIXEL_RANGE_FULL;
    case AVCOL_RANGE_MPEG:
    default:
        return RA_PIXEL_RANGE_LIMITED;
    }
}

static inline RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
{
    switch (pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
        return RA_CHROMA_SAMPLING_CS420;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
        return RA_CHROMA_SAMPLING_CS422;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
        return RA_CHROMA_SAMPLING_CS444;
    default:
        av_assert0(0);
    }
}

static inline RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
{
    switch (chroma_loc) {
    case AVCHROMA_LOC_LEFT:
        return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
    case AVCHROMA_LOC_TOPLEFT:
        return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
    default:
        return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
    }
}

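/*
 * Collect two-pass rate control stats from rav1e. During the first pass the
 * raw stats are appended to ctx->pass_data; at end of stream the accumulated
 * buffer is base64-encoded into avctx->stats_out for use by the second pass.
 */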
static int get_stats(AVCodecContext *avctx, int eos)
{
    librav1eContext *ctx = avctx->priv_data;
    RaData *buf = rav1e_twopass_out(ctx->ctx);
    if (!buf)
        return 0;

    if (!eos) {
        uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size,
                                       ctx->pass_pos + buf->len);
        if (!tmp) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }

        ctx->pass_data = tmp;
        memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
        ctx->pass_pos += buf->len;
    } else {
        size_t b64_size = AV_BASE64_SIZE(ctx->pass_pos);

        memcpy(ctx->pass_data, buf->data, buf->len);

        avctx->stats_out = av_malloc(b64_size);
        if (!avctx->stats_out) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }

        av_base64_encode(avctx->stats_out, b64_size, ctx->pass_data, ctx->pass_pos);

        av_freep(&ctx->pass_data);
    }

    rav1e_data_unref(buf);

    return 0;
}

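/*
 * Feed the first-pass stats decoded from avctx->stats_in back into rav1e,
 * advancing pass_pos by however many bytes rav1e consumed on each call.
 */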
static int set_stats(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    int ret = 1;

    while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
        ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
        if (ret < 0)
            return AVERROR_EXTERNAL;
        ctx->pass_pos += ret;
    }

    return 0;
}

static av_cold int librav1e_encode_close(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;

    if (ctx->ctx) {
        rav1e_context_unref(ctx->ctx);
        ctx->ctx = NULL;
    }

    av_bsf_free(&ctx->bsf);
    av_freep(&ctx->pass_data);

    return 0;
}

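/*
 * Translate the AVCodecContext settings into a RaConfig, set up the optional
 * two-pass state and extract_extradata BSF, then create the rav1e context.
 */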
static av_cold int librav1e_encode_init(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RaConfig *cfg = NULL;
    int rret;
    int ret = 0;

    cfg = rav1e_config_default();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
        return AVERROR_EXTERNAL;
    }

    /*
     * Rav1e currently uses the time base given to it only for ratecontrol... where
     * the inverse is taken and used as a framerate. So, do what we do in other wrappers
     * and use the framerate if we can.
     */
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->framerate.den, avctx->framerate.num
                                   });
    } else {
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->time_base.num * avctx->ticks_per_frame,
                                   avctx->time_base.den
                                   });
    }

    if ((avctx->flags & AV_CODEC_FLAG_PASS1 || avctx->flags & AV_CODEC_FLAG_PASS2) && !avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "A bitrate must be set to use two pass mode.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (!avctx->stats_in) {
            av_log(avctx, AV_LOG_ERROR, "No stats file provided for second pass.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
        ctx->pass_data = av_malloc(ctx->pass_size);
        if (!ctx->pass_data) {
            av_log(avctx, AV_LOG_ERROR, "Could not allocate stats buffer.\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ctx->pass_size = av_base64_decode(ctx->pass_data, avctx->stats_in, ctx->pass_size);
        if (ctx->pass_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid pass file.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        const AVBitStreamFilter *filter = av_bsf_get_by_name("extract_extradata");
        int bret;

        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "extract_extradata bitstream filter "
                   "not found. This is a bug, please report it.\n");
            ret = AVERROR_BUG;
            goto end;
        }

        bret = av_bsf_alloc(filter, &ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }

        bret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx);
        if (bret < 0) {
            ret = bret;
            goto end;
        }

        bret = av_bsf_init(ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }
    }

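    /*
     * Forward any free-form rav1e-params key=value pairs straight to
     * rav1e_config_parse(); an unknown or invalid entry only produces a
     * warning. (For example, something like -rav1e-params low_latency=true;
     * the accepted keys are whatever the linked rav1e version understands.)
     */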
    {
        AVDictionaryEntry *en = NULL;
        while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
            int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
            if (parse_ret < 0)
                av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
        }
    }

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid width passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid height passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);
    if (rret < 0)
        av_log(avctx, AV_LOG_WARNING, "Invalid number of threads, defaulting to auto.\n");

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set speed preset.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    /* rav1e handles precedence between 'tiles' and cols/rows for us. */
    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tiles to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_rows > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile rows to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_cols > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile cols to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->gop_size > 0) {
        rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->keyint_min > 0) {
        rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set min keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

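    /*
     * Rate control: with a bitrate set and no explicit qp, run rav1e in
     * bitrate mode, mapping qmax/qmin onto rav1e's quantizer bounds;
     * otherwise a non-negative qp selects constant-quantizer mode.
     */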
    if (avctx->bit_rate && ctx->quantizer < 0) {
        int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;

        rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }

        if (avctx->qmin >= 0) {
            rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
            if (rret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Could not set min quantizer.\n");
                ret = AVERROR_EXTERNAL;
                goto end;
            }
        }

        rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set bitrate.\n");
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    } else if (ctx->quantizer >= 0) {
        if (avctx->bit_rate)
            av_log(avctx, AV_LOG_WARNING, "Both bitrate and quantizer specified. Using quantizer mode.\n");

        rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
                                         pix_fmt_map(avctx->pix_fmt),
                                         chroma_loc_map(avctx->chroma_sample_location),
                                         range_map(avctx->pix_fmt, avctx->color_range));
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to set pixel format properties.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* rav1e's colorspace enums match standard values. */
    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              (RaColorPrimaries) avctx->color_primaries,
                                              (RaTransferCharacteristics) avctx->color_trc);
    if (rret < 0) {
        av_log(avctx, AV_LOG_WARNING, "Failed to set color properties.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    ctx->ctx = rav1e_context_new(cfg);
    if (!ctx->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
        ret = AVERROR_EXTERNAL;
        goto end;
    }

    ret = 0;

end:
    rav1e_config_unref(cfg);

    return ret;
}

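/*
 * Copy one AVFrame into a newly allocated RaFrame plane by plane and submit it
 * to rav1e. A NULL frame is forwarded as-is to signal end of stream.
 */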
static int librav1e_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    librav1eContext *ctx = avctx->priv_data;
    RaFrame *rframe = NULL;
    int ret;

    if (frame) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);

        rframe = rav1e_frame_new(ctx->ctx);
        if (!rframe) {
            av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
            return AVERROR(ENOMEM);
        }

        for (int i = 0; i < desc->nb_components; i++) {
            int shift = i ? desc->log2_chroma_h : 0;
            int bytes = desc->comp[0].depth == 8 ? 1 : 2;
            rav1e_frame_fill_plane(rframe, i, frame->data[i],
                                   (frame->height >> shift) * frame->linesize[i],
                                   frame->linesize[i], bytes);
        }
    }

    ret = rav1e_send_frame(ctx->ctx, rframe);
    if (rframe)
        rav1e_frame_unref(rframe); /* No need to unref if flushing. */

    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
        break;
    case RA_ENCODER_STATUS_ENOUGH_DATA:
        return AVERROR(EAGAIN);
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

    return 0;
}

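/*
 * Drain encoded packets from rav1e, translating its status codes into the
 * send/receive API's EAGAIN/EOF semantics and running the two-pass stats
 * exchange whenever PASS1/PASS2 is enabled.
 */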
static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    librav1eContext *ctx = avctx->priv_data;
    RaPacket *rpkt = NULL;
    int ret;

retry:

    if (avctx->flags & AV_CODEC_FLAG_PASS1) {
        int sret = get_stats(avctx, 0);
        if (sret < 0)
            return sret;
    } else if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        int sret = set_stats(avctx);
        if (sret < 0)
            return sret;
    }

    ret = rav1e_receive_packet(ctx->ctx, &rpkt);
    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
        break;
    case RA_ENCODER_STATUS_LIMIT_REACHED:
        if (avctx->flags & AV_CODEC_FLAG_PASS1) {
            int sret = get_stats(avctx, 1);
            if (sret < 0)
                return sret;
        }
        return AVERROR_EOF;
    case RA_ENCODER_STATUS_ENCODED:
        if (avctx->internal->draining)
            goto retry;
        return AVERROR(EAGAIN);
    case RA_ENCODER_STATUS_NEED_MORE_DATA:
        if (avctx->internal->draining) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
            return AVERROR_EXTERNAL;
        }
        return AVERROR(EAGAIN);
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

    ret = av_new_packet(pkt, rpkt->len);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate packet.\n");
        rav1e_packet_unref(rpkt);
        return ret;
    }

    memcpy(pkt->data, rpkt->data, rpkt->len);

    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pkt->dts = rpkt->input_frameno * avctx->ticks_per_frame;
    rav1e_packet_unref(rpkt);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        int ret = av_bsf_send_packet(ctx->bsf, pkt);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "extradata extraction send failed.\n");
            av_packet_unref(pkt);
            return ret;
        }

        ret = av_bsf_receive_packet(ctx->bsf, pkt);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "extradata extraction receive failed.\n");
            av_packet_unref(pkt);
            return ret;
        }
    }

    return 0;
}

#define OFFSET(x) offsetof(librav1eContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    { "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
    { "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
  472. { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  473. { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  474. { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  475. { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
  476. { NULL }
  477. };
static const AVCodecDefault librav1e_defaults[] = {
    { "b",          "0" },
    { "g",          "0" },
    { "keyint_min", "0" },
    { "qmax",      "-1" },
    { "qmin",      "-1" },
    { NULL }
};

const enum AVPixelFormat librav1e_pix_fmts[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_YUV420P12,
    AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUV422P10,
    AV_PIX_FMT_YUV422P12,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_NONE
};

static const AVClass class = {
    .class_name = "librav1e",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

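/*
 * Encoder registration. A usage sketch, assuming an FFmpeg build configured
 * with --enable-librav1e:
 *
 *   ffmpeg -i input.mkv -c:v librav1e -speed 6 -qp 80 -tile-columns 2 out.mkv
 */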
AVCodec ff_librav1e_encoder = {
    .name           = "librav1e",
    .long_name      = NULL_IF_CONFIG_SMALL("librav1e AV1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .init           = librav1e_encode_init,
    .send_frame     = librav1e_send_frame,
    .receive_packet = librav1e_receive_packet,
    .close          = librav1e_encode_close,
    .priv_data_size = sizeof(librav1eContext),
    .priv_class     = &class,
    .defaults       = librav1e_defaults,
    .pix_fmts       = librav1e_pix_fmts,
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name   = "librav1e",
};