You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

621 lines
19KB

  1. /*
  2. * librav1e encoder
  3. *
  4. * Copyright (c) 2019 Derek Buitenhuis
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <rav1e.h>
  23. #include "libavutil/internal.h"
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/base64.h"
  26. #include "libavutil/common.h"
  27. #include "libavutil/mathematics.h"
  28. #include "libavutil/opt.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "avcodec.h"
  31. #include "encode.h"
  32. #include "internal.h"
/* Private context for the librav1e encoder wrapper. */
typedef struct librav1eContext {
    const AVClass *class; /* Must be first: AVOptions requires it. */

    RaContext *ctx;    /* rav1e encoder handle. */
    AVFrame *frame;    /* Scratch frame used to fetch input via ff_encode_get_frame(). */
    RaFrame *rframe;   /* RaFrame held back for retry when rav1e's input queue is full. */
    AVBSFContext *bsf; /* extract_extradata BSF, used only with AV_CODEC_FLAG_GLOBAL_HEADER. */

    /* Two-pass ratecontrol stats buffer (base64-decoded on pass 2). */
    uint8_t *pass_data;
    size_t pass_pos;   /* Bytes of pass_data produced (pass 1) or consumed (pass 2) so far. */
    int pass_size;     /* Allocated size of pass_data in bytes. */

    /* User options (see options[] below for ranges and defaults). */
    AVDictionary *rav1e_opts; /* Raw key=value pairs forwarded to rav1e_config_parse(). */
    int quantizer;  /* Constant quantizer; -1 = unset. */
    int speed;      /* Speed preset; -1 = rav1e default. */
    int tiles;      /* Total tile count; <= 0 = unset. */
    int tile_rows;
    int tile_cols;
} librav1eContext;
  49. static inline RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
  50. {
  51. switch (pix_fmt) {
  52. case AV_PIX_FMT_YUVJ420P:
  53. case AV_PIX_FMT_YUVJ422P:
  54. case AV_PIX_FMT_YUVJ444P:
  55. return RA_PIXEL_RANGE_FULL;
  56. }
  57. switch (range) {
  58. case AVCOL_RANGE_JPEG:
  59. return RA_PIXEL_RANGE_FULL;
  60. case AVCOL_RANGE_MPEG:
  61. default:
  62. return RA_PIXEL_RANGE_LIMITED;
  63. }
  64. }
  65. static inline RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
  66. {
  67. switch (pix_fmt) {
  68. case AV_PIX_FMT_YUV420P:
  69. case AV_PIX_FMT_YUVJ420P:
  70. case AV_PIX_FMT_YUV420P10:
  71. case AV_PIX_FMT_YUV420P12:
  72. return RA_CHROMA_SAMPLING_CS420;
  73. case AV_PIX_FMT_YUV422P:
  74. case AV_PIX_FMT_YUVJ422P:
  75. case AV_PIX_FMT_YUV422P10:
  76. case AV_PIX_FMT_YUV422P12:
  77. return RA_CHROMA_SAMPLING_CS422;
  78. case AV_PIX_FMT_YUV444P:
  79. case AV_PIX_FMT_YUVJ444P:
  80. case AV_PIX_FMT_YUV444P10:
  81. case AV_PIX_FMT_YUV444P12:
  82. return RA_CHROMA_SAMPLING_CS444;
  83. default:
  84. av_assert0(0);
  85. }
  86. }
  87. static inline RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
  88. {
  89. switch (chroma_loc) {
  90. case AVCHROMA_LOC_LEFT:
  91. return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
  92. case AVCHROMA_LOC_TOPLEFT:
  93. return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
  94. default:
  95. return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
  96. }
  97. }
/* Drain first-pass stats from rav1e into ctx->pass_data.
 *
 * With eos == 0, each chunk rav1e emits is appended to the growing
 * pass_data buffer. With eos != 0, the chunk is rav1e's final summary,
 * which is written over the start of the buffer, and the whole buffer
 * is base64-encoded into avctx->stats_out for the stats file.
 *
 * Returns 0 on success (including "nothing to drain"), or AVERROR(ENOMEM).
 */
static int get_stats(AVCodecContext *avctx, int eos)
{
    librav1eContext *ctx = avctx->priv_data;
    RaData* buf = rav1e_twopass_out(ctx->ctx);
    if (!buf)
        return 0; /* rav1e has no stats pending right now. */
    if (!eos) {
        /* NOTE(review): pass_size is declared int but av_fast_realloc()
         * takes unsigned int * — confirm this passes without a pointer-type
         * warning, or change the field to unsigned. */
        uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size,
                                       ctx->pass_pos + buf->len);
        if (!tmp) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        ctx->pass_data = tmp;
        memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
        ctx->pass_pos += buf->len;
    } else {
        size_t b64_size = AV_BASE64_SIZE(ctx->pass_pos);
        /* Final summary block replaces the placeholder at the start of the
         * accumulated stats. NOTE(review): assumes buf->len <= pass_pos
         * (i.e. the summary is no larger than what was reserved) — confirm
         * against rav1e's two-pass contract. */
        memcpy(ctx->pass_data, buf->data, buf->len);
        avctx->stats_out = av_malloc(b64_size);
        if (!avctx->stats_out) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        av_base64_encode(avctx->stats_out, b64_size, ctx->pass_data, ctx->pass_pos);
        av_freep(&ctx->pass_data);
    }
    rav1e_data_unref(buf);
    return 0;
}
  128. static int set_stats(AVCodecContext *avctx)
  129. {
  130. librav1eContext *ctx = avctx->priv_data;
  131. int ret = 1;
  132. while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
  133. ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
  134. if (ret < 0)
  135. return AVERROR_EXTERNAL;
  136. ctx->pass_pos += ret;
  137. }
  138. return 0;
  139. }
  140. static av_cold int librav1e_encode_close(AVCodecContext *avctx)
  141. {
  142. librav1eContext *ctx = avctx->priv_data;
  143. if (ctx->ctx) {
  144. rav1e_context_unref(ctx->ctx);
  145. ctx->ctx = NULL;
  146. }
  147. if (ctx->rframe) {
  148. rav1e_frame_unref(ctx->rframe);
  149. ctx->rframe = NULL;
  150. }
  151. av_frame_free(&ctx->frame);
  152. av_bsf_free(&ctx->bsf);
  153. av_freep(&ctx->pass_data);
  154. return 0;
  155. }
/* Build a rav1e config from the AVCodecContext and user options, then create
 * the encoder. On any failure the config is released via the end: label;
 * partially-allocated context members are cleaned up by close()
 * (FF_CODEC_CAP_INIT_CLEANUP).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int librav1e_encode_init(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RaConfig *cfg = NULL;
    int rret;
    int ret = 0;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    cfg = rav1e_config_default();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
        return AVERROR_EXTERNAL;
    }

    /*
     * Rav1e currently uses the time base given to it only for ratecontrol... where
     * the inverse is taken and used as a framerate. So, do what we do in other wrappers
     * and use the framerate if we can.
     */
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        /* Note: num/den swapped — rav1e wants a time base, we have a frame rate. */
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->framerate.den, avctx->framerate.num
                                   });
    } else {
        rav1e_config_set_time_base(cfg, (RaRational) {
                                   avctx->time_base.num * avctx->ticks_per_frame,
                                   avctx->time_base.den
                                   });
    }

    /* Two-pass mode: rav1e's ratecontrol needs a target bitrate in both passes. */
    if ((avctx->flags & AV_CODEC_FLAG_PASS1 || avctx->flags & AV_CODEC_FLAG_PASS2) && !avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "A bitrate must be set to use two pass mode.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (!avctx->stats_in) {
            av_log(avctx, AV_LOG_ERROR, "No stats file provided for second pass.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        /* strlen * 3/4 is an upper bound on the base64-decoded size. */
        ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
        ctx->pass_data = av_malloc(ctx->pass_size);
        if (!ctx->pass_data) {
            av_log(avctx, AV_LOG_ERROR, "Could not allocate stats buffer.\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }

        /* Replace the bound with the actual decoded byte count. */
        ctx->pass_size = av_base64_decode(ctx->pass_data, avctx->stats_in, ctx->pass_size);
        if (ctx->pass_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid pass file.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
    }

    /* GLOBAL_HEADER: rav1e has no extradata API, so pull the sequence header
     * out of the first packet with the extract_extradata BSF. */
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        const AVBitStreamFilter *filter = av_bsf_get_by_name("extract_extradata");
        int bret;

        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "extract_extradata bitstream filter "
                   "not found. This is a bug, please report it.\n");
            ret = AVERROR_BUG;
            goto end;
        }

        bret = av_bsf_alloc(filter, &ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }

        bret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx);
        if (bret < 0) {
            ret = bret;
            goto end;
        }

        bret = av_bsf_init(ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }
    }

    /* Forward raw -rav1e-params key=value pairs first; explicit AVOptions
     * below then override them. Bad keys only warn, they don't fail init. */
    {
        AVDictionaryEntry *en = NULL;
        while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
            int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
            if (parse_ret < 0)
                av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
        }
    }

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid width passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid height passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* Thread count is best-effort; rav1e picks automatically on failure. */
    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);
    if (rret < 0)
        av_log(avctx, AV_LOG_WARNING, "Invalid number of threads, defaulting to auto.\n");

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set speed preset.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    /* rav1e handles precedence between 'tiles' and cols/rows for us. */
    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tiles to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (ctx->tile_rows > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile rows to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (ctx->tile_cols > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile cols to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->gop_size > 0) {
        rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->keyint_min > 0) {
        rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set min keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    /* Ratecontrol selection: bitrate mode (with optional qmin/qmax bounds)
     * unless a constant quantizer was requested, which takes precedence. */
    if (avctx->bit_rate && ctx->quantizer < 0) {
        int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;

        /* In bitrate mode rav1e's "quantizer" acts as the upper bound. */
        rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }

        if (avctx->qmin >= 0) {
            rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
            if (rret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Could not set min quantizer.\n");
                ret = AVERROR_EXTERNAL;
                goto end;
            }
        }

        rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set bitrate.\n");
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    } else if (ctx->quantizer >= 0) {
        if (avctx->bit_rate)
            av_log(avctx, AV_LOG_WARNING, "Both bitrate and quantizer specified. Using quantizer mode.");

        rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
                                         pix_fmt_map(avctx->pix_fmt),
                                         chroma_loc_map(avctx->chroma_sample_location),
                                         range_map(avctx->pix_fmt, avctx->color_range));
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to set pixel format properties.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* rav1e's colorspace enums match standard values. */
    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              (RaColorPrimaries) avctx->color_primaries,
                                              (RaTransferCharacteristics) avctx->color_trc);
    if (rret < 0) {
        /* Non-fatal unless the user asked for strict error checking. */
        av_log(avctx, AV_LOG_WARNING, "Failed to set color properties.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    ctx->ctx = rav1e_context_new(cfg);
    if (!ctx->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
        ret = AVERROR_EXTERNAL;
        goto end;
    }

    ret = 0;

end:
    rav1e_config_unref(cfg);

    return ret;
}
/* Pull one encoded packet from rav1e.
 *
 * Flow: (1) if no RaFrame is pending from a previous full-queue retry, fetch
 * the next input AVFrame and copy its planes into a new RaFrame; (2) submit
 * it (NULL RaFrame signals EOF/flush to rav1e); (3) exchange two-pass stats;
 * (4) receive a packet, looping on STATUS_ENCODED, and wrap it in an AVPacket.
 *
 * Returns 0 with a packet, AVERROR(EAGAIN) when more input is needed,
 * AVERROR_EOF at end of stream, or another negative AVERROR on failure.
 */
static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    librav1eContext *ctx = avctx->priv_data;
    RaFrame *rframe = ctx->rframe;
    RaPacket *rpkt = NULL;
    int ret;

    if (!rframe) {
        AVFrame *frame = ctx->frame;

        ret = ff_encode_get_frame(avctx, frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;

        if (frame->buf[0]) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);

            rframe = rav1e_frame_new(ctx->ctx);
            if (!rframe) {
                av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
                av_frame_unref(frame);
                return AVERROR(ENOMEM);
            }

            /* Copy each plane into the RaFrame. Chroma planes are shifted
             * vertically by log2_chroma_h; bytes-per-sample comes from the
             * luma depth (all planes share one depth for supported formats). */
            for (int i = 0; i < desc->nb_components; i++) {
                int shift = i ? desc->log2_chroma_h : 0;
                int bytes = desc->comp[0].depth == 8 ? 1 : 2;
                rav1e_frame_fill_plane(rframe, i, frame->data[i],
                                       (frame->height >> shift) * frame->linesize[i],
                                       frame->linesize[i], bytes);
            }
            av_frame_unref(frame);
        }
    }

    /* rframe == NULL here means EOF: flush rav1e. */
    ret = rav1e_send_frame(ctx->ctx, rframe);
    if (rframe)
        if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
            ctx->rframe = rframe; /* Queue is full. Store the RaFrame to retry next call */
        } else {
            rav1e_frame_unref(rframe); /* No need to unref if flushing. */
            ctx->rframe = NULL;
        }

    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
    case RA_ENCODER_STATUS_ENOUGH_DATA:
        /* ENOUGH_DATA is not an error: the stored rframe is retried later. */
        break;
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

retry:
    /* Keep the two-pass stats pipes flowing before each receive attempt. */
    if (avctx->flags & AV_CODEC_FLAG_PASS1) {
        int sret = get_stats(avctx, 0);
        if (sret < 0)
            return sret;
    } else if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        int sret = set_stats(avctx);
        if (sret < 0)
            return sret;
    }

    ret = rav1e_receive_packet(ctx->ctx, &rpkt);
    switch (ret) {
    case RA_ENCODER_STATUS_SUCCESS:
        break;
    case RA_ENCODER_STATUS_LIMIT_REACHED:
        /* Stream fully flushed; on pass 1 emit the final stats summary. */
        if (avctx->flags & AV_CODEC_FLAG_PASS1) {
            int sret = get_stats(avctx, 1);
            if (sret < 0)
                return sret;
        }
        return AVERROR_EOF;
    case RA_ENCODER_STATUS_ENCODED:
        /* A frame was encoded but no packet is ready yet; try again. */
        goto retry;
    case RA_ENCODER_STATUS_NEED_MORE_DATA:
        if (avctx->internal->draining) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
            return AVERROR_EXTERNAL;
        }
        return AVERROR(EAGAIN);
    case RA_ENCODER_STATUS_FAILURE:
        av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
        return AVERROR_EXTERNAL;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));
        return AVERROR_UNKNOWN;
    }

    ret = av_new_packet(pkt, rpkt->len);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate packet.\n");
        rav1e_packet_unref(rpkt);
        return ret;
    }

    memcpy(pkt->data, rpkt->data, rpkt->len);

    if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* rav1e does not carry timestamps through; reconstruct them from the
     * input frame number. NOTE(review): this assumes constant frame
     * duration of ticks_per_frame — confirm acceptable for VFR input. */
    pkt->pts = pkt->dts = rpkt->input_frameno * avctx->ticks_per_frame;

    rav1e_packet_unref(rpkt);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        int ret = av_bsf_send_packet(ctx->bsf, pkt);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "extradata extraction send failed.\n");
            av_packet_unref(pkt);
            return ret;
        }

        ret = av_bsf_receive_packet(ctx->bsf, pkt);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "extradata extraction receive failed.\n");
            av_packet_unref(pkt);
            return ret;
        }
    }

    return 0;
}
#define OFFSET(x) offsetof(librav1eContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Encoder-private options. -1 / 0 defaults mean "unset; let rav1e decide".
 * NOTE(review): the tile options use INT64_MAX as max on AV_OPT_TYPE_INT
 * fields — confirm libavutil clamps this sanely, or cap at INT_MAX. */
static const AVOption options[] = {
    { "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
    { "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
    { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
    { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
    { NULL }
};
/* Override generic codec defaults so "unset" is detectable in init():
 * 0 / -1 here means the user did not specify the option. */
static const AVCodecDefault librav1e_defaults[] = {
    { "b", "0" },
    { "g", "0" },
    { "keyint_min", "0" },
    { "qmax", "-1" },
    { "qmin", "-1" },
    { NULL }
};
  501. const enum AVPixelFormat librav1e_pix_fmts[] = {
  502. AV_PIX_FMT_YUV420P,
  503. AV_PIX_FMT_YUVJ420P,
  504. AV_PIX_FMT_YUV420P10,
  505. AV_PIX_FMT_YUV420P12,
  506. AV_PIX_FMT_YUV422P,
  507. AV_PIX_FMT_YUVJ422P,
  508. AV_PIX_FMT_YUV422P10,
  509. AV_PIX_FMT_YUV422P12,
  510. AV_PIX_FMT_YUV444P,
  511. AV_PIX_FMT_YUVJ444P,
  512. AV_PIX_FMT_YUV444P10,
  513. AV_PIX_FMT_YUV444P12,
  514. AV_PIX_FMT_NONE
  515. };
/* AVClass for option handling and logging ('class' is a valid identifier in C). */
static const AVClass class = {
    .class_name = "librav1e",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
/* Codec registration. CAP_DELAY: packets may arrive after input frames stop.
 * FF_CODEC_CAP_INIT_CLEANUP: close() is called even when init() fails. */
AVCodec ff_librav1e_encoder = {
    .name = "librav1e",
    .long_name = NULL_IF_CONFIG_SMALL("librav1e AV1"),
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_AV1,
    .init = librav1e_encode_init,
    .receive_packet = librav1e_receive_packet,
    .close = librav1e_encode_close,
    .priv_data_size = sizeof(librav1eContext),
    .priv_class = &class,
    .defaults = librav1e_defaults,
    .pix_fmts = librav1e_pix_fmts,
    .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name = "librav1e",
};