/* (Repository-viewer page chrome removed: topic-selection hint and "631 lines / 20KB" counters were not part of the source file.) */
  1. /*
  2. * librav1e encoder
  3. *
  4. * Copyright (c) 2019 Derek Buitenhuis
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <rav1e.h>
  23. #include "libavutil/internal.h"
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/base64.h"
  26. #include "libavutil/common.h"
  27. #include "libavutil/mathematics.h"
  28. #include "libavutil/opt.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "avcodec.h"
  31. #include "encode.h"
  32. #include "internal.h"
/* Private codec context for the librav1e (rav1e AV1) encoder wrapper. */
typedef struct librav1eContext {
    const AVClass *class; // must stay first: used by the AVOptions/logging machinery

    RaContext *ctx;    // rav1e encoder instance, created in init from the built-up RaConfig
    AVFrame *frame;    // scratch frame used to pull input via ff_encode_get_frame()
    RaFrame *rframe;   // RaFrame kept for retry when rav1e's queue was full (ENOUGH_DATA)
    AVBSFContext *bsf; // extract_extradata BSF, set up only when GLOBAL_HEADER is requested

    uint8_t *pass_data; // two-pass stats buffer (raw bytes; base64 only in stats_in/stats_out)
    size_t pass_pos;    // pass 1: bytes written so far; pass 2: bytes already fed to rav1e
    int pass_size;      // allocated/valid size of pass_data
                        // NOTE(review): get_stats() passes &pass_size to av_fast_realloc(),
                        // which takes unsigned int * — confirm this int/unsigned aliasing
                        // against libavutil/mem.h.

    AVDictionary *rav1e_opts; // raw key=value pairs forwarded verbatim to rav1e_config_parse()
    int quantizer;            // "qp" option; < 0 means unset (use rate control instead)
    int speed;                // speed preset; < 0 means leave rav1e's default
    int tiles;                // requested total tile count; <= 0 means unset
    int tile_rows;            // requested tile rows; <= 0 means unset
    int tile_cols;            // requested tile columns; <= 0 means unset
} librav1eContext;
  49. static inline RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
  50. {
  51. switch (pix_fmt) {
  52. case AV_PIX_FMT_YUVJ420P:
  53. case AV_PIX_FMT_YUVJ422P:
  54. case AV_PIX_FMT_YUVJ444P:
  55. return RA_PIXEL_RANGE_FULL;
  56. }
  57. switch (range) {
  58. case AVCOL_RANGE_JPEG:
  59. return RA_PIXEL_RANGE_FULL;
  60. case AVCOL_RANGE_MPEG:
  61. default:
  62. return RA_PIXEL_RANGE_LIMITED;
  63. }
  64. }
  65. static inline RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
  66. {
  67. switch (pix_fmt) {
  68. case AV_PIX_FMT_YUV420P:
  69. case AV_PIX_FMT_YUVJ420P:
  70. case AV_PIX_FMT_YUV420P10:
  71. case AV_PIX_FMT_YUV420P12:
  72. return RA_CHROMA_SAMPLING_CS420;
  73. case AV_PIX_FMT_YUV422P:
  74. case AV_PIX_FMT_YUVJ422P:
  75. case AV_PIX_FMT_YUV422P10:
  76. case AV_PIX_FMT_YUV422P12:
  77. return RA_CHROMA_SAMPLING_CS422;
  78. case AV_PIX_FMT_YUV444P:
  79. case AV_PIX_FMT_YUVJ444P:
  80. case AV_PIX_FMT_YUV444P10:
  81. case AV_PIX_FMT_YUV444P12:
  82. return RA_CHROMA_SAMPLING_CS444;
  83. default:
  84. av_assert0(0);
  85. }
  86. }
  87. static inline RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
  88. {
  89. switch (chroma_loc) {
  90. case AVCHROMA_LOC_LEFT:
  91. return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
  92. case AVCHROMA_LOC_TOPLEFT:
  93. return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
  94. default:
  95. return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
  96. }
  97. }
/*
 * Drain first-pass stats from rav1e into ctx->pass_data.
 *
 * While encoding (eos == 0) each chunk rav1e emits is appended to the
 * growing pass_data buffer. At end of stream (eos != 0) rav1e emits the
 * final stats summary, which is copied over the start of pass_data and the
 * accumulated buffer is base64-encoded into avctx->stats_out for the
 * caller's stats file.
 *
 * Returns 0 on success (including "nothing to drain") or AVERROR(ENOMEM).
 */
static int get_stats(AVCodecContext *avctx, int eos)
{
    librav1eContext *ctx = avctx->priv_data;
    RaData* buf = rav1e_twopass_out(ctx->ctx);
    if (!buf)
        return 0; /* no stats pending right now */
    if (!eos) {
        /* Grow the buffer to hold the new chunk and append it. */
        uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size,
                                       ctx->pass_pos + buf->len);
        if (!tmp) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        ctx->pass_data = tmp;
        memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
        ctx->pass_pos += buf->len;
    } else {
        size_t b64_size = AV_BASE64_SIZE(ctx->pass_pos);
        /* Final summary overwrites the placeholder at the head of the buffer.
         * NOTE(review): assumes buf->len <= ctx->pass_pos so the copy stays in
         * bounds — TODO confirm against the rav1e two-pass API contract. */
        memcpy(ctx->pass_data, buf->data, buf->len);
        avctx->stats_out = av_malloc(b64_size);
        if (!avctx->stats_out) {
            rav1e_data_unref(buf);
            return AVERROR(ENOMEM);
        }
        av_base64_encode(avctx->stats_out, b64_size, ctx->pass_data, ctx->pass_pos);
        av_freep(&ctx->pass_data);
    }
    rav1e_data_unref(buf);
    return 0;
}
  128. static int set_stats(AVCodecContext *avctx)
  129. {
  130. librav1eContext *ctx = avctx->priv_data;
  131. int ret = 1;
  132. while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
  133. ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
  134. if (ret < 0)
  135. return AVERROR_EXTERNAL;
  136. ctx->pass_pos += ret;
  137. }
  138. return 0;
  139. }
  140. static av_cold int librav1e_encode_close(AVCodecContext *avctx)
  141. {
  142. librav1eContext *ctx = avctx->priv_data;
  143. if (ctx->ctx) {
  144. rav1e_context_unref(ctx->ctx);
  145. ctx->ctx = NULL;
  146. }
  147. if (ctx->rframe) {
  148. rav1e_frame_unref(ctx->rframe);
  149. ctx->rframe = NULL;
  150. }
  151. av_frame_free(&ctx->frame);
  152. av_bsf_free(&ctx->bsf);
  153. av_freep(&ctx->pass_data);
  154. return 0;
  155. }
/*
 * Initialize the rav1e encoder: build a RaConfig from the AVCodecContext and
 * the private options, set up two-pass state and the extradata BSF if
 * requested, then create the RaContext.
 *
 * Returns 0 on success or a negative AVERROR code. On failure, allocated
 * resources are released by librav1e_encode_close() via
 * FF_CODEC_CAP_INIT_CLEANUP; cfg is always unreffed through the "end" label.
 */
static av_cold int librav1e_encode_init(AVCodecContext *avctx)
{
    librav1eContext *ctx = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RaConfig *cfg = NULL;
    int rret;    /* return codes from rav1e calls */
    int ret = 0; /* our AVERROR result, routed through "end" */

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    cfg = rav1e_config_default();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
        return AVERROR_EXTERNAL;
    }

    /*
     * Rav1e currently uses the time base given to it only for ratecontrol... where
     * the inverse is taken and used as a framerate. So, do what we do in other wrappers
     * and use the framerate if we can.
     */
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        /* Pass the framerate inverted, since rav1e wants a time base. */
        rav1e_config_set_time_base(cfg, (RaRational) {
            avctx->framerate.den, avctx->framerate.num
        });
    } else {
        rav1e_config_set_time_base(cfg, (RaRational) {
            avctx->time_base.num * avctx->ticks_per_frame,
            avctx->time_base.den
        });
    }

    /* Two-pass mode is rate-control driven, so a target bitrate is mandatory. */
    if ((avctx->flags & AV_CODEC_FLAG_PASS1 || avctx->flags & AV_CODEC_FLAG_PASS2) && !avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "A bitrate must be set to use two pass mode.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        if (!avctx->stats_in) {
            av_log(avctx, AV_LOG_ERROR, "No stats file provided for second pass.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
        /* stats_in is base64 text; 3/4 of its length bounds the decoded size. */
        ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
        ctx->pass_data = av_malloc(ctx->pass_size);
        if (!ctx->pass_data) {
            av_log(avctx, AV_LOG_ERROR, "Could not allocate stats buffer.\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
        /* Replace the estimate with the actual decoded byte count. */
        ctx->pass_size = av_base64_decode(ctx->pass_data, avctx->stats_in, ctx->pass_size);
        if (ctx->pass_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid pass file.\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        /* rav1e has no API to emit headers up front, so extract them from the
         * first packet with the extract_extradata BSF in receive_packet(). */
        const AVBitStreamFilter *filter = av_bsf_get_by_name("extract_extradata");
        int bret;
        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "extract_extradata bitstream filter "
                   "not found. This is a bug, please report it.\n");
            ret = AVERROR_BUG;
            goto end;
        }
        bret = av_bsf_alloc(filter, &ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }
        bret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx);
        if (bret < 0) {
            ret = bret;
            goto end;
        }
        bret = av_bsf_init(ctx->bsf);
        if (bret < 0) {
            ret = bret;
            goto end;
        }
    }

    {
        /* Forward user-supplied rav1e-params verbatim; bad keys only warn so a
         * typo does not abort the whole encode. */
        AVDictionaryEntry *en = NULL;
        while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
            int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
            if (parse_ret < 0)
                av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
        }
    }

    rret = rav1e_config_parse_int(cfg, "width", avctx->width);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid width passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    rret = rav1e_config_parse_int(cfg, "height", avctx->height);
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid height passed to rav1e.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* Thread count is advisory only: failure falls back to rav1e's auto mode. */
    rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);
    if (rret < 0)
        av_log(avctx, AV_LOG_WARNING, "Invalid number of threads, defaulting to auto.\n");

    if (ctx->speed >= 0) {
        rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set speed preset.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    /* rav1e handles precedence between 'tiles' and cols/rows for us. */
    if (ctx->tiles > 0) {
        rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tiles to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_rows > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile rows to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (ctx->tile_cols > 0) {
        rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set number of tile cols to encode with.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->gop_size > 0) {
        rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }
    if (avctx->keyint_min > 0) {
        rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set min keyint.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    if (avctx->bit_rate && ctx->quantizer < 0) {
        /* Bitrate mode: "quantizer" acts as the quality ceiling (qmax). */
        int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;

        rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set max quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }

        if (avctx->qmin >= 0) {
            rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
            if (rret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Could not set min quantizer.\n");
                ret = AVERROR_EXTERNAL;
                goto end;
            }
        }

        rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set bitrate.\n");
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    } else if (ctx->quantizer >= 0) {
        /* Constant-quantizer mode; an explicit qp wins over bitrate. */
        if (avctx->bit_rate)
            av_log(avctx, AV_LOG_WARNING, "Both bitrate and quantizer specified. Using quantizer mode.");

        rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
        if (rret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set quantizer.\n");
            ret = AVERROR_EXTERNAL;
            goto end;
        }
    }

    rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
                                         pix_fmt_map(avctx->pix_fmt),
                                         chroma_loc_map(avctx->chroma_sample_location),
                                         range_map(avctx->pix_fmt, avctx->color_range));
    if (rret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to set pixel format properties.\n");
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* rav1e's colorspace enums match standard values. */
    rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
                                              (RaColorPrimaries) avctx->color_primaries,
                                              (RaTransferCharacteristics) avctx->color_trc);
    if (rret < 0) {
        /* Non-fatal unless the user asked for strict error detection. */
        av_log(avctx, AV_LOG_WARNING, "Failed to set color properties.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    ctx->ctx = rav1e_context_new(cfg);
    if (!ctx->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
        ret = AVERROR_EXTERNAL;
        goto end;
    }

    ret = 0;

end:
    rav1e_config_unref(cfg);

    return ret;
}
  371. static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
  372. {
  373. librav1eContext *ctx = avctx->priv_data;
  374. RaFrame *rframe = ctx->rframe;
  375. RaPacket *rpkt = NULL;
  376. int ret;
  377. if (!rframe) {
  378. AVFrame *frame = ctx->frame;
  379. ret = ff_encode_get_frame(avctx, frame);
  380. if (ret < 0 && ret != AVERROR_EOF)
  381. return ret;
  382. if (frame->buf[0]) {
  383. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
  384. int64_t *pts = av_malloc(sizeof(int64_t));
  385. if (!pts) {
  386. av_log(avctx, AV_LOG_ERROR, "Could not allocate PTS buffer.\n");
  387. return AVERROR(ENOMEM);
  388. }
  389. *pts = frame->pts;
  390. rframe = rav1e_frame_new(ctx->ctx);
  391. if (!rframe) {
  392. av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
  393. av_frame_unref(frame);
  394. av_freep(&pts);
  395. return AVERROR(ENOMEM);
  396. }
  397. for (int i = 0; i < desc->nb_components; i++) {
  398. int shift = i ? desc->log2_chroma_h : 0;
  399. int bytes = desc->comp[0].depth == 8 ? 1 : 2;
  400. rav1e_frame_fill_plane(rframe, i, frame->data[i],
  401. (frame->height >> shift) * frame->linesize[i],
  402. frame->linesize[i], bytes);
  403. }
  404. av_frame_unref(frame);
  405. rav1e_frame_set_opaque(rframe, pts, av_free);
  406. }
  407. }
  408. ret = rav1e_send_frame(ctx->ctx, rframe);
  409. if (rframe)
  410. if (ret == RA_ENCODER_STATUS_ENOUGH_DATA) {
  411. ctx->rframe = rframe; /* Queue is full. Store the RaFrame to retry next call */
  412. } else {
  413. rav1e_frame_unref(rframe); /* No need to unref if flushing. */
  414. ctx->rframe = NULL;
  415. }
  416. switch (ret) {
  417. case RA_ENCODER_STATUS_SUCCESS:
  418. case RA_ENCODER_STATUS_ENOUGH_DATA:
  419. break;
  420. case RA_ENCODER_STATUS_FAILURE:
  421. av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
  422. return AVERROR_EXTERNAL;
  423. default:
  424. av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));
  425. return AVERROR_UNKNOWN;
  426. }
  427. retry:
  428. if (avctx->flags & AV_CODEC_FLAG_PASS1) {
  429. int sret = get_stats(avctx, 0);
  430. if (sret < 0)
  431. return sret;
  432. } else if (avctx->flags & AV_CODEC_FLAG_PASS2) {
  433. int sret = set_stats(avctx);
  434. if (sret < 0)
  435. return sret;
  436. }
  437. ret = rav1e_receive_packet(ctx->ctx, &rpkt);
  438. switch (ret) {
  439. case RA_ENCODER_STATUS_SUCCESS:
  440. break;
  441. case RA_ENCODER_STATUS_LIMIT_REACHED:
  442. if (avctx->flags & AV_CODEC_FLAG_PASS1) {
  443. int sret = get_stats(avctx, 1);
  444. if (sret < 0)
  445. return sret;
  446. }
  447. return AVERROR_EOF;
  448. case RA_ENCODER_STATUS_ENCODED:
  449. goto retry;
  450. case RA_ENCODER_STATUS_NEED_MORE_DATA:
  451. if (avctx->internal->draining) {
  452. av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
  453. return AVERROR_EXTERNAL;
  454. }
  455. return AVERROR(EAGAIN);
  456. case RA_ENCODER_STATUS_FAILURE:
  457. av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
  458. return AVERROR_EXTERNAL;
  459. default:
  460. av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));
  461. return AVERROR_UNKNOWN;
  462. }
  463. ret = av_new_packet(pkt, rpkt->len);
  464. if (ret < 0) {
  465. av_log(avctx, AV_LOG_ERROR, "Could not allocate packet.\n");
  466. rav1e_packet_unref(rpkt);
  467. return ret;
  468. }
  469. memcpy(pkt->data, rpkt->data, rpkt->len);
  470. if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
  471. pkt->flags |= AV_PKT_FLAG_KEY;
  472. pkt->pts = pkt->dts = *((int64_t *) rpkt->opaque);
  473. av_free(rpkt->opaque);
  474. rav1e_packet_unref(rpkt);
  475. if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
  476. int ret = av_bsf_send_packet(ctx->bsf, pkt);
  477. if (ret < 0) {
  478. av_log(avctx, AV_LOG_ERROR, "extradata extraction send failed.\n");
  479. av_packet_unref(pkt);
  480. return ret;
  481. }
  482. ret = av_bsf_receive_packet(ctx->bsf, pkt);
  483. if (ret < 0) {
  484. av_log(avctx, AV_LOG_ERROR, "extradata extraction receive failed.\n");
  485. av_packet_unref(pkt);
  486. return ret;
  487. }
  488. }
  489. return 0;
  490. }
  491. #define OFFSET(x) offsetof(librav1eContext, x)
  492. #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
  493. static const AVOption options[] = {
  494. { "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
  495. { "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
  496. { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  497. { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  498. { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
  499. { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
  500. { NULL }
  501. };
/* Override generic codec defaults so "unset" is distinguishable:
 * librav1e_encode_init only forwards these to rav1e when they are
 * explicitly set by the user. */
static const AVCodecDefault librav1e_defaults[] = {
    { "b", "0" },           // no target bitrate unless requested (qp mode default)
    { "g", "0" },           // keyframe interval: leave to rav1e
    { "keyint_min", "0" },  // min keyframe interval: leave to rav1e
    { "qmax", "-1" },       // -1 = unset (init substitutes 255 in bitrate mode)
    { "qmin", "-1" },       // -1 = unset
    { NULL }
};
/* Pixel formats accepted by the wrapper: 4:2:0/4:2:2/4:4:4 at 8/10/12 bits,
 * plus the deprecated full-range YUVJ aliases (see range_map()/pix_fmt_map()).
 * NOTE(review): declared without 'static', so this symbol has external
 * linkage — presumably referenced from another file; confirm before making
 * it static. */
const enum AVPixelFormat librav1e_pix_fmts[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUV420P10,
    AV_PIX_FMT_YUV420P12,
    AV_PIX_FMT_YUV422P,
    AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUV422P10,
    AV_PIX_FMT_YUV422P12,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_NONE
};
/* AVClass exposing the private options above for AVOptions and logging. */
static const AVClass class = {
    .class_name = "librav1e",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Codec registration for the librav1e AV1 encoder wrapper.
 * AV_CODEC_CAP_DELAY: packets may lag input frames (rav1e queues internally);
 * FF_CODEC_CAP_INIT_CLEANUP: close() is called even if init() fails. */
AVCodec ff_librav1e_encoder = {
    .name           = "librav1e",
    .long_name      = NULL_IF_CONFIG_SMALL("librav1e AV1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .init           = librav1e_encode_init,
    .receive_packet = librav1e_receive_packet,
    .close          = librav1e_encode_close,
    .priv_data_size = sizeof(librav1eContext),
    .priv_class     = &class,
    .defaults       = librav1e_defaults,
    .pix_fmts       = librav1e_pix_fmts,
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name   = "librav1e",
};