/*
 * Copyright (C) 2016 Open Broadcast Systems Ltd.
 * Author        2016 Rostislav Pehlivanov <atomnuker@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "dirac.h"
#include "put_bits.h"
#include "internal.h"
#include "version.h"

#include "vc2enc_dwt.h"
#include "diractab.h"
/* Quantizations above this usually zero coefficients and lower the quality */
#define MAX_QUANT_INDEX 50

/* Total range is -COEF_LUT_TAB to +COEF_LUT_TAB, but the total table size is
 * half of that (COEF_LUT_TAB*MAX_QUANT_INDEX) since the sign is appended
 * during encoding */
#define COEF_LUT_TAB 2048
enum VC2_QM {
    VC2_QM_DEF = 0,
    VC2_QM_COL,
    VC2_QM_FLAT,

    VC2_QM_NB
};

typedef struct SubBand {
    dwtcoef *buf;
    ptrdiff_t stride;
    int width;
    int height;
} SubBand;

typedef struct Plane {
    SubBand band[MAX_DWT_LEVELS][4];
    dwtcoef *coef_buf;
    int width;
    int height;
    int dwt_width;
    int dwt_height;
    ptrdiff_t coef_stride;
} Plane;

typedef struct SliceArgs {
    PutBitContext pb;
    void *ctx;
    int x;
    int y;
    int quant_idx;
    int bits_ceil;
    int bytes;
} SliceArgs;

typedef struct TransformArgs {
    void *ctx;
    Plane *plane;
    void *idata;
    ptrdiff_t istride;
    int field;
    VC2TransformContext t;
} TransformArgs;

typedef struct VC2EncContext {
    AVClass *av_class;
    PutBitContext pb;
    Plane plane[3];
    AVCodecContext *avctx;
    DiracVersionInfo ver;

    SliceArgs *slice_args;
    TransformArgs transform_args[3];

    /* For conversion from unsigned pixel values to signed */
    int diff_offset;
    int bpp;

    /* Picture number */
    uint32_t picture_number;

    /* Base video format */
    int base_vf;
    int level;
    int profile;

    /* Quantization matrix */
    uint8_t quant[MAX_DWT_LEVELS][4];

    /* Coefficient LUT */
    uint16_t *coef_lut_val;
    uint8_t  *coef_lut_len;

    int num_x; /* #slices horizontally */
    int num_y; /* #slices vertically */
    int prefix_bytes;
    int size_scaler;
    int chroma_x_shift;
    int chroma_y_shift;

    /* Rate control stuff */
    int slice_max_bytes;
    int q_ceil;
    int q_start;

    /* Options */
    double tolerance;
    int wavelet_idx;
    int wavelet_depth;
    int strict_compliance;
    int slice_height;
    int slice_width;
    int interlaced;
    enum VC2_QM quant_matrix;

    /* Parse code state */
    uint32_t next_parse_offset;
    enum DiracParseCodes last_parse_code;
} VC2EncContext;

static av_always_inline void put_padding(PutBitContext *pb, int bytes)
{
    int bits = bytes*8;
    if (!bits)
        return;
    while (bits > 31) {
        put_bits(pb, 31, 0);
        bits -= 31;
    }
    if (bits)
        put_bits(pb, bits, 0);
}
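
/* The three functions below implement the interleaved exp-Golomb codes that
 * VC-2 (like Dirac) uses for unsigned values: take val+1 in binary, drop the
 * leading 1, prefix every remaining bit with a 0 "follow" bit and terminate
 * with a 1. Worked examples:
 *     0 -> "1", 1 -> "001", 2 -> "011", 3 -> "00001", 4 -> "00011"
 * so a value costs 2*floor(log2(val+1)) + 1 bits in total. */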
static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint16_t val)
{
    int i;
    int pbits = 0, bits = 0, topbit = 1, maxval = 1;

    if (!val++) {
        put_bits(pb, 1, 1);
        return;
    }

    while (val > maxval) {
        topbit <<= 1;
        maxval <<= 1;
        maxval |= 1;
    }

    bits = ff_log2(topbit);

    for (i = 0; i < bits; i++) {
        topbit >>= 1;
        pbits <<= 2;
        if (val & topbit)
            pbits |= 0x1;
    }

    put_bits(pb, bits*2 + 1, (pbits << 1) | 1);
}

static av_always_inline int count_vc2_ue_uint(uint16_t val)
{
    int topbit = 1, maxval = 1;

    if (!val++)
        return 1;

    while (val > maxval) {
        topbit <<= 1;
        maxval <<= 1;
        maxval |= 1;
    }

    return ff_log2(topbit)*2 + 1;
}

static av_always_inline void get_vc2_ue_uint(uint16_t val, uint8_t *nbits,
                                             uint16_t *eval)
{
    int i;
    int pbits = 0, bits = 0, topbit = 1, maxval = 1;

    if (!val++) {
        *nbits = 1;
        *eval = 1;
        return;
    }

    while (val > maxval) {
        topbit <<= 1;
        maxval <<= 1;
        maxval |= 1;
    }

    bits = ff_log2(topbit);

    for (i = 0; i < bits; i++) {
        topbit >>= 1;
        pbits <<= 2;
        if (val & topbit)
            pbits |= 0x1;
    }

    *nbits = bits*2 + 1;
    *eval = (pbits << 1) | 1;
}
/* VC-2 10.4 - parse_info() */
static void encode_parse_info(VC2EncContext *s, enum DiracParseCodes pcode)
{
    uint32_t cur_pos, dist;

    avpriv_align_put_bits(&s->pb);

    cur_pos = put_bits_count(&s->pb) >> 3;

    /* Magic string */
    avpriv_put_string(&s->pb, "BBCD", 0);

    /* Parse code */
    put_bits(&s->pb, 8, pcode);

    /* Next parse offset */
    dist = cur_pos - s->next_parse_offset;
    AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
    s->next_parse_offset = cur_pos;
    put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);

    /* Last parse offset */
    put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 : dist);

    s->last_parse_code = pcode;
}
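
/* Note on the magic value above: a parse_info block is always 13 bytes long
 * (4-byte "BBCD" prefix + 1-byte parse code + two 4-byte offsets), so at the
 * end of a sequence the next/previous parse offsets are simply 13. */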
/* VC-2 11.1 - parse_parameters()
 * The level dictates what the decoder should expect in terms of resolution
 * and allows it to quickly reject whatever it can't support. Remember,
 * this codec kinda targets cheapo FPGAs without much memory. Unfortunately
 * it also limits us greatly in our choice of formats, hence the flag to disable
 * strict_compliance */
static void encode_parse_params(VC2EncContext *s)
{
    put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2 */
    put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0 */
    put_vc2_ue_uint(&s->pb, s->profile);   /* 3 to signal HQ profile */
    put_vc2_ue_uint(&s->pb, s->level);     /* 3 - 1080/720, 6 - 4K */
}

/* VC-2 11.3 - frame_size() */
static void encode_frame_size(VC2EncContext *s)
{
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance) {
        AVCodecContext *avctx = s->avctx;
        put_vc2_ue_uint(&s->pb, avctx->width);
        put_vc2_ue_uint(&s->pb, avctx->height);
    }
}

/* VC-2 11.3.3 - color_diff_sampling_format() */
static void encode_sample_fmt(VC2EncContext *s)
{
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance) {
        int idx;
        if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
            idx = 1; /* 422 */
        else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
            idx = 2; /* 420 */
        else
            idx = 0; /* 444 */
        put_vc2_ue_uint(&s->pb, idx);
    }
}

/* VC-2 11.3.4 - scan_format() */
static void encode_scan_format(VC2EncContext *s)
{
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance)
        put_vc2_ue_uint(&s->pb, s->interlaced);
}

/* VC-2 11.3.5 - frame_rate() */
static void encode_frame_rate(VC2EncContext *s)
{
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance) {
        AVCodecContext *avctx = s->avctx;
        put_vc2_ue_uint(&s->pb, 0);
        put_vc2_ue_uint(&s->pb, avctx->time_base.den);
        put_vc2_ue_uint(&s->pb, avctx->time_base.num);
    }
}

/* VC-2 11.3.6 - aspect_ratio() */
static void encode_aspect_ratio(VC2EncContext *s)
{
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance) {
        AVCodecContext *avctx = s->avctx;
        put_vc2_ue_uint(&s->pb, 0);
        put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
        put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
    }
}

/* VC-2 11.3.7 - clean_area() */
static void encode_clean_area(VC2EncContext *s)
{
    put_bits(&s->pb, 1, 0);
}

/* VC-2 11.3.8 - signal_range() */
static void encode_signal_range(VC2EncContext *s)
{
    int idx;
    AVCodecContext *avctx = s->avctx;
    const AVPixFmtDescriptor *fmt = av_pix_fmt_desc_get(avctx->pix_fmt);
    const int depth = fmt->comp[0].depth;
    if (depth == 8 && avctx->color_range == AVCOL_RANGE_JPEG) {
        idx = 1;
        s->bpp = 1;
        s->diff_offset = 128;
    } else if (depth == 8 && (avctx->color_range == AVCOL_RANGE_MPEG ||
               avctx->color_range == AVCOL_RANGE_UNSPECIFIED)) {
        idx = 2;
        s->bpp = 1;
        s->diff_offset = 128;
    } else if (depth == 10) {
        idx = 3;
        s->bpp = 2;
        s->diff_offset = 512;
    } else {
        idx = 4;
        s->bpp = 2;
        s->diff_offset = 2048;
    }
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance)
        put_vc2_ue_uint(&s->pb, idx);
}

/* VC-2 11.3.9 - color_spec() */
static void encode_color_spec(VC2EncContext *s)
{
    AVCodecContext *avctx = s->avctx;
    put_bits(&s->pb, 1, !s->strict_compliance);
    if (!s->strict_compliance) {
        int val;
        put_vc2_ue_uint(&s->pb, 0);

        /* primaries */
        put_bits(&s->pb, 1, 1);
        if (avctx->color_primaries == AVCOL_PRI_BT470BG)
            val = 2;
        else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
            val = 1;
        else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
            val = 1;
        else
            val = 0;
        put_vc2_ue_uint(&s->pb, val);

        /* color matrix */
        put_bits(&s->pb, 1, 1);
        if (avctx->colorspace == AVCOL_SPC_RGB)
            val = 3;
        else if (avctx->colorspace == AVCOL_SPC_YCOCG)
            val = 2;
        else if (avctx->colorspace == AVCOL_SPC_BT470BG)
            val = 1;
        else
            val = 0;
        put_vc2_ue_uint(&s->pb, val);

        /* transfer function */
        put_bits(&s->pb, 1, 1);
        if (avctx->color_trc == AVCOL_TRC_LINEAR)
            val = 2;
        else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
            val = 1;
        else
            val = 0;
        put_vc2_ue_uint(&s->pb, val);
    }
}

/* VC-2 11.3 - source_parameters() */
static void encode_source_params(VC2EncContext *s)
{
    encode_frame_size(s);
    encode_sample_fmt(s);
    encode_scan_format(s);
    encode_frame_rate(s);
    encode_aspect_ratio(s);
    encode_clean_area(s);
    encode_signal_range(s);
    encode_color_spec(s);
}

/* VC-2 11 - sequence_header() */
static void encode_seq_header(VC2EncContext *s)
{
    avpriv_align_put_bits(&s->pb);
    encode_parse_params(s);
    put_vc2_ue_uint(&s->pb, s->base_vf);
    encode_source_params(s);
    put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding */
}

/* VC-2 12.1 - picture_header() */
static void encode_picture_header(VC2EncContext *s)
{
    avpriv_align_put_bits(&s->pb);
    put_bits32(&s->pb, s->picture_number++);
}

/* VC-2 12.3.4.1 - slice_parameters() */
static void encode_slice_params(VC2EncContext *s)
{
    put_vc2_ue_uint(&s->pb, s->num_x);
    put_vc2_ue_uint(&s->pb, s->num_y);
    put_vc2_ue_uint(&s->pb, s->prefix_bytes);
    put_vc2_ue_uint(&s->pb, s->size_scaler);
}
/* 1st idx = LL, second - vertical, third - horizontal, fourth - total */
static const uint8_t vc2_qm_col_tab[][4] = {
    {20,  9, 15,  4},
    { 0,  6,  6,  4},
    { 0,  3,  3,  5},
    { 0,  3,  5,  1},
    { 0, 11, 10, 11}
};

static const uint8_t vc2_qm_flat_tab[][4] = {
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0},
    { 0,  0,  0,  0}
};
static void init_custom_qm(VC2EncContext *s)
{
    int level, orientation;

    if (s->quant_matrix == VC2_QM_DEF) {
        for (level = 0; level < s->wavelet_depth; level++) {
            for (orientation = 0; orientation < 4; orientation++) {
                if (level <= 3)
                    s->quant[level][orientation] = ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
                else
                    s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
            }
        }
    } else if (s->quant_matrix == VC2_QM_COL) {
        for (level = 0; level < s->wavelet_depth; level++) {
            for (orientation = 0; orientation < 4; orientation++) {
                s->quant[level][orientation] = vc2_qm_col_tab[level][orientation];
            }
        }
    } else {
        for (level = 0; level < s->wavelet_depth; level++) {
            for (orientation = 0; orientation < 4; orientation++) {
                s->quant[level][orientation] = vc2_qm_flat_tab[level][orientation];
            }
        }
    }
}

/* VC-2 12.3.4.2 - quant_matrix() */
static void encode_quant_matrix(VC2EncContext *s)
{
    int level, custom_quant_matrix = 0;
    if (s->wavelet_depth > 4 || s->quant_matrix != VC2_QM_DEF)
        custom_quant_matrix = 1;
    put_bits(&s->pb, 1, custom_quant_matrix);
    if (custom_quant_matrix) {
        init_custom_qm(s);
        put_vc2_ue_uint(&s->pb, s->quant[0][0]);
        for (level = 0; level < s->wavelet_depth; level++) {
            put_vc2_ue_uint(&s->pb, s->quant[level][1]);
            put_vc2_ue_uint(&s->pb, s->quant[level][2]);
            put_vc2_ue_uint(&s->pb, s->quant[level][3]);
        }
    } else {
        for (level = 0; level < s->wavelet_depth; level++) {
            s->quant[level][0] = ff_dirac_default_qmat[s->wavelet_idx][level][0];
            s->quant[level][1] = ff_dirac_default_qmat[s->wavelet_idx][level][1];
            s->quant[level][2] = ff_dirac_default_qmat[s->wavelet_idx][level][2];
            s->quant[level][3] = ff_dirac_default_qmat[s->wavelet_idx][level][3];
        }
    }
}

/* VC-2 12.3 - transform_parameters() */
static void encode_transform_params(VC2EncContext *s)
{
    put_vc2_ue_uint(&s->pb, s->wavelet_idx);
    put_vc2_ue_uint(&s->pb, s->wavelet_depth);

    encode_slice_params(s);
    encode_quant_matrix(s);
}

/* VC-2 12.2 - wavelet_transform() */
static void encode_wavelet_transform(VC2EncContext *s)
{
    encode_transform_params(s);
    avpriv_align_put_bits(&s->pb);
    /* Continued after DWT in encode_transform_data() */
}

/* VC-2 12 - picture_parse() */
static void encode_picture_start(VC2EncContext *s)
{
    avpriv_align_put_bits(&s->pb);
    encode_picture_header(s);
    avpriv_align_put_bits(&s->pb);
    encode_wavelet_transform(s);
}

#define QUANT(c, qf) (((c) << 2)/(qf))
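
/* The shift by 2 above gives coefficients two extra fractional bits before
 * the division, matching how ff_dirac_qscale_tab appears to be laid out: the
 * table stores the spec quantization factors pre-multiplied by 4 (roughly
 * 4*2^(q/4)), so QUANT(c, qf) effectively computes c / 2^(q/4). */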
/* VC-2 13.5.5.2 - slice_band() */
static void encode_subband(VC2EncContext *s, PutBitContext *pb, int sx, int sy,
                           SubBand *b, int quant)
{
    int x, y;

    const int left   = b->width  * (sx+0) / s->num_x;
    const int right  = b->width  * (sx+1) / s->num_x;
    const int top    = b->height * (sy+0) / s->num_y;
    const int bottom = b->height * (sy+1) / s->num_y;

    const int qfactor = ff_dirac_qscale_tab[quant];
    const uint8_t  *len_lut = &s->coef_lut_len[quant*COEF_LUT_TAB];
    const uint16_t *val_lut = &s->coef_lut_val[quant*COEF_LUT_TAB];

    dwtcoef *coeff = b->buf + top * b->stride;

    for (y = top; y < bottom; y++) {
        for (x = left; x < right; x++) {
            const int neg = coeff[x] < 0;
            uint16_t c_abs = FFABS(coeff[x]);
            if (c_abs < COEF_LUT_TAB) {
                const uint8_t len = len_lut[c_abs];
                if (len == 1)
                    put_bits(pb, 1, 1);
                else
                    put_bits(pb, len + 1, (val_lut[c_abs] << 1) | neg);
            } else {
                c_abs = QUANT(c_abs, qfactor);
                put_vc2_ue_uint(pb, c_abs);
                if (c_abs)
                    put_bits(pb, 1, neg);
            }
        }
        coeff += b->stride;
    }
}
static int count_hq_slice(VC2EncContext *s, int slice_x,
                          int slice_y, int quant_idx)
{
    int x, y;
    uint8_t quants[MAX_DWT_LEVELS][4];
    int bits = 0, p, level, orientation;

    bits += 8*s->prefix_bytes;
    bits += 8; /* quant_idx */

    for (level = 0; level < s->wavelet_depth; level++)
        for (orientation = !!level; orientation < 4; orientation++)
            quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);

    for (p = 0; p < 3; p++) {
        int bytes_start, bytes_len, pad_s, pad_c;
        bytes_start = bits >> 3;
        bits += 8;
        for (level = 0; level < s->wavelet_depth; level++) {
            for (orientation = !!level; orientation < 4; orientation++) {
                SubBand *b = &s->plane[p].band[level][orientation];

                const int q_idx = quants[level][orientation];
                const uint8_t *len_lut = &s->coef_lut_len[q_idx*COEF_LUT_TAB];
                const int qfactor = ff_dirac_qscale_tab[q_idx];

                const int left   = b->width  *  slice_x    / s->num_x;
                const int right  = b->width  * (slice_x+1) / s->num_x;
                const int top    = b->height *  slice_y    / s->num_y;
                const int bottom = b->height * (slice_y+1) / s->num_y;

                dwtcoef *buf = b->buf + top * b->stride;

                for (y = top; y < bottom; y++) {
                    for (x = left; x < right; x++) {
                        uint16_t c_abs = FFABS(buf[x]);
                        if (c_abs < COEF_LUT_TAB) {
                            const int len = len_lut[c_abs];
                            bits += len + (len != 1);
                        } else {
                            c_abs = QUANT(c_abs, qfactor);
                            bits += count_vc2_ue_uint(c_abs);
                            bits += !!c_abs;
                        }
                    }
                    buf += b->stride;
                }
            }
        }
        bits += FFALIGN(bits, 8) - bits;
        bytes_len = (bits >> 3) - bytes_start - 1;
        pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
        pad_c = (pad_s*s->size_scaler) - bytes_len;
        bits += pad_c*8;
    }

    return bits;
}
/* Approaches the best possible quantizer asymptotically. It's somewhat
 * exhaustive, but we have a LUT to get each coefficient's size in bits.
 * Guaranteed to never overshoot, which is apparently very important when
 * streaming */
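/* In other words, a shrinking-step search: the step starts at q_start/3
 * (rounded down to an even number), the quantizer moves up while the slice is
 * too big and down while it undershoots past the tolerance window, and the
 * step halves every iteration. The two-deep quant_buf history detects
 * oscillation between two neighbouring quantizers and keeps the better one. */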
static int rate_control(AVCodecContext *avctx, void *arg)
{
    SliceArgs *slice_dat = arg;
    VC2EncContext *s = slice_dat->ctx;
    const int sx = slice_dat->x;
    const int sy = slice_dat->y;
    int bits_last = INT_MAX, quant_buf[2] = {-1, -1};
    int quant = s->q_start, range = s->q_start/3;
    const int64_t top = slice_dat->bits_ceil;
    const double percent = s->tolerance;
    const double bottom = top - top*(percent/100.0f);
    int bits = count_hq_slice(s, sx, sy, quant);
    range -= range & 1; /* Make it an even number */
    while ((bits > top) || (bits < bottom)) {
        range *= bits > top ? +1 : -1;
        quant = av_clip(quant + range, 0, s->q_ceil);
        bits = count_hq_slice(s, sx, sy, quant);
        range = av_clip(range/2, 1, s->q_ceil);
        if (quant_buf[1] == quant) {
            quant = bits_last < bits ? quant_buf[0] : quant;
            bits  = bits_last < bits ? bits_last : bits;
            break;
        }
        quant_buf[1] = quant_buf[0];
        quant_buf[0] = quant;
        bits_last = bits;
    }
    slice_dat->quant_idx = av_clip(quant, 0, s->q_ceil);
    slice_dat->bytes = FFALIGN((bits >> 3), s->size_scaler) + 4 + s->prefix_bytes;

    return 0;
}
static void calc_slice_sizes(VC2EncContext *s)
{
    int slice_x, slice_y;
    SliceArgs *enc_args = s->slice_args;

    for (slice_y = 0; slice_y < s->num_y; slice_y++) {
        for (slice_x = 0; slice_x < s->num_x; slice_x++) {
            SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
            args->ctx = s;
            args->x = slice_x;
            args->y = slice_y;
            args->bits_ceil = s->slice_max_bytes << 3;
        }
    }

    /* Determine quantization indices and bytes per slice */
    s->avctx->execute(s->avctx, rate_control, enc_args, NULL, s->num_x*s->num_y,
                      sizeof(SliceArgs));
}
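
/* In the HQ profile each plane's payload length inside a slice is signalled
 * as a single byte multiplied by the slice size scaler, which is why the
 * function below writes pad_s into the byte at bytes_start and then pads the
 * plane out to an exact multiple of size_scaler bytes with pad_c. */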
/* VC-2 13.5.3 - hq_slice */
static int encode_hq_slice(AVCodecContext *avctx, void *arg)
{
    SliceArgs *slice_dat = arg;
    VC2EncContext *s = slice_dat->ctx;
    PutBitContext *pb = &slice_dat->pb;
    const int slice_x = slice_dat->x;
    const int slice_y = slice_dat->y;
    const int quant_idx = slice_dat->quant_idx;
    const int slice_bytes_max = slice_dat->bytes;
    uint8_t quants[MAX_DWT_LEVELS][4];
    int p, level, orientation;

    avpriv_align_put_bits(pb);
    put_padding(pb, s->prefix_bytes);
    put_bits(pb, 8, quant_idx);

    /* Slice quantization (slice_quantizers() in the specs) */
    for (level = 0; level < s->wavelet_depth; level++)
        for (orientation = !!level; orientation < 4; orientation++)
            quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);

    /* Luma + 2 Chroma planes */
    for (p = 0; p < 3; p++) {
        int bytes_start, bytes_len, pad_s, pad_c;
        bytes_start = put_bits_count(pb) >> 3;
        put_bits(pb, 8, 0);
        for (level = 0; level < s->wavelet_depth; level++) {
            for (orientation = !!level; orientation < 4; orientation++) {
                encode_subband(s, pb, slice_x, slice_y,
                               &s->plane[p].band[level][orientation],
                               quants[level][orientation]);
            }
        }
        avpriv_align_put_bits(pb);
        bytes_len = (put_bits_count(pb) >> 3) - bytes_start - 1;
        if (p == 2) {
            int len_diff = slice_bytes_max - (put_bits_count(pb) >> 3);
            pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
            pad_c = (pad_s*s->size_scaler) - bytes_len;
        } else {
            pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
            pad_c = (pad_s*s->size_scaler) - bytes_len;
        }
        pb->buf[bytes_start] = pad_s;
        put_padding(pb, pad_c);
    }

    return 0;
}
/* VC-2 13.5.1 - low_delay_transform_data() */
static int encode_slices(VC2EncContext *s)
{
    uint8_t *buf;
    int slice_x, slice_y, skip = 0;
    SliceArgs *enc_args = s->slice_args;

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);
    buf = put_bits_ptr(&s->pb);

    for (slice_y = 0; slice_y < s->num_y; slice_y++) {
        for (slice_x = 0; slice_x < s->num_x; slice_x++) {
            SliceArgs *args = &enc_args[s->num_x*slice_y + slice_x];
            init_put_bits(&args->pb, buf + skip, args->bytes);
            s->q_start = (s->q_start + args->quant_idx)/2;
            skip += args->bytes;
        }
    }

    s->avctx->execute(s->avctx, encode_hq_slice, enc_args, NULL, s->num_x*s->num_y,
                      sizeof(SliceArgs));

    skip_put_bytes(&s->pb, skip);

    return 0;
}
/*
 * Transform basics for a 3 level transform
 * |---------------------------------------------------------------------|
 * |  LL-0  | HL-0  |                 |                                   |
 * |--------|-------|      HL-1       |                                   |
 * |  LH-0  | HH-0  |                 |                                   |
 * |----------------|-----------------|               HL-2               |
 * |                |                 |                                   |
 * |      LH-1      |      HH-1       |                                   |
 * |                |                 |                                   |
 * |----------------------------------|-----------------------------------|
 * |                                  |                                   |
 * |                                  |                                   |
 * |                                  |                                   |
 * |              LH-2                |               HH-2                |
 * |                                  |                                   |
 * |                                  |                                   |
 * |                                  |                                   |
 * |---------------------------------------------------------------------|
 *
 * DWT transforms are generally applied by splitting the image in two vertically
 * and applying a low pass transform on the left part and a corresponding high
 * pass transform on the right hand side. This is known as the horizontal filter
 * stage.
 * After that, the same operation is performed except the image is divided
 * horizontally, with the high pass on the lower and the low pass on the higher
 * side.
 * Therefore, you're left with 4 subdivisions - known as low-low, low-high,
 * high-low and high-high. They're referred to as orientations in the decoder
 * and encoder.
 *
 * The LL (low-low) area contains the original image downsampled by the amount
 * of levels. The rest of the areas can be thought of as the details needed
 * to restore the image perfectly to its original size.
 */
static int dwt_plane(AVCodecContext *avctx, void *arg)
{
    TransformArgs *transform_dat = arg;
    VC2EncContext *s = transform_dat->ctx;
    const void *frame_data = transform_dat->idata;
    const ptrdiff_t linesize = transform_dat->istride;
    const int field = transform_dat->field;
    const Plane *p = transform_dat->plane;
    VC2TransformContext *t = &transform_dat->t;
    dwtcoef *buf = p->coef_buf;
    const int idx = s->wavelet_idx;
    const int skip = 1 + s->interlaced;

    int x, y, level, offset;
    ptrdiff_t pix_stride = linesize >> (s->bpp - 1);

    if (field == 1) {
        offset = 0;
        pix_stride <<= 1;
    } else if (field == 2) {
        offset = pix_stride;
        pix_stride <<= 1;
    } else {
        offset = 0;
    }

    if (s->bpp == 1) {
        const uint8_t *pix = (const uint8_t *)frame_data + offset;
        for (y = 0; y < p->height*skip; y+=skip) {
            for (x = 0; x < p->width; x++) {
                buf[x] = pix[x] - s->diff_offset;
            }
            buf += p->coef_stride;
            pix += pix_stride;
        }
    } else {
        const uint16_t *pix = (const uint16_t *)frame_data + offset;
        for (y = 0; y < p->height*skip; y+=skip) {
            for (x = 0; x < p->width; x++) {
                buf[x] = pix[x] - s->diff_offset;
            }
            buf += p->coef_stride;
            pix += pix_stride;
        }
    }

    memset(buf, 0, p->coef_stride * (p->dwt_height - p->height) * sizeof(dwtcoef));
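
    /* Run the levels from the largest band down to the smallest: each
     * vc2_subband_dwt call re-transforms, in place in coef_buf, the LL
     * quadrant produced by the previous level. */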
    for (level = s->wavelet_depth-1; level >= 0; level--) {
        const SubBand *b = &p->band[level][0];
        t->vc2_subband_dwt[idx](t, p->coef_buf, p->coef_stride,
                                b->width, b->height);
    }

    return 0;
}
static void encode_frame(VC2EncContext *s, const AVFrame *frame,
                         const char *aux_data, int field)
{
    int i;

    /* Sequence header */
    encode_parse_info(s, DIRAC_PCODE_SEQ_HEADER);
    encode_seq_header(s);

    /* Encoder version */
    if (aux_data) {
        encode_parse_info(s, DIRAC_PCODE_AUX);
        avpriv_put_string(&s->pb, aux_data, 1);
    }

    /* Picture header */
    encode_parse_info(s, DIRAC_PCODE_PICTURE_HQ);
    encode_picture_start(s);

    for (i = 0; i < 3; i++) {
        s->transform_args[i].ctx = s;
        s->transform_args[i].field = field;
        s->transform_args[i].plane = &s->plane[i];
        s->transform_args[i].idata = frame->data[i];
        s->transform_args[i].istride = frame->linesize[i];
    }

    /* Do a DWT transform */
    s->avctx->execute(s->avctx, dwt_plane, s->transform_args, NULL, 3,
                      sizeof(TransformArgs));

    /* Calculate per-slice quantizers and sizes */
    calc_slice_sizes(s);

    /* Init planes and encode slices */
    encode_slices(s);

    /* End sequence */
    encode_parse_info(s, DIRAC_PCODE_END_SEQ);
}
static int vc2_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    int ret;
    int max_frame_bytes, sig_size = 256;
    VC2EncContext *s = avctx->priv_data;
    const char aux_data[] = LIBAVCODEC_IDENT;
    const int aux_data_size = sizeof(aux_data);
    const int header_size = 100 + aux_data_size;
    int64_t r_bitrate = avctx->bit_rate >> (s->interlaced);

    s->avctx = avctx;
    s->size_scaler = 1;
    s->prefix_bytes = 0;
    s->last_parse_code = 0;
    s->next_parse_offset = 0;

    /* Rate control */
    max_frame_bytes = (av_rescale(r_bitrate, s->avctx->time_base.num,
                                  s->avctx->time_base.den) >> 3) - header_size;
    /* Find an appropriate size scaler */
    while (sig_size > 255) {
        s->slice_max_bytes = FFALIGN(av_rescale(max_frame_bytes, 1,
                                     s->num_x*s->num_y), s->size_scaler);
        s->slice_max_bytes += 4 + s->prefix_bytes;
        sig_size = s->slice_max_bytes/s->size_scaler; /* Signalled slice size */
        s->size_scaler <<= 1;
    }
    ret = ff_alloc_packet2(avctx, avpkt, max_frame_bytes*2, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    } else {
        init_put_bits(&s->pb, avpkt->data, avpkt->size);
    }

    encode_frame(s, frame, aux_data, s->interlaced);
    if (s->interlaced)
        encode_frame(s, frame, NULL, 2);

    flush_put_bits(&s->pb);
    avpkt->size = put_bits_count(&s->pb) >> 3;

    *got_packet_ptr = 1;

    return 0;
}
static av_cold int vc2_encode_end(AVCodecContext *avctx)
{
    int i;
    VC2EncContext *s = avctx->priv_data;

    av_log(avctx, AV_LOG_INFO, "Qavg: %i\n", s->q_start);

    for (i = 0; i < 3; i++) {
        ff_vc2enc_free_transforms(&s->transform_args[i].t);
        av_freep(&s->plane[i].coef_buf);
    }

    av_freep(&s->slice_args);
    av_freep(&s->coef_lut_len);
    av_freep(&s->coef_lut_val);

    return 0;
}
static av_cold int vc2_encode_init(AVCodecContext *avctx)
{
    Plane *p;
    SubBand *b;
    int i, j, level, o, shift;
    VC2EncContext *s = avctx->priv_data;

    s->picture_number = 0;

    /* Total allowed quantization range */
    s->q_ceil = MAX_QUANT_INDEX;

    s->ver.major = 2;
    s->ver.minor = 0;
    s->profile = 3;
    s->level = 3;

    s->base_vf = -1;
    s->strict_compliance = 1;

    /* Mark unknown as progressive */
    s->interlaced = !((avctx->field_order == AV_FIELD_UNKNOWN) ||
                      (avctx->field_order == AV_FIELD_PROGRESSIVE));

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
        if (avctx->width == 1280 && avctx->height == 720) {
            s->level = 3;
            if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                s->base_vf = 9;
            if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                s->base_vf = 10;
        } else if (avctx->width == 1920 && avctx->height == 1080) {
            s->level = 3;
            if (s->interlaced) {
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 30000)
                    s->base_vf = 11;
                if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                    s->base_vf = 12;
            } else {
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                    s->base_vf = 13;
                if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                    s->base_vf = 14;
                if (avctx->time_base.num == 1001 && avctx->time_base.den == 24000)
                    s->base_vf = 21;
            }
        } else if (avctx->width == 3840 && avctx->height == 2160) {
            s->level = 6;
            if (avctx->time_base.num == 1001 && avctx->time_base.den == 60000)
                s->base_vf = 17;
            if (avctx->time_base.num == 1 && avctx->time_base.den == 50)
                s->base_vf = 18;
        }
    }

    if (s->interlaced && s->base_vf <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Interlacing not supported with non standard formats!\n");
        return AVERROR_UNKNOWN;
    }

    if (s->interlaced)
        av_log(avctx, AV_LOG_WARNING, "Interlacing enabled!\n");

    if ((s->slice_width  & (s->slice_width  - 1)) ||
        (s->slice_height & (s->slice_height - 1))) {
        av_log(avctx, AV_LOG_ERROR, "Slice size is not a power of two!\n");
        return AVERROR_UNKNOWN;
    }

    if ((s->slice_width > avctx->width) ||
        (s->slice_height > avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "Slice size is bigger than the image!\n");
        return AVERROR_UNKNOWN;
    }

    if (s->base_vf <= 0) {
        if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            s->strict_compliance = s->base_vf = 0;
            av_log(avctx, AV_LOG_WARNING, "Disabling strict compliance\n");
        } else {
            av_log(avctx, AV_LOG_WARNING, "Given format does not strictly comply with "
                   "the specifications, please add a -strict -1 flag to use it\n");
            return AVERROR_UNKNOWN;
        }
    } else {
        av_log(avctx, AV_LOG_INFO, "Selected base video format = %i\n", s->base_vf);
    }

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    /* Planes initialization */
    for (i = 0; i < 3; i++) {
        int w, h;
        p = &s->plane[i];
        p->width  = avctx->width  >> (i ? s->chroma_x_shift : 0);
        p->height = avctx->height >> (i ? s->chroma_y_shift : 0);
        if (s->interlaced)
            p->height >>= 1;
        p->dwt_width  = w = FFALIGN(p->width,  (1 << s->wavelet_depth));
        p->dwt_height = h = FFALIGN(p->height, (1 << s->wavelet_depth));
        p->coef_stride = FFALIGN(p->dwt_width, 32);
        p->coef_buf = av_malloc(p->coef_stride*p->dwt_height*sizeof(dwtcoef));
        if (!p->coef_buf)
            goto alloc_fail;
        for (level = s->wavelet_depth-1; level >= 0; level--) {
            w = w >> 1;
            h = h >> 1;
            for (o = 0; o < 4; o++) {
                b = &p->band[level][o];
                b->width  = w;
                b->height = h;
                b->stride = p->coef_stride;
                shift = (o > 1)*b->height*b->stride + (o & 1)*b->width;
                b->buf = p->coef_buf + shift;
            }
        }

        /* DWT init */
        if (ff_vc2enc_init_transforms(&s->transform_args[i].t,
                                      s->plane[0].coef_stride,
                                      s->plane[0].dwt_height))
            goto alloc_fail;
    }

    /* Slices */
    s->num_x = s->plane[0].dwt_width/s->slice_width;
    s->num_y = s->plane[0].dwt_height/s->slice_height;

    s->slice_args = av_malloc(s->num_x*s->num_y*sizeof(SliceArgs));
    if (!s->slice_args)
        goto alloc_fail;

    /* Lookup tables */
    s->coef_lut_len = av_malloc(COEF_LUT_TAB*s->q_ceil*sizeof(*s->coef_lut_len));
    if (!s->coef_lut_len)
        goto alloc_fail;

    s->coef_lut_val = av_malloc(COEF_LUT_TAB*s->q_ceil*sizeof(*s->coef_lut_val));
    if (!s->coef_lut_val)
        goto alloc_fail;
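
    /* Precompute, for every quantizer and every coefficient magnitude below
     * COEF_LUT_TAB, the exp-Golomb codeword (value and length) of the
     * quantized magnitude; the hot loops in encode_subband() and
     * count_hq_slice() then avoid requantizing and recounting per coefficient. */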
    for (i = 0; i < s->q_ceil; i++) {
        for (j = 0; j < COEF_LUT_TAB; j++) {
            uint8_t  *len_lut = &s->coef_lut_len[i*COEF_LUT_TAB];
            uint16_t *val_lut = &s->coef_lut_val[i*COEF_LUT_TAB];
            get_vc2_ue_uint(QUANT(j, ff_dirac_qscale_tab[i]),
                            &len_lut[j], &val_lut[j]);
        }
    }

    return 0;

alloc_fail:
    vc2_encode_end(avctx);
    av_log(avctx, AV_LOG_ERROR, "Unable to allocate memory!\n");
    return AVERROR(ENOMEM);
}
#define VC2ENC_FLAGS (AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

static const AVOption vc2enc_options[] = {
    {"tolerance",     "Max undershoot in percent", offsetof(VC2EncContext, tolerance), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0f}, 0.0f, 45.0f, VC2ENC_FLAGS, "tolerance"},
    {"slice_width",   "Slice width",  offsetof(VC2EncContext, slice_width), AV_OPT_TYPE_INT, {.i64 = 128}, 32, 1024, VC2ENC_FLAGS, "slice_width"},
    {"slice_height",  "Slice height", offsetof(VC2EncContext, slice_height), AV_OPT_TYPE_INT, {.i64 = 64}, 8, 1024, VC2ENC_FLAGS, "slice_height"},
    {"wavelet_depth", "Transform depth", offsetof(VC2EncContext, wavelet_depth), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 5, VC2ENC_FLAGS, "wavelet_depth"},
    {"wavelet_type",  "Transform type",  offsetof(VC2EncContext, wavelet_idx), AV_OPT_TYPE_INT, {.i64 = VC2_TRANSFORM_9_7}, 0, VC2_TRANSFORMS_NB, VC2ENC_FLAGS, "wavelet_idx"},
        {"9_7",          "Deslauriers-Dubuc (9,7)", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_9_7},    INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
        {"5_3",          "LeGall (5,3)",            0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_5_3},    INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
        {"haar",         "Haar (with shift)",       0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR_S}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
        {"haar_noshift", "Haar (without shift)",    0, AV_OPT_TYPE_CONST, {.i64 = VC2_TRANSFORM_HAAR},   INT_MIN, INT_MAX, VC2ENC_FLAGS, "wavelet_idx"},
    {"qm", "Custom quantization matrix", offsetof(VC2EncContext, quant_matrix), AV_OPT_TYPE_INT, {.i64 = VC2_QM_DEF}, 0, VC2_QM_NB, VC2ENC_FLAGS, "quant_matrix"},
        {"default", "Default from the specifications",    0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_DEF},  INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
        {"color",   "Prevents low bitrate discoloration", 0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_COL},  INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
        {"flat",    "Optimize for PSNR",                  0, AV_OPT_TYPE_CONST, {.i64 = VC2_QM_FLAT}, INT_MIN, INT_MAX, VC2ENC_FLAGS, "quant_matrix"},
    {NULL}
};
static const AVClass vc2enc_class = {
    .class_name = "SMPTE VC-2 encoder",
    .category = AV_CLASS_CATEGORY_ENCODER,
    .option = vc2enc_options,
    .item_name = av_default_item_name,
    .version = LIBAVUTIL_VERSION_INT
};

static const AVCodecDefault vc2enc_defaults[] = {
    { "b", "600000000" },
    { NULL },
};

static const enum AVPixelFormat allowed_pix_fmts[] = {
    AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_NONE
};

AVCodec ff_vc2_encoder = {
    .name           = "vc2",
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DIRAC,
    .priv_data_size = sizeof(VC2EncContext),
    .init           = vc2_encode_init,
    .close          = vc2_encode_end,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
    .encode2        = vc2_encode_frame,
    .priv_class     = &vc2enc_class,
    .defaults       = vc2enc_defaults,
    .pix_fmts       = allowed_pix_fmts
};