You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1946 lines
62KB

  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
// Read/write an OBU header (AV1 spec 5.3.2).  Like every function in this
// template, the element macros (fc/fb/flag/infer) expand to either bitstream
// reads or writes depending on how the including file instantiates them, and
// may jump to error handling through the local 'err'.
static int FUNC(obu_header)(CodedBitstreamContext *ctx, RWContext *rw,
                            AV1RawOBUHeader *current)
{
    int err;

    HEADER("OBU header");

    // obu_forbidden_bit and obu_reserved_1bit must both be zero;
    // fc() enforces the allowed value range.
    fc(1, obu_forbidden_bit, 0, 0);

    fc(4, obu_type, 0, AV1_OBU_PADDING);
    flag(obu_extension_flag);
    flag(obu_has_size_field);
    fc(1, obu_reserved_1bit, 0, 0);

    // Extension header is only present when signalled above.
    if (current->obu_extension_flag) {
        fb(3, temporal_id);
        fb(2, spatial_id);
        fc(3, extension_header_reserved_3bits, 0, 0);
    }

    return 0;
}
// Consume/emit trailing bits: a single one bit followed by zero bits until
// nb_bits are used up (AV1 spec 5.3.4).  nb_bits must be strictly positive.
static int FUNC(trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw, int nb_bits)
{
    int err;

    av_assert0(nb_bits > 0);

    fixed(1, trailing_one_bit, 1);
    --nb_bits;

    while (nb_bits > 0) {
        fixed(1, trailing_zero_bit, 0);
        --nb_bits;
    }

    return 0;
}
// Pad with zero bits up to the next byte boundary (AV1 spec 5.3.5).
static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;

    while (byte_alignment(rw) != 0)
        fixed(1, zero_bit, 0);

    return 0;
}
// Read/write color_config() (AV1 spec 5.5.2) and derive the decoder-side
// bit depth and plane count into the codec-private context.
static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawColorConfig *current, int seq_profile)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    flag(high_bitdepth);

    // Only the Professional profile can signal 12-bit; elsewhere
    // high_bitdepth alone selects between 8 and 10 bits.
    if (seq_profile == FF_PROFILE_AV1_PROFESSIONAL &&
        current->high_bitdepth) {
        flag(twelve_bit);
        priv->bit_depth = current->twelve_bit ? 12 : 10;
    } else {
        priv->bit_depth = current->high_bitdepth ? 10 : 8;
    }

    // High profile never codes mono_chrome (inferred 0).
    if (seq_profile == FF_PROFILE_AV1_HIGH)
        infer(mono_chrome, 0);
    else
        flag(mono_chrome);
    priv->num_planes = current->mono_chrome ? 1 : 3;

    flag(color_description_present_flag);
    if (current->color_description_present_flag) {
        fb(8, color_primaries);
        fb(8, transfer_characteristics);
        fb(8, matrix_coefficients);
    } else {
        infer(color_primaries,          AVCOL_PRI_UNSPECIFIED);
        infer(transfer_characteristics, AVCOL_TRC_UNSPECIFIED);
        infer(matrix_coefficients,      AVCOL_SPC_UNSPECIFIED);
    }

    if (current->mono_chrome) {
        // Monochrome: chroma-related fields are fixed by the spec.
        flag(color_range);

        infer(subsampling_x, 1);
        infer(subsampling_y, 1);
        infer(chroma_sample_position, AV1_CSP_UNKNOWN);
        infer(separate_uv_delta_q, 0);

    } else if (current->color_primaries          == AVCOL_PRI_BT709 &&
               current->transfer_characteristics == AVCOL_TRC_IEC61966_2_1 &&
               current->matrix_coefficients      == AVCOL_SPC_RGB) {
        // sRGB triple implies full-range 4:4:4.
        infer(color_range, 1);

        infer(subsampling_x, 0);
        infer(subsampling_y, 0);

        flag(separate_uv_delta_q);

    } else {
        flag(color_range);

        if (seq_profile == FF_PROFILE_AV1_MAIN) {
            // Main profile is always 4:2:0.
            infer(subsampling_x, 1);
            infer(subsampling_y, 1);
        } else if (seq_profile == FF_PROFILE_AV1_HIGH) {
            // High profile is always 4:4:4.
            infer(subsampling_x, 0);
            infer(subsampling_y, 0);
        } else {
            // Professional profile: 12-bit streams code the subsampling
            // explicitly; 8/10-bit streams are fixed to 4:2:2.
            if (priv->bit_depth == 12) {
                fb(1, subsampling_x);
                if (current->subsampling_x)
                    fb(1, subsampling_y);
                else
                    infer(subsampling_y, 0);
            } else {
                infer(subsampling_x, 1);
                infer(subsampling_y, 0);
            }
        }
        // chroma_sample_position is only meaningful for 4:2:0.
        if (current->subsampling_x && current->subsampling_y) {
            fc(2, chroma_sample_position, AV1_CSP_UNKNOWN,
                                          AV1_CSP_COLOCATED);
        }

        flag(separate_uv_delta_q);
    }

    return 0;
}
// Read/write timing_info() (AV1 spec 5.5.3).
static int FUNC(timing_info)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawTimingInfo *current)
{
    int err;

    // Both values must be nonzero 32-bit quantities.
    fc(32, num_units_in_display_tick, 1, MAX_UINT_BITS(32));
    fc(32, time_scale,                1, MAX_UINT_BITS(32));

    flag(equal_picture_interval);
    if (current->equal_picture_interval)
        uvlc(num_ticks_per_picture_minus_1, 0, MAX_UINT_BITS(32) - 1);

    return 0;
}
// Read/write decoder_model_info() (AV1 spec 5.5.4).  The *_length fields
// set the bit widths of delay/time values coded elsewhere in the stream.
static int FUNC(decoder_model_info)(CodedBitstreamContext *ctx, RWContext *rw,
                                    AV1RawDecoderModelInfo *current)
{
    int err;

    fb(5,  buffer_delay_length_minus_1);
    fb(32, num_units_in_decoding_tick);
    fb(5,  buffer_removal_time_length_minus_1);
    fb(5,  frame_presentation_time_length_minus_1);

    return 0;
}
// Read/write a complete sequence header OBU (AV1 spec 5.5.1), including the
// per-operating-point loop and the nested timing/decoder-model/color configs.
static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawSequenceHeader *current)
{
    int i, err;

    HEADER("Sequence Header");

    fc(3, seq_profile, FF_PROFILE_AV1_MAIN,
                       FF_PROFILE_AV1_PROFESSIONAL);
    flag(still_picture);
    flag(reduced_still_picture_header);

    if (current->reduced_still_picture_header) {
        // Reduced header: exactly one operating point with everything
        // except its level inferred.
        infer(timing_info_present_flag,           0);
        infer(decoder_model_info_present_flag,    0);
        infer(initial_display_delay_present_flag, 0);
        infer(operating_points_cnt_minus_1,       0);
        infer(operating_point_idc[0],             0);

        fb(5, seq_level_idx[0]);

        infer(seq_tier[0], 0);
        infer(decoder_model_present_for_this_op[0],         0);
        infer(initial_display_delay_present_for_this_op[0], 0);

    } else {
        flag(timing_info_present_flag);
        if (current->timing_info_present_flag) {
            CHECK(FUNC(timing_info)(ctx, rw, &current->timing_info));

            // decoder_model_info is only codeable when timing info exists.
            flag(decoder_model_info_present_flag);
            if (current->decoder_model_info_present_flag) {
                CHECK(FUNC(decoder_model_info)
                          (ctx, rw, &current->decoder_model_info));
            }
        } else {
            infer(decoder_model_info_present_flag, 0);
        }

        flag(initial_display_delay_present_flag);

        fb(5, operating_points_cnt_minus_1);
        for (i = 0; i <= current->operating_points_cnt_minus_1; i++) {
            fbs(12, operating_point_idc[i], 1, i);
            fbs(5,  seq_level_idx[i], 1, i);

            // Tier is only coded for levels above 7 (>= 4.0).
            if (current->seq_level_idx[i] > 7)
                flags(seq_tier[i], 1, i);
            else
                infer(seq_tier[i], 0);

            if (current->decoder_model_info_present_flag) {
                flags(decoder_model_present_for_this_op[i], 1, i);
                if (current->decoder_model_present_for_this_op[i]) {
                    // Delay fields use the width signalled in
                    // decoder_model_info above.
                    int n = current->decoder_model_info.buffer_delay_length_minus_1 + 1;
                    fbs(n, decoder_buffer_delay[i], 1, i);
                    fbs(n, encoder_buffer_delay[i], 1, i);
                    flags(low_delay_mode_flag[i], 1, i);
                }
            } else {
                infer(decoder_model_present_for_this_op[i], 0);
            }

            if (current->initial_display_delay_present_flag) {
                flags(initial_display_delay_present_for_this_op[i], 1, i);
                if (current->initial_display_delay_present_for_this_op[i])
                    fbs(4, initial_display_delay_minus_1[i], 1, i);
            }
        }
    }

    fb(4, frame_width_bits_minus_1);
    fb(4, frame_height_bits_minus_1);

    // Maximum dimensions are coded with the widths chosen just above.
    fb(current->frame_width_bits_minus_1  + 1, max_frame_width_minus_1);
    fb(current->frame_height_bits_minus_1 + 1, max_frame_height_minus_1);

    if (current->reduced_still_picture_header)
        infer(frame_id_numbers_present_flag, 0);
    else
        flag(frame_id_numbers_present_flag);
    if (current->frame_id_numbers_present_flag) {
        fb(4, delta_frame_id_length_minus_2);
        fb(3, additional_frame_id_length_minus_1);
    }

    flag(use_128x128_superblock);
    flag(enable_filter_intra);
    flag(enable_intra_edge_filter);

    if (current->reduced_still_picture_header) {
        // Still pictures: all inter-coding tools are off.
        infer(enable_interintra_compound, 0);
        infer(enable_masked_compound,     0);
        infer(enable_warped_motion,       0);
        infer(enable_dual_filter,         0);
        infer(enable_order_hint,          0);
        infer(enable_jnt_comp,            0);
        infer(enable_ref_frame_mvs,       0);

        infer(seq_force_screen_content_tools,
              AV1_SELECT_SCREEN_CONTENT_TOOLS);
        infer(seq_force_integer_mv,
              AV1_SELECT_INTEGER_MV);
    } else {
        flag(enable_interintra_compound);
        flag(enable_masked_compound);
        flag(enable_warped_motion);
        flag(enable_dual_filter);

        flag(enable_order_hint);
        if (current->enable_order_hint) {
            flag(enable_jnt_comp);
            flag(enable_ref_frame_mvs);
        } else {
            infer(enable_jnt_comp,      0);
            infer(enable_ref_frame_mvs, 0);
        }

        // "choose" flags select per-frame signalling rather than a
        // sequence-wide forced value.
        flag(seq_choose_screen_content_tools);
        if (current->seq_choose_screen_content_tools)
            infer(seq_force_screen_content_tools,
                  AV1_SELECT_SCREEN_CONTENT_TOOLS);
        else
            fb(1, seq_force_screen_content_tools);
        if (current->seq_force_screen_content_tools > 0) {
            flag(seq_choose_integer_mv);
            if (current->seq_choose_integer_mv)
                infer(seq_force_integer_mv,
                      AV1_SELECT_INTEGER_MV);
            else
                fb(1, seq_force_integer_mv);
        } else {
            infer(seq_force_integer_mv, AV1_SELECT_INTEGER_MV);
        }

        if (current->enable_order_hint)
            fb(3, order_hint_bits_minus_1);
    }

    flag(enable_superres);
    flag(enable_cdef);
    flag(enable_restoration);

    CHECK(FUNC(color_config)(ctx, rw, &current->color_config,
                             current->seq_profile));

    flag(film_grain_params_present);

    return 0;
}
// Temporal delimiter OBU has no payload; it just resets the
// seen_frame_header state for the new temporal unit (AV1 spec 5.6).
static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *rw)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;

    HEADER("Temporal Delimiter");

    priv->seen_frame_header = 0;

    return 0;
}
// Implement the set_frame_refs() process (AV1 spec 7.8): given the coded
// last/golden indices, derive the remaining five reference slots from the
// stored order hints.  No bits are consumed; results land in
// current->ref_frame_idx via infer().
static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    // Fill order for the slots not already fixed by LAST/GOLDEN.
    static const uint8_t ref_frame_list[AV1_NUM_REF_FRAMES - 2] = {
        AV1_REF_FRAME_LAST2, AV1_REF_FRAME_LAST3, AV1_REF_FRAME_BWDREF,
        AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
    };
    int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
    int8_t shifted_order_hints[AV1_NUM_REF_FRAMES];
    int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
    int i, j;

    // -1 marks "not yet assigned".
    for (i = 0; i < AV1_REFS_PER_FRAME; i++)
        ref_frame_idx[i] = -1;
    ref_frame_idx[AV1_REF_FRAME_LAST   - AV1_REF_FRAME_LAST] = current->last_frame_idx;
    ref_frame_idx[AV1_REF_FRAME_GOLDEN - AV1_REF_FRAME_LAST] = current->golden_frame_idx;

    for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
        used_frame[i] = 0;
    used_frame[current->last_frame_idx]   = 1;
    used_frame[current->golden_frame_idx] = 1;

    // Bias all hints so that values >= cur_frame_hint are "future" frames
    // and values below it are "past" frames relative to the current frame.
    cur_frame_hint = 1 << (seq->order_hint_bits_minus_1);
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
        shifted_order_hints[i] = cur_frame_hint +
            cbs_av1_get_relative_dist(seq, priv->ref[i].order_hint,
                                      current->order_hint);

    latest_order_hint   = shifted_order_hints[current->last_frame_idx];
    earliest_order_hint = shifted_order_hints[current->golden_frame_idx];

    // ALTREF: the most recent unused future frame.
    ref = -1;
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        int hint = shifted_order_hints[i];
        if (!used_frame[i] && hint >= cur_frame_hint &&
            (ref < 0 || hint >= latest_order_hint)) {
            ref = i;
            latest_order_hint = hint;
        }
    }
    if (ref >= 0) {
        ref_frame_idx[AV1_REF_FRAME_ALTREF - AV1_REF_FRAME_LAST] = ref;
        used_frame[ref] = 1;
    }

    // BWDREF: the earliest unused future frame.
    ref = -1;
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        int hint = shifted_order_hints[i];
        if (!used_frame[i] && hint >= cur_frame_hint &&
            (ref < 0 || hint < earliest_order_hint)) {
            ref = i;
            earliest_order_hint = hint;
        }
    }
    if (ref >= 0) {
        ref_frame_idx[AV1_REF_FRAME_BWDREF - AV1_REF_FRAME_LAST] = ref;
        used_frame[ref] = 1;
    }

    // ALTREF2: the next earliest unused future frame.
    ref = -1;
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        int hint = shifted_order_hints[i];
        if (!used_frame[i] && hint >= cur_frame_hint &&
            (ref < 0 || hint < earliest_order_hint)) {
            ref = i;
            earliest_order_hint = hint;
        }
    }
    if (ref >= 0) {
        ref_frame_idx[AV1_REF_FRAME_ALTREF2 - AV1_REF_FRAME_LAST] = ref;
        used_frame[ref] = 1;
    }

    // Remaining slots: fill from past frames, most recent first, in
    // ref_frame_list order.
    for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
        int ref_frame = ref_frame_list[i];
        if (ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] < 0 ) {
            ref = -1;
            for (j = 0; j < AV1_NUM_REF_FRAMES; j++) {
                int hint = shifted_order_hints[j];
                if (!used_frame[j] && hint < cur_frame_hint &&
                    (ref < 0 || hint >= latest_order_hint)) {
                    ref = j;
                    latest_order_hint = hint;
                }
            }
            if (ref >= 0) {
                ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] = ref;
                used_frame[ref] = 1;
            }
        }
    }

    // Fallback: anything still unassigned points at the overall oldest frame.
    ref = -1;
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        int hint = shifted_order_hints[i];
        if (ref < 0 || hint < earliest_order_hint) {
            ref = i;
            earliest_order_hint = hint;
        }
    }
    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        if (ref_frame_idx[i] < 0)
            ref_frame_idx[i] = ref;
        infer(ref_frame_idx[i], ref_frame_idx[i]);
    }

    return 0;
}
// Read/write superres_params() (AV1 spec 5.9.8) and derive
// upscaled_width/frame_width for the current frame.
static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                 AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int denom, err;

    if (seq->enable_superres)
        flag(use_superres);
    else
        infer(use_superres, 0);

    if (current->use_superres) {
        fb(3, coded_denom);
        denom = current->coded_denom + AV1_SUPERRES_DENOM_MIN;
    } else {
        // No superres: numerator == denominator, i.e. no scaling.
        denom = AV1_SUPERRES_NUM;
    }

    // Downscale the frame width with rounding; the pre-superres width
    // becomes the upscaled (output) width.
    priv->upscaled_width = priv->frame_width;
    priv->frame_width = (priv->upscaled_width * AV1_SUPERRES_NUM +
                         denom / 2) / denom;

    return 0;
}
// Read/write frame_size() (AV1 spec 5.9.5): either explicit dimensions or
// the sequence-header maxima, followed by superres parameters.
static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw,
                            AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int err;

    if (current->frame_size_override_flag) {
        // Explicit size, coded with the bit widths from the sequence header.
        fb(seq->frame_width_bits_minus_1  + 1, frame_width_minus_1);
        fb(seq->frame_height_bits_minus_1 + 1, frame_height_minus_1);

        priv->frame_width  = current->frame_width_minus_1  + 1;
        priv->frame_height = current->frame_height_minus_1 + 1;
    } else {
        priv->frame_width  = seq->max_frame_width_minus_1  + 1;
        priv->frame_height = seq->max_frame_height_minus_1 + 1;
    }

    CHECK(FUNC(superres_params)(ctx, rw, current));

    return 0;
}
// Read/write render_size() (AV1 spec 5.9.6).  When not explicitly coded,
// the render size defaults to the (upscaled) frame size.
static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    flag(render_and_frame_size_different);

    if (current->render_and_frame_size_different) {
        fb(16, render_width_minus_1);
        fb(16, render_height_minus_1);

        priv->render_width  = current->render_width_minus_1  + 1;
        priv->render_height = current->render_height_minus_1 + 1;
    } else {
        priv->render_width  = priv->upscaled_width;
        priv->render_height = priv->frame_height;
    }

    return 0;
}
// Read/write frame_size_with_refs() (AV1 spec 5.9.7): try to take the size
// from the first flagged reference; otherwise code it explicitly.
static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int i, err;

    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        flags(found_ref[i], 1, i);
        if (current->found_ref[i]) {
            AV1ReferenceFrameState *ref =
                &priv->ref[current->ref_frame_idx[i]];

            // The referenced slot must hold a valid decoded frame.
            if (!ref->valid) {
                av_log(ctx->log_ctx, AV_LOG_ERROR,
                       "Missing reference frame needed for frame size "
                       "(ref = %d, ref_frame_idx = %d).\n",
                       i, current->ref_frame_idx[i]);
                return AVERROR_INVALIDDATA;
            }

            // Copy all size state from the reference.
            priv->upscaled_width = ref->upscaled_width;
            priv->frame_width    = ref->frame_width;
            priv->frame_height   = ref->frame_height;
            priv->render_width   = ref->render_width;
            priv->render_height  = ref->render_height;
            break;
        }
    }

    if (i >= AV1_REFS_PER_FRAME) {
        // No reference matched: code frame and render size explicitly.
        CHECK(FUNC(frame_size)(ctx, rw, current));
        CHECK(FUNC(render_size)(ctx, rw, current));
    } else {
        // Size taken from a reference: only superres is still coded.
        CHECK(FUNC(superres_params)(ctx, rw, current));
    }

    return 0;
}
// Read/write read_interpolation_filter() (AV1 spec 5.9.10).
static int FUNC(interpolation_filter)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int err;

    flag(is_filter_switchable);
    if (current->is_filter_switchable)
        infer(interpolation_filter,
              AV1_INTERPOLATION_FILTER_SWITCHABLE);
    else
        fb(2, interpolation_filter);

    return 0;
}
// Read/write tile_info() (AV1 spec 5.9.15): derive superblock geometry from
// the frame size, then code either uniform or explicit tile spacing, and
// record the resulting tile counts in the codec-private context.
static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int mi_cols, mi_rows, sb_cols, sb_rows, sb_shift, sb_size;
    int max_tile_width_sb, max_tile_height_sb, max_tile_area_sb;
    int min_log2_tile_cols, max_log2_tile_cols, max_log2_tile_rows;
    int min_log2_tiles, min_log2_tile_rows;
    int i, err;

    // Frame size in 4x4 mode-info units, then in superblocks
    // (128x128 or 64x64 depending on the sequence header).
    mi_cols = 2 * ((priv->frame_width  + 7) >> 3);
    mi_rows = 2 * ((priv->frame_height + 7) >> 3);

    sb_cols = seq->use_128x128_superblock ? ((mi_cols + 31) >> 5)
                                          : ((mi_cols + 15) >> 4);
    sb_rows = seq->use_128x128_superblock ? ((mi_rows + 31) >> 5)
                                          : ((mi_rows + 15) >> 4);

    sb_shift = seq->use_128x128_superblock ? 5 : 4;
    sb_size  = sb_shift + 2;

    max_tile_width_sb = AV1_MAX_TILE_WIDTH >> sb_size;
    max_tile_area_sb  = AV1_MAX_TILE_AREA  >> (2 * sb_size);

    min_log2_tile_cols = cbs_av1_tile_log2(max_tile_width_sb, sb_cols);
    max_log2_tile_cols = cbs_av1_tile_log2(1, FFMIN(sb_cols, AV1_MAX_TILE_COLS));
    max_log2_tile_rows = cbs_av1_tile_log2(1, FFMIN(sb_rows, AV1_MAX_TILE_ROWS));
    min_log2_tiles = FFMAX(min_log2_tile_cols,
                           cbs_av1_tile_log2(max_tile_area_sb, sb_rows * sb_cols));

    flag(uniform_tile_spacing_flag);

    if (current->uniform_tile_spacing_flag) {
        int tile_width_sb, tile_height_sb;

        // Tile counts are derived from the log2 split factors coded as
        // unary increments within [min, max].
        increment(tile_cols_log2, min_log2_tile_cols, max_log2_tile_cols);

        tile_width_sb = (sb_cols + (1 << current->tile_cols_log2) - 1) >>
            current->tile_cols_log2;
        current->tile_cols = (sb_cols + tile_width_sb - 1) / tile_width_sb;

        min_log2_tile_rows = FFMAX(min_log2_tiles - current->tile_cols_log2, 0);

        increment(tile_rows_log2, min_log2_tile_rows, max_log2_tile_rows);

        tile_height_sb = (sb_rows + (1 << current->tile_rows_log2) - 1) >>
            current->tile_rows_log2;
        current->tile_rows = (sb_rows + tile_height_sb - 1) / tile_height_sb;

    } else {
        // Explicit spacing: per-tile widths/heights in superblocks.
        int widest_tile_sb, start_sb, size_sb, max_width, max_height;

        widest_tile_sb = 0;

        start_sb = 0;
        for (i = 0; start_sb < sb_cols && i < AV1_MAX_TILE_COLS; i++) {
            max_width = FFMIN(sb_cols - start_sb, max_tile_width_sb);
            ns(max_width, width_in_sbs_minus_1[i], 1, i);
            size_sb = current->width_in_sbs_minus_1[i] + 1;
            widest_tile_sb = FFMAX(size_sb, widest_tile_sb);
            start_sb += size_sb;
        }
        current->tile_cols_log2 = cbs_av1_tile_log2(1, i);
        current->tile_cols = i;

        // Maximum tile height depends on the widest column coded above.
        if (min_log2_tiles > 0)
            max_tile_area_sb = (sb_rows * sb_cols) >> (min_log2_tiles + 1);
        else
            max_tile_area_sb = sb_rows * sb_cols;
        max_tile_height_sb = FFMAX(max_tile_area_sb / widest_tile_sb, 1);

        start_sb = 0;
        for (i = 0; start_sb < sb_rows && i < AV1_MAX_TILE_ROWS; i++) {
            max_height = FFMIN(sb_rows - start_sb, max_tile_height_sb);
            ns(max_height, height_in_sbs_minus_1[i], 1, i);
            size_sb = current->height_in_sbs_minus_1[i] + 1;
            start_sb += size_sb;
        }
        current->tile_rows_log2 = cbs_av1_tile_log2(1, i);
        current->tile_rows = i;
    }

    // Multi-tile frames carry the context-update tile id and the tile size
    // length; single-tile frames infer the id as zero.
    if (current->tile_cols_log2 > 0 ||
        current->tile_rows_log2 > 0) {
        fb(current->tile_cols_log2 + current->tile_rows_log2,
           context_update_tile_id);
        fb(2, tile_size_bytes_minus1);
    } else {
        infer(context_update_tile_id, 0);
    }

    priv->tile_cols = current->tile_cols;
    priv->tile_rows = current->tile_rows;

    return 0;
}
// Read/write quantization_params() (AV1 spec 5.9.12).
static int FUNC(quantization_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int err;

    fb(8, base_q_idx);

    delta_q(delta_q_y_dc);

    if (priv->num_planes > 1) {
        // diff_uv_delta (separate U/V deltas) is only codeable when the
        // sequence allows separate UV delta-q.
        if (seq->color_config.separate_uv_delta_q)
            flag(diff_uv_delta);
        else
            infer(diff_uv_delta, 0);

        delta_q(delta_q_u_dc);
        delta_q(delta_q_u_ac);

        if (current->diff_uv_delta) {
            delta_q(delta_q_v_dc);
            delta_q(delta_q_v_ac);
        } else {
            // V reuses the U deltas.
            infer(delta_q_v_dc, current->delta_q_u_dc);
            infer(delta_q_v_ac, current->delta_q_u_ac);
        }
    } else {
        // Monochrome: no chroma deltas.
        infer(delta_q_u_dc, 0);
        infer(delta_q_u_ac, 0);
        infer(delta_q_v_dc, 0);
        infer(delta_q_v_ac, 0);
    }

    flag(using_qmatrix);
    if (current->using_qmatrix) {
        fb(4, qm_y);
        fb(4, qm_u);

        if (seq->color_config.separate_uv_delta_q)
            fb(4, qm_v);
        else
            infer(qm_v, current->qm_u);
    }

    return 0;
}
// Read/write segmentation_params() (AV1 spec 5.9.14).  The bits[]/sign[]
// tables give the coded size and signedness of each per-segment feature.
static int FUNC(segmentation_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    static const uint8_t bits[AV1_SEG_LVL_MAX] = { 8, 6, 6, 6, 6, 3, 0, 0 };
    static const uint8_t sign[AV1_SEG_LVL_MAX] = { 1, 1, 1, 1, 1, 0, 0, 0 };
    int i, j, err;

    flag(segmentation_enabled);

    if (current->segmentation_enabled) {
        // Without a primary reference there is no previous segmentation to
        // reuse, so map and data updates are forced.
        if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
            infer(segmentation_update_map,      1);
            infer(segmentation_temporal_update, 0);
            infer(segmentation_update_data,     1);
        } else {
            flag(segmentation_update_map);
            if (current->segmentation_update_map)
                flag(segmentation_temporal_update);
            else
                infer(segmentation_temporal_update, 0);
            flag(segmentation_update_data);
        }

        if (current->segmentation_update_data) {
            for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
                for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
                    flags(feature_enabled[i][j], 2, i, j);

                    if (current->feature_enabled[i][j] && bits[j] > 0) {
                        // Signed features carry one extra sign bit.
                        if (sign[j])
                            sus(1 + bits[j], feature_value[i][j], 2, i, j);
                        else
                            fbs(bits[j], feature_value[i][j], 2, i, j);
                    } else {
                        infer(feature_value[i][j], 0);
                    }
                }
            }
        }
    } else {
        // Segmentation disabled: all features off and zero.
        for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
            for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
                infer(feature_enabled[i][j], 0);
                infer(feature_value[i][j],   0);
            }
        }
    }

    return 0;
}
// Read/write delta_q_params() (AV1 spec 5.9.17): only meaningful when
// base_q_idx is nonzero.
static int FUNC(delta_q_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawFrameHeader *current)
{
    int err;

    if (current->base_q_idx > 0)
        flag(delta_q_present);
    else
        infer(delta_q_present, 0);

    if (current->delta_q_present)
        fb(2, delta_q_res);

    return 0;
}
// Read/write delta_lf_params() (AV1 spec 5.9.18): loop-filter deltas are
// only possible when delta-q is present and intra block copy is off.
static int FUNC(delta_lf_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                 AV1RawFrameHeader *current)
{
    int err;

    if (current->delta_q_present) {
        if (!current->allow_intrabc)
            flag(delta_lf_present);
        else
            infer(delta_lf_present, 0);

        if (current->delta_lf_present) {
            fb(2, delta_lf_res);
            flag(delta_lf_multi);
        } else {
            infer(delta_lf_res,   0);
            infer(delta_lf_multi, 0);
        }
    } else {
        infer(delta_lf_present, 0);
        infer(delta_lf_res,     0);
        infer(delta_lf_multi,   0);
    }

    return 0;
}
// Read/write loop_filter_params() (AV1 spec 5.9.11).  For lossless or
// intra-block-copy frames everything is inferred to the spec defaults.
static int FUNC(loop_filter_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                    AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int i, err;

    if (priv->coded_lossless || current->allow_intrabc) {
        // Filter disabled; default reference/mode deltas from the spec.
        infer(loop_filter_level[0], 0);
        infer(loop_filter_level[1], 0);

        infer(loop_filter_ref_deltas[AV1_REF_FRAME_INTRA],    1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST],     0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST2],    0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST3],    0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_BWDREF],   0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN],  -1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF],  -1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2], -1);

        for (i = 0; i < 2; i++)
            infer(loop_filter_mode_deltas[i], 0);

        return 0;
    }

    fb(6, loop_filter_level[0]);
    fb(6, loop_filter_level[1]);

    // Chroma filter levels only when any luma level is nonzero.
    if (priv->num_planes > 1) {
        if (current->loop_filter_level[0] ||
            current->loop_filter_level[1]) {
            fb(6, loop_filter_level[2]);
            fb(6, loop_filter_level[3]);
        }
    }

    fb(3, loop_filter_sharpness);

    flag(loop_filter_delta_enabled);
    if (current->loop_filter_delta_enabled) {
        flag(loop_filter_delta_update);

        if (current->loop_filter_delta_update) {
            // Per-reference and per-mode deltas, each guarded by an
            // update flag and coded as sign + 6 bits.
            for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
                flags(update_ref_delta[i], 1, i);
                if (current->update_ref_delta[i])
                    sus(1 + 6, loop_filter_ref_deltas[i], 1, i);
            }

            for (i = 0; i < 2; i++) {
                flags(update_mode_delta[i], 1, i);
                if (current->update_mode_delta[i])
                    sus(1 + 6, loop_filter_mode_deltas[i], 1, i);
            }
        }
    }

    return 0;
}
// Read/write cdef_params() (AV1 spec 5.9.19).  CDEF is skipped (all zeros)
// for lossless frames, intra block copy, or when disabled in the sequence.
static int FUNC(cdef_params)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int i, err;

    if (priv->coded_lossless || current->allow_intrabc ||
        !seq->enable_cdef) {
        infer(cdef_damping_minus_3, 0);
        infer(cdef_bits, 0);
        infer(cdef_y_pri_strength[0],  0);
        infer(cdef_y_sec_strength[0],  0);
        infer(cdef_uv_pri_strength[0], 0);
        infer(cdef_uv_sec_strength[0], 0);

        return 0;
    }

    fb(2, cdef_damping_minus_3);
    fb(2, cdef_bits);

    // 2^cdef_bits strength pairs; chroma strengths only for multi-plane.
    for (i = 0; i < (1 << current->cdef_bits); i++) {
        fbs(4, cdef_y_pri_strength[i], 1, i);
        fbs(2, cdef_y_sec_strength[i], 1, i);

        if (priv->num_planes > 1) {
            fbs(4, cdef_uv_pri_strength[i], 1, i);
            fbs(2, cdef_uv_sec_strength[i], 1, i);
        }
    }

    return 0;
}
// Read/write lr_params() — loop restoration (AV1 spec 5.9.20).  Absent
// entirely for all-lossless frames, intra block copy, or when restoration
// is disabled in the sequence header.
static int FUNC(lr_params)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int uses_lr, uses_chroma_lr;
    int i, err;

    if (priv->all_lossless || current->allow_intrabc ||
        !seq->enable_restoration) {
        return 0;
    }

    // One restoration type per plane; track whether any plane (and any
    // chroma plane) actually uses restoration.
    uses_lr = uses_chroma_lr = 0;
    for (i = 0; i < priv->num_planes; i++) {
        fbs(2, lr_type[i], 1, i);

        if (current->lr_type[i] != 0) {
            uses_lr = 1;
            if (i > 0)
                uses_chroma_lr = 1;
        }
    }

    if (uses_lr) {
        // Unit size shift: base offset depends on the superblock size.
        if (seq->use_128x128_superblock)
            increment(lr_unit_shift, 1, 2);
        else
            increment(lr_unit_shift, 0, 2);

        // UV shift is only coded for subsampled chroma that uses LR.
        if(seq->color_config.subsampling_x &&
           seq->color_config.subsampling_y && uses_chroma_lr) {
            fb(1, lr_uv_shift);
        } else {
            infer(lr_uv_shift, 0);
        }
    }

    return 0;
}
// Read/write read_tx_mode() (AV1 spec 5.9.21): lossless frames infer mode 0,
// otherwise tx_mode is coded as an increment in [1, 2].
static int FUNC(read_tx_mode)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    if (priv->coded_lossless)
        infer(tx_mode, 0);
    else
        increment(tx_mode, 1, 2);

    return 0;
}
// Read/write frame_reference_mode() (AV1 spec 5.9.23): intra-only/key frames
// cannot select compound references.
static int FUNC(frame_reference_mode)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int err;

    if (current->frame_type == AV1_FRAME_INTRA_ONLY ||
        current->frame_type == AV1_FRAME_KEY)
        infer(reference_select, 0);
    else
        flag(reference_select);

    return 0;
}
// Read/write skip_mode_params() (AV1 spec 5.9.22): derive whether skip mode
// is allowed by searching the references for a suitable forward/backward
// (or forward/second-forward) pair, then code skip_mode_present if so.
static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int skip_mode_allowed;
    int err;

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY ||
        !current->reference_select || !seq->enable_order_hint) {
        skip_mode_allowed = 0;
    } else {
        int forward_idx,  backward_idx;
        int forward_hint, backward_hint;
        int ref_hint, dist, i;

        // Find the nearest past (forward) and nearest future (backward)
        // references by relative order-hint distance.
        // forward_hint/backward_hint are only read after the matching
        // index is >= 0, so they are always initialized before use.
        forward_idx  = -1;
        backward_idx = -1;
        for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
            ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
            dist = cbs_av1_get_relative_dist(seq, ref_hint,
                                             current->order_hint);
            if (dist < 0) {
                if (forward_idx < 0 ||
                    cbs_av1_get_relative_dist(seq, ref_hint,
                                              forward_hint) > 0) {
                    forward_idx  = i;
                    forward_hint = ref_hint;
                }
            } else if (dist > 0) {
                if (backward_idx < 0 ||
                    cbs_av1_get_relative_dist(seq, ref_hint,
                                              backward_hint) < 0) {
                    backward_idx  = i;
                    backward_hint = ref_hint;
                }
            }
        }

        if (forward_idx < 0) {
            skip_mode_allowed = 0;
        } else if (backward_idx >= 0) {
            skip_mode_allowed = 1;
            // Frames for skip mode are forward_idx and backward_idx.
        } else {
            // No future reference: look for a second, older past reference.
            int second_forward_idx;
            int second_forward_hint;
            second_forward_idx = -1;
            for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
                ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
                if (cbs_av1_get_relative_dist(seq, ref_hint,
                                              forward_hint) < 0) {
                    if (second_forward_idx < 0 ||
                        cbs_av1_get_relative_dist(seq, ref_hint,
                                                  second_forward_hint) > 0) {
                        second_forward_idx  = i;
                        second_forward_hint = ref_hint;
                    }
                }
            }

            if (second_forward_idx < 0) {
                skip_mode_allowed = 0;
            } else {
                skip_mode_allowed = 1;
                // Frames for skip mode are forward_idx and second_forward_idx.
            }
        }
    }

    if (skip_mode_allowed)
        flag(skip_mode_present);
    else
        infer(skip_mode_present, 0);

    return 0;
}
/*
 * Read/write a single global motion parameter gm_params[ref][idx] as a
 * subexponential code (AV1 spec 5.9.25, global_param()).
 *
 * idx 0-1 are the translation components; idx 2-5 are the rotation/zoom
 * and affine components.  The number of symbols for the subexp code is
 * derived from the warp model type.
 */
static int FUNC(global_motion_param)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current,
                                     int type, int ref, int idx)
{
    uint32_t abs_bits, prec_bits, num_syms;
    int err;

    if (idx < 2) {
        if (type == AV1_WARP_MODEL_TRANSLATION) {
            // Translation-only models lose one bit of range and precision
            // when high-precision motion vectors are disabled.
            abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS - !current->allow_high_precision_mv;
            prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS - !current->allow_high_precision_mv;
        } else {
            abs_bits = AV1_GM_ABS_TRANS_BITS;
            prec_bits = AV1_GM_TRANS_PREC_BITS;
        }
    } else {
        abs_bits = AV1_GM_ABS_ALPHA_BITS;
        prec_bits = AV1_GM_ALPHA_PREC_BITS;
    }

    // Symbol range is [-(1 << abs_bits), +(1 << abs_bits)].
    num_syms = 2 * (1 << abs_bits) + 1;
    subexp(gm_params[ref][idx], num_syms, 2, ref, idx);

    // Actual gm_params value is not reconstructed here; prec_bits would
    // only be needed for that reconstruction.
    (void)prec_bits;

    return 0;
}
/*
 * Read/write the global motion model for each inter reference frame
 * (AV1 spec 5.9.24).  Intra frames carry no global motion data.
 */
static int FUNC(global_motion_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int ref, type;
    int err;

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY)
        return 0;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        // Warp model type is signalled as a chain of flags:
        // identity -> translation / rot-zoom / affine.
        flags(is_global[ref], 1, ref);
        if (current->is_global[ref]) {
            flags(is_rot_zoom[ref], 1, ref);
            if (current->is_rot_zoom[ref]) {
                type = AV1_WARP_MODEL_ROTZOOM;
            } else {
                flags(is_translation[ref], 1, ref);
                type = current->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
                                                    : AV1_WARP_MODEL_AFFINE;
            }
        } else {
            type = AV1_WARP_MODEL_IDENTITY;
        }

        // Parameters 2-5 (alpha terms) are only present for rot-zoom and
        // affine; rot-zoom derives 4 and 5 from 3 and 2 respectively.
        if (type >= AV1_WARP_MODEL_ROTZOOM) {
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 2));
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 3));
            if (type == AV1_WARP_MODEL_AFFINE) {
                CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 4));
                CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 5));
            } else {
                // gm_params[ref][4] = -gm_params[ref][3]
                // gm_params[ref][5] = gm_params[ref][2]
            }
        }
        // Translation components 0 and 1 are present for every non-identity
        // model, and are coded last.
        if (type >= AV1_WARP_MODEL_TRANSLATION) {
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 0));
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 1));
        }
    }

    return 0;
}
/*
 * Read/write the film grain synthesis parameters (AV1 spec 5.9.30).
 *
 * Present only when the sequence header enables film grain and the frame
 * is shown or showable; otherwise all fields keep their defaults.
 */
static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int num_pos_luma, num_pos_chroma;
    int i, err;

    if (!seq->film_grain_params_present ||
        (!current->show_frame && !current->showable_frame))
        return 0;

    flag(apply_grain);

    if (!current->apply_grain)
        return 0;

    fb(16, grain_seed);

    // Only inter frames may reuse grain parameters from a reference frame.
    if (current->frame_type == AV1_FRAME_INTER)
        flag(update_grain);
    else
        infer(update_grain, 1);

    if (!current->update_grain) {
        // Grain parameters are copied from the referenced frame; nothing
        // more is coded.
        fb(3, film_grain_params_ref_idx);
        return 0;
    }

    // Luma scaling function: up to 14 (value, scaling) points with strictly
    // increasing values.
    fc(4, num_y_points, 0, 14);
    for (i = 0; i < current->num_y_points; i++) {
        fcs(8, point_y_value[i],
            i ? current->point_y_value[i - 1] + 1 : 0,
            MAX_UINT_BITS(8) - (current->num_y_points - i - 1),
            1, i);
        fbs(8, point_y_scaling[i], 1, i);
    }

    if (seq->color_config.mono_chrome)
        infer(chroma_scaling_from_luma, 0);
    else
        flag(chroma_scaling_from_luma);

    // Chroma scaling points are absent for mono, when chroma follows luma,
    // or for 4:2:0 streams with no luma points.
    if (seq->color_config.mono_chrome ||
        current->chroma_scaling_from_luma ||
        (seq->color_config.subsampling_x == 1 &&
         seq->color_config.subsampling_y == 1 &&
         current->num_y_points == 0)) {
        infer(num_cb_points, 0);
        infer(num_cr_points, 0);
    } else {
        fc(4, num_cb_points, 0, 10);
        for (i = 0; i < current->num_cb_points; i++) {
            fcs(8, point_cb_value[i],
                i ? current->point_cb_value[i - 1] + 1 : 0,
                MAX_UINT_BITS(8) - (current->num_cb_points - i - 1),
                1, i);
            fbs(8, point_cb_scaling[i], 1, i);
        }
        fc(4, num_cr_points, 0, 10);
        for (i = 0; i < current->num_cr_points; i++) {
            fcs(8, point_cr_value[i],
                i ? current->point_cr_value[i - 1] + 1 : 0,
                MAX_UINT_BITS(8) - (current->num_cr_points - i - 1),
                1, i);
            fbs(8, point_cr_scaling[i], 1, i);
        }
    }

    fb(2, grain_scaling_minus_8);
    fb(2, ar_coeff_lag);

    // Number of auto-regressive coefficients depends on the lag; chroma
    // additionally uses the co-located luma sample when luma grain exists.
    num_pos_luma = 2 * current->ar_coeff_lag * (current->ar_coeff_lag + 1);
    if (current->num_y_points) {
        num_pos_chroma = num_pos_luma + 1;
        for (i = 0; i < num_pos_luma; i++)
            fbs(8, ar_coeffs_y_plus_128[i], 1, i);
    } else {
        num_pos_chroma = num_pos_luma;
    }

    if (current->chroma_scaling_from_luma || current->num_cb_points) {
        for (i = 0; i < num_pos_chroma; i++)
            fbs(8, ar_coeffs_cb_plus_128[i], 1, i);
    }
    if (current->chroma_scaling_from_luma || current->num_cr_points) {
        for (i = 0; i < num_pos_chroma; i++)
            fbs(8, ar_coeffs_cr_plus_128[i], 1, i);
    }

    fb(2, ar_coeff_shift_minus_6);
    fb(2, grain_scale_shift);

    if (current->num_cb_points) {
        fb(8, cb_mult);
        fb(8, cb_luma_mult);
        fb(9, cb_offset);
    }
    if (current->num_cr_points) {
        fb(8, cr_mult);
        fb(8, cr_luma_mult);
        fb(9, cr_offset);
    }

    flag(overlap_flag);
    flag(clip_to_restricted_range);

    return 0;
}
/*
 * Read/write the uncompressed frame header (AV1 spec 5.9.2) and update the
 * decoder-model state kept in the CBS private context (reference frame
 * slots, frame sizes, lossless flags).
 *
 * Requires a sequence header to have been seen; fails with
 * AVERROR_INVALIDDATA otherwise.  Field order below mirrors the spec and
 * must not be changed.
 */
static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq;
    int id_len, diff_len, all_frames, frame_is_intra, order_hint_bits;
    int i, err;

    if (!priv->sequence_header) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
               "unable to decode frame header.\n");
        return AVERROR_INVALIDDATA;
    }
    seq = priv->sequence_header;

    id_len = seq->additional_frame_id_length_minus_1 +
             seq->delta_frame_id_length_minus_2 + 3;
    all_frames = (1 << AV1_NUM_REF_FRAMES) - 1;

    if (seq->reduced_still_picture_header) {
        // Still pictures carry a fixed, implicit header.
        infer(show_existing_frame, 0);
        infer(frame_type, AV1_FRAME_KEY);
        infer(show_frame, 1);
        infer(showable_frame, 0);
        frame_is_intra = 1;
    } else {
        flag(show_existing_frame);

        if (current->show_existing_frame) {
            // show_existing_frame: only identify the reference to display;
            // no further frame data follows.
            AV1ReferenceFrameState *frame;

            fb(3, frame_to_show_map_idx);
            frame = &priv->ref[current->frame_to_show_map_idx];

            if (seq->decoder_model_info_present_flag &&
                !seq->timing_info.equal_picture_interval) {
                fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
                   frame_presentation_time);
            }

            if (seq->frame_id_numbers_present_flag)
                fb(id_len, display_frame_id);

            // Showing a key frame refreshes every reference slot.
            if (frame->frame_type == AV1_FRAME_KEY)
                infer(refresh_frame_flags, all_frames);
            else
                infer(refresh_frame_flags, 0);

            return 0;
        }

        fb(2, frame_type);
        frame_is_intra = (current->frame_type == AV1_FRAME_INTRA_ONLY ||
                          current->frame_type == AV1_FRAME_KEY);

        flag(show_frame);
        if (current->show_frame &&
            seq->decoder_model_info_present_flag &&
            !seq->timing_info.equal_picture_interval) {
            fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
               frame_presentation_time);
        }
        if (current->show_frame)
            infer(showable_frame, current->frame_type != AV1_FRAME_KEY);
        else
            flag(showable_frame);

        if (current->frame_type == AV1_FRAME_SWITCH ||
            (current->frame_type == AV1_FRAME_KEY && current->show_frame))
            infer(error_resilient_mode, 1);
        else
            flag(error_resilient_mode);
    }

    // A shown key frame invalidates all stored references.
    if (current->frame_type == AV1_FRAME_KEY && current->show_frame) {
        for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
            priv->ref[i].valid = 0;
            priv->ref[i].order_hint = 0;
        }
    }

    flag(disable_cdf_update);

    if (seq->seq_force_screen_content_tools ==
        AV1_SELECT_SCREEN_CONTENT_TOOLS) {
        flag(allow_screen_content_tools);
    } else {
        infer(allow_screen_content_tools,
              seq->seq_force_screen_content_tools);
    }
    if (current->allow_screen_content_tools) {
        if (seq->seq_force_integer_mv == AV1_SELECT_INTEGER_MV)
            flag(force_integer_mv);
        else
            infer(force_integer_mv, seq->seq_force_integer_mv);
    } else {
        infer(force_integer_mv, 0);
    }

    if (seq->frame_id_numbers_present_flag) {
        fb(id_len, current_frame_id);

        // Mark references whose frame id falls outside the valid delta
        // window as invalid (spec: mark_ref_frames()).
        diff_len = seq->delta_frame_id_length_minus_2 + 2;
        for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
            if (current->current_frame_id > (1 << diff_len)) {
                if (priv->ref[i].frame_id > current->current_frame_id ||
                    priv->ref[i].frame_id < (current->current_frame_id -
                                             (1 << diff_len)))
                    priv->ref[i].valid = 0;
            } else {
                // Window wraps around the id space.
                if (priv->ref[i].frame_id > current->current_frame_id &&
                    priv->ref[i].frame_id < ((1 << id_len) +
                                             current->current_frame_id -
                                             (1 << diff_len)))
                    priv->ref[i].valid = 0;
            }
        }
    } else {
        infer(current_frame_id, 0);
    }

    if (current->frame_type == AV1_FRAME_SWITCH)
        infer(frame_size_override_flag, 1);
    else if(seq->reduced_still_picture_header)
        infer(frame_size_override_flag, 0);
    else
        flag(frame_size_override_flag);

    order_hint_bits =
        seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;
    if (order_hint_bits > 0)
        fb(order_hint_bits, order_hint);
    else
        infer(order_hint, 0);

    if (frame_is_intra || current->error_resilient_mode)
        infer(primary_ref_frame, AV1_PRIMARY_REF_NONE);
    else
        fb(3, primary_ref_frame);

    if (seq->decoder_model_info_present_flag) {
        flag(buffer_removal_time_present_flag);
        if (current->buffer_removal_time_present_flag) {
            for (i = 0; i <= seq->operating_points_cnt_minus_1; i++) {
                if (seq->decoder_model_present_for_this_op[i]) {
                    // Only operating points covering the current temporal
                    // and spatial layer carry a removal time.
                    int op_pt_idc = seq->operating_point_idc[i];
                    int in_temporal_layer = (op_pt_idc >>  priv->temporal_id    ) & 1;
                    int in_spatial_layer  = (op_pt_idc >> (priv->spatial_id + 8)) & 1;
                    if (seq->operating_point_idc[i] == 0 ||
                        (in_temporal_layer && in_spatial_layer)) {
                        fbs(seq->decoder_model_info.buffer_removal_time_length_minus_1 + 1,
                            buffer_removal_time[i], 1, i);
                    }
                }
            }
        }
    }

    if (current->frame_type == AV1_FRAME_SWITCH ||
        (current->frame_type == AV1_FRAME_KEY && current->show_frame))
        infer(refresh_frame_flags, all_frames);
    else
        fb(8, refresh_frame_flags);

    if (!frame_is_intra || current->refresh_frame_flags != all_frames) {
        // Error-resilient streams re-signal all reference order hints so a
        // decoder can detect mismatched references after loss.
        if (current->error_resilient_mode && seq->enable_order_hint) {
            for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
                fbs(order_hint_bits, ref_order_hint[i], 1, i);
                if (current->ref_order_hint[i] != priv->ref[i].order_hint)
                    priv->ref[i].valid = 0;
            }
        }
    }

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY) {
        CHECK(FUNC(frame_size)(ctx, rw, current));
        CHECK(FUNC(render_size)(ctx, rw, current));

        // Intra block copy is only allowed without super-resolution.
        if (current->allow_screen_content_tools &&
            priv->upscaled_width == priv->frame_width)
            flag(allow_intrabc);
        else
            infer(allow_intrabc, 0);

    } else {
        if (!seq->enable_order_hint) {
            infer(frame_refs_short_signaling, 0);
        } else {
            flag(frame_refs_short_signaling);
            if (current->frame_refs_short_signaling) {
                // Short signalling: derive the remaining references from
                // last/golden indices and order hints.
                fb(3, last_frame_idx);
                fb(3, golden_frame_idx);
                CHECK(FUNC(set_frame_refs)(ctx, rw, current));
            }
        }

        for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
            if (!current->frame_refs_short_signaling)
                fbs(3, ref_frame_idx[i], 1, i);
            if (seq->frame_id_numbers_present_flag) {
                fbs(seq->delta_frame_id_length_minus_2 + 2,
                    delta_frame_id_minus1[i], 1, i);
            }
        }

        if (current->frame_size_override_flag &&
            !current->error_resilient_mode) {
            CHECK(FUNC(frame_size_with_refs)(ctx, rw, current));
        } else {
            CHECK(FUNC(frame_size)(ctx, rw, current));
            CHECK(FUNC(render_size)(ctx, rw, current));
        }

        if (current->force_integer_mv)
            infer(allow_high_precision_mv, 0);
        else
            flag(allow_high_precision_mv);

        CHECK(FUNC(interpolation_filter)(ctx, rw, current));

        flag(is_motion_mode_switchable);

        if (current->error_resilient_mode ||
            !seq->enable_ref_frame_mvs)
            infer(use_ref_frame_mvs, 0);
        else
            flag(use_ref_frame_mvs);

        infer(allow_intrabc, 0);
    }

    if (!frame_is_intra) {
        // Derive reference frame sign biases.
    }

    if (seq->reduced_still_picture_header || current->disable_cdf_update)
        infer(disable_frame_end_update_cdf, 1);
    else
        flag(disable_frame_end_update_cdf);

    if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
        // Init non-coeff CDFs.
        // Setup past independence.
    } else {
        // Load CDF tables from previous frame.
        // Load params from previous frame.
    }

    if (current->use_ref_frame_mvs) {
        // Perform motion field estimation process.
    }

    CHECK(FUNC(tile_info)(ctx, rw, current));

    CHECK(FUNC(quantization_params)(ctx, rw, current));

    CHECK(FUNC(segmentation_params)(ctx, rw, current));

    CHECK(FUNC(delta_q_params)(ctx, rw, current));

    CHECK(FUNC(delta_lf_params)(ctx, rw, current));

    // Init coeff CDFs / load previous segments.

    // coded_lossless is set when every segment's effective qindex is zero
    // and no per-plane DC/AC deltas are present (spec 5.9.12 derivation).
    priv->coded_lossless = 1;
    for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
        int qindex;
        if (current->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
            qindex = (current->base_q_idx +
                      current->feature_value[i][AV1_SEG_LVL_ALT_Q]);
        } else {
            qindex = current->base_q_idx;
        }
        qindex = av_clip_uintp2(qindex, 8);

        if (qindex || current->delta_q_y_dc ||
            current->delta_q_u_ac || current->delta_q_u_dc ||
            current->delta_q_v_ac || current->delta_q_v_dc) {
            priv->coded_lossless = 0;
        }
    }
    // all_lossless additionally requires super-resolution to be off.
    priv->all_lossless = priv->coded_lossless &&
                         priv->frame_width == priv->upscaled_width;

    CHECK(FUNC(loop_filter_params)(ctx, rw, current));

    CHECK(FUNC(cdef_params)(ctx, rw, current));

    CHECK(FUNC(lr_params)(ctx, rw, current));

    CHECK(FUNC(read_tx_mode)(ctx, rw, current));

    CHECK(FUNC(frame_reference_mode)(ctx, rw, current));

    CHECK(FUNC(skip_mode_params)(ctx, rw, current));

    if (frame_is_intra || current->error_resilient_mode ||
        !seq->enable_warped_motion)
        infer(allow_warped_motion, 0);
    else
        flag(allow_warped_motion);

    flag(reduced_tx_set);

    CHECK(FUNC(global_motion_params)(ctx, rw, current));

    CHECK(FUNC(film_grain_params)(ctx, rw, current));

    // Store this frame's state into every reference slot it refreshes.
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (current->refresh_frame_flags & (1 << i)) {
            priv->ref[i] = (AV1ReferenceFrameState) {
                .valid          = 1,
                .frame_id       = current->current_frame_id,
                .upscaled_width = priv->upscaled_width,
                .frame_width    = priv->frame_width,
                .frame_height   = priv->frame_height,
                .render_width   = priv->render_width,
                .render_height  = priv->render_height,
                .frame_type     = current->frame_type,
                .subsampling_x  = seq->color_config.subsampling_x,
                .subsampling_y  = seq->color_config.subsampling_y,
                .bit_depth      = priv->bit_depth,
                .order_hint     = current->order_hint,
            };
        }
    }

    av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame %d:  size %dx%d  "
           "upscaled %d  render %dx%d  subsample %dx%d  "
           "bitdepth %d  tiles %dx%d.\n", current->order_hint,
           priv->frame_width, priv->frame_height, priv->upscaled_width,
           priv->render_width, priv->render_height,
           seq->color_config.subsampling_x + 1,
           seq->color_config.subsampling_y + 1, priv->bit_depth,
           priv->tile_rows, priv->tile_cols);

    return 0;
}
/*
 * Read/write a frame header OBU.
 *
 * A redundant frame header after a real one must be a bit-exact copy of the
 * stored header, so it is re-emitted/verified byte by byte.  Otherwise the
 * header is parsed normally and its raw bits are saved (with a reference to
 * the underlying buffer when available) for later redundant-header checks.
 *
 * NOTE(review): priv->frame_header_size is stored in BITS, not bytes —
 * consistent with the bit-granular copy loop and init_get_bits() below.
 */
static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawFrameHeader *current, int redundant,
                                  AVBufferRef *rw_buffer_ref)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int start_pos, fh_bits, fh_bytes, err;
    uint8_t *fh_start;

    if (priv->seen_frame_header) {
        if (!redundant) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated "
                   "frame header OBU.\n");
            return AVERROR_INVALIDDATA;
        } else {
            GetBitContext fh;
            size_t i, b;
            uint32_t val;

            HEADER("Redundant Frame Header");

            av_assert0(priv->frame_header_ref && priv->frame_header);

            init_get_bits(&fh, priv->frame_header,
                          priv->frame_header_size);
            // Copy/compare the stored header in up-to-8-bit chunks; xf()
            // with min == max == val enforces an exact match on read.
            for (i = 0; i < priv->frame_header_size; i += 8) {
                b = FFMIN(priv->frame_header_size - i, 8);
                val = get_bits(&fh, b);
                xf(b, frame_header_copy[i],
                   val, val, val, 1, i / 8);
            }
        }
    } else {
        if (redundant)
            HEADER("Redundant Frame Header (used as Frame Header)");
        else
            HEADER("Frame Header");

        priv->seen_frame_header = 1;

#ifdef READ
        start_pos = get_bits_count(rw);
#else
        start_pos = put_bits_count(rw);
#endif

        CHECK(FUNC(uncompressed_header)(ctx, rw, current));

        if (current->show_existing_frame) {
            // No frame data follows; the next frame header is expected.
            priv->seen_frame_header = 0;
        } else {
            priv->seen_frame_header = 1;

            av_buffer_unref(&priv->frame_header_ref);

#ifdef READ
            fh_bits  = get_bits_count(rw) - start_pos;
            fh_start = (uint8_t*)rw->buffer + start_pos / 8;
#else
            // Need to flush the bitwriter so that we can copy its output,
            // but use a copy so we don't affect the caller's structure.
            {
                PutBitContext tmp = *rw;
                flush_put_bits(&tmp);
            }

            fh_bits  = put_bits_count(rw) - start_pos;
            fh_start = rw->buf + start_pos / 8;
#endif

            fh_bytes = (fh_bits + 7) / 8;

            priv->frame_header_size = fh_bits;

            if (rw_buffer_ref) {
                // Keep a reference to the caller's buffer instead of
                // copying; frame_header then points into that buffer.
                priv->frame_header_ref = av_buffer_ref(rw_buffer_ref);
                if (!priv->frame_header_ref)
                    return AVERROR(ENOMEM);
                priv->frame_header = fh_start;
            } else {
                priv->frame_header_ref =
                    av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
                if (!priv->frame_header_ref)
                    return AVERROR(ENOMEM);
                priv->frame_header = priv->frame_header_ref->data;
                memcpy(priv->frame_header, fh_start, fh_bytes);
            }
        }
    }

    return 0;
}
/*
 * Read/write a tile group OBU header (AV1 spec 5.11.1).  Only the header
 * is handled here; the entropy-coded tile data that follows is treated as
 * opaque payload by the caller.
 */
static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawTileGroup *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int num_tiles, tile_bits;
    int err;

    HEADER("Tile Group");

    num_tiles = priv->tile_cols * priv->tile_rows;
    if (num_tiles > 1)
        flag(tile_start_and_end_present_flag);
    else
        infer(tile_start_and_end_present_flag, 0);

    if (num_tiles == 1 || !current->tile_start_and_end_present_flag) {
        // Whole frame in one group.
        infer(tg_start, 0);
        infer(tg_end, num_tiles - 1);
    } else {
        tile_bits = cbs_av1_tile_log2(1, priv->tile_cols) +
                    cbs_av1_tile_log2(1, priv->tile_rows);
        fb(tile_bits, tg_start);
        fb(tile_bits, tg_end);
    }

    CHECK(FUNC(byte_alignment)(ctx, rw));

    // Reset header for next frame.
    if (current->tg_end == num_tiles - 1)
        priv->seen_frame_header = 0;

    // Tile data follows.

    return 0;
}
/*
 * Read/write a frame OBU: a frame header immediately followed by a
 * byte-aligned tile group (AV1 spec 5.10).  The redundant flag is always 0
 * here since a frame OBU cannot carry a redundant header.
 */
static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrame *current,
                           AVBufferRef *rw_buffer_ref)
{
    int err;

    CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header,
                                 0, rw_buffer_ref));

    CHECK(FUNC(byte_alignment)(ctx, rw));

    CHECK(FUNC(tile_group_obu)(ctx, rw, &current->tile_group));

    return 0;
}
/*
 * Read/write a tile list OBU header (AV1 spec 5.12.1).  The tile list
 * entries following the header are handled as opaque data by the caller.
 */
static int FUNC(tile_list_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                               AV1RawTileList *current)
{
    int err;

    fb(8, output_frame_width_in_tiles_minus_1);
    fb(8, output_frame_height_in_tiles_minus_1);
    fb(16, tile_count_minus_1);

    // Tile data follows.

    return 0;
}
/*
 * Read/write HDR content light level metadata (max content light level and
 * max frame-average light level, both 16-bit; AV1 spec 6.7.3).
 */
static int FUNC(metadata_hdr_cll)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawMetadataHDRCLL *current)
{
    int err;

    fb(16, max_cll);
    fb(16, max_fall);

    return 0;
}
/*
 * Read/write HDR mastering display colour volume metadata
 * (AV1 spec 6.7.4): display primaries, white point, and luminance range.
 */
static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataHDRMDCV *current)
{
    int err, i;

    for (i = 0; i < 3; i++) {
        fbs(16, primary_chromaticity_x[i], 1, i);
        fbs(16, primary_chromaticity_y[i], 1, i);
    }

    fb(16, white_point_chromaticity_x);
    fb(16, white_point_chromaticity_y);

    fc(32, luminance_max, 1, MAX_UINT_BITS(32));
    // luminance_min must be lower than luminance_max. Convert luminance_max from
    // 24.8 fixed point to 18.14 fixed point in order to compare them.
    fc(32, luminance_min, 0, FFMIN(((uint64_t)current->luminance_max << 6) - 1,
                                   MAX_UINT_BITS(32)));

    return 0;
}
/*
 * Read/write the scalability structure of scalability metadata
 * (AV1 spec 6.7.6): per-spatial-layer dimensions and descriptions, and
 * the optional temporal group layout.  Requires a sequence header since
 * layer dimensions are bounded by the maximum frame size.
 */
static int FUNC(scalability_structure)(CodedBitstreamContext *ctx, RWContext *rw,
                                       AV1RawMetadataScalability *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq;
    int err, i, j;

    if (!priv->sequence_header) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
               "unable to parse scalability metadata.\n");
        return AVERROR_INVALIDDATA;
    }
    seq = priv->sequence_header;

    fb(2, spatial_layers_cnt_minus_1);
    flag(spatial_layer_dimensions_present_flag);
    flag(spatial_layer_description_present_flag);
    flag(temporal_group_description_present_flag);
    fc(3, scalability_structure_reserved_3bits, 0, 0);
    if (current->spatial_layer_dimensions_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++) {
            // Layer dimensions cannot exceed the sequence maximum.
            fcs(16, spatial_layer_max_width[i],
                0, seq->max_frame_width_minus_1 + 1, 1, i);
            fcs(16, spatial_layer_max_height[i],
                0, seq->max_frame_height_minus_1 + 1, 1, i);
        }
    }
    if (current->spatial_layer_description_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++)
            fbs(8, spatial_layer_ref_id[i], 1, i);
    }
    if (current->temporal_group_description_present_flag) {
        fb(8, temporal_group_size);
        for (i = 0; i < current->temporal_group_size; i++) {
            fbs(3, temporal_group_temporal_id[i], 1, i);
            flags(temporal_group_temporal_switching_up_point_flag[i], 1, i);
            flags(temporal_group_spatial_switching_up_point_flag[i], 1, i);
            fbs(3, temporal_group_ref_cnt[i], 1, i);
            for (j = 0; j < current->temporal_group_ref_cnt[i]; j++) {
                fbs(8, temporal_group_ref_pic_diff[i][j], 2, i, j);
            }
        }
    }

    return 0;
}
/*
 * Read/write scalability metadata (AV1 spec 6.7.5).  Only mode SS carries
 * an explicit scalability structure; other modes are fully described by
 * the mode index alone.
 */
static int FUNC(metadata_scalability)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawMetadataScalability *current)
{
    int err;

    fb(8, scalability_mode_idc);

    if (current->scalability_mode_idc == AV1_SCALABILITY_SS)
        CHECK(FUNC(scalability_structure)(ctx, rw, current));

    return 0;
}
/*
 * Read/write ITU-T T.35 registered user data metadata (AV1 spec 6.7.2):
 * a country code (with optional extension byte) followed by an opaque
 * payload running up to the OBU trailing bits.
 */
static int FUNC(metadata_itut_t35)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataITUTT35 *current)
{
    int err;
    size_t i;

    fb(8, itu_t_t35_country_code);
    if (current->itu_t_t35_country_code == 0xff)
        fb(8, itu_t_t35_country_code_extension_byte);

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, itu_t_t35_payload_bytes[i], current->payload[i],
           0x00, 0xff, 1, i);

    return 0;
}
  1539. static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
  1540. AV1RawMetadataTimecode *current)
  1541. {
  1542. int err;
  1543. fb(5, counting_type);
  1544. flag(full_timestamp_flag);
  1545. flag(discontinuity_flag);
  1546. flag(cnt_dropped_flag);
  1547. fb(9, n_frames);
  1548. if (current->full_timestamp_flag) {
  1549. fc(6, seconds_value, 0, 59);
  1550. fc(6, minutes_value, 0, 59);
  1551. fc(5, hours_value, 0, 23);
  1552. } else {
  1553. flag(seconds_flag);
  1554. if (current->seconds_flag) {
  1555. fc(6, seconds_value, 0, 59);
  1556. flag(minutes_flag);
  1557. if (current->minutes_flag) {
  1558. fc(6, minutes_value, 0, 59);
  1559. flag(hours_flag);
  1560. if (current->hours_flag)
  1561. fc(5, hours_value, 0, 23);
  1562. }
  1563. }
  1564. }
  1565. fb(5, time_offset_length);
  1566. if (current->time_offset_length > 0)
  1567. fb(current->time_offset_length, time_offset_value);
  1568. else
  1569. infer(time_offset_length, 0);
  1570. return 0;
  1571. }
/*
 * Read/write a metadata OBU (AV1 spec 5.8.1): a leb128 metadata type
 * followed by the type-specific payload.  Unknown types are rejected with
 * AVERROR_PATCHWELCOME rather than skipped.
 */
static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawMetadata *current)
{
    int err;

    leb128(metadata_type);

    switch (current->metadata_type) {
    case AV1_METADATA_TYPE_HDR_CLL:
        CHECK(FUNC(metadata_hdr_cll)(ctx, rw, &current->metadata.hdr_cll));
        break;
    case AV1_METADATA_TYPE_HDR_MDCV:
        CHECK(FUNC(metadata_hdr_mdcv)(ctx, rw, &current->metadata.hdr_mdcv));
        break;
    case AV1_METADATA_TYPE_SCALABILITY:
        CHECK(FUNC(metadata_scalability)(ctx, rw, &current->metadata.scalability));
        break;
    case AV1_METADATA_TYPE_ITUT_T35:
        CHECK(FUNC(metadata_itut_t35)(ctx, rw, &current->metadata.itut_t35));
        break;
    case AV1_METADATA_TYPE_TIMECODE:
        CHECK(FUNC(metadata_timecode)(ctx, rw, &current->metadata.timecode));
        break;
    default:
        // Unknown metadata type.
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}
/*
 * Read/write a padding OBU (AV1 spec 5.7): an opaque byte payload running
 * up to the OBU trailing bits.  On read the payload buffer is allocated
 * here; on write the existing payload is emitted unchanged.
 */
static int FUNC(padding_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawPadding *current)
{
    int i, err;

    HEADER("Padding");

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, obu_padding_byte[i], current->payload[i], 0x00, 0xff, 1, i);

    return 0;
}