You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2052 lines
67KB

  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. static int FUNC(obu_header)(CodedBitstreamContext *ctx, RWContext *rw,
  19. AV1RawOBUHeader *current)
  20. {
  21. CodedBitstreamAV1Context *priv = ctx->priv_data;
  22. int err;
  23. HEADER("OBU header");
  24. fc(1, obu_forbidden_bit, 0, 0);
  25. fc(4, obu_type, 0, AV1_OBU_PADDING);
  26. flag(obu_extension_flag);
  27. flag(obu_has_size_field);
  28. fc(1, obu_reserved_1bit, 0, 0);
  29. if (current->obu_extension_flag) {
  30. fb(3, temporal_id);
  31. fb(2, spatial_id);
  32. fc(3, extension_header_reserved_3bits, 0, 0);
  33. } else {
  34. infer(temporal_id, 0);
  35. infer(spatial_id, 0);
  36. }
  37. priv->temporal_id = current->temporal_id;
  38. priv->spatial_id = current->spatial_id;
  39. return 0;
  40. }
  41. static int FUNC(trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw, int nb_bits)
  42. {
  43. int err;
  44. av_assert0(nb_bits > 0);
  45. fixed(1, trailing_one_bit, 1);
  46. --nb_bits;
  47. while (nb_bits > 0) {
  48. fixed(1, trailing_zero_bit, 0);
  49. --nb_bits;
  50. }
  51. return 0;
  52. }
  53. static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
  54. {
  55. int err;
  56. while (byte_alignment(rw) != 0)
  57. fixed(1, zero_bit, 0);
  58. return 0;
  59. }
  60. static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw,
  61. AV1RawColorConfig *current, int seq_profile)
  62. {
  63. CodedBitstreamAV1Context *priv = ctx->priv_data;
  64. int err;
  65. flag(high_bitdepth);
  66. if (seq_profile == FF_PROFILE_AV1_PROFESSIONAL &&
  67. current->high_bitdepth) {
  68. flag(twelve_bit);
  69. priv->bit_depth = current->twelve_bit ? 12 : 10;
  70. } else {
  71. priv->bit_depth = current->high_bitdepth ? 10 : 8;
  72. }
  73. if (seq_profile == FF_PROFILE_AV1_HIGH)
  74. infer(mono_chrome, 0);
  75. else
  76. flag(mono_chrome);
  77. priv->num_planes = current->mono_chrome ? 1 : 3;
  78. flag(color_description_present_flag);
  79. if (current->color_description_present_flag) {
  80. fb(8, color_primaries);
  81. fb(8, transfer_characteristics);
  82. fb(8, matrix_coefficients);
  83. } else {
  84. infer(color_primaries, AVCOL_PRI_UNSPECIFIED);
  85. infer(transfer_characteristics, AVCOL_TRC_UNSPECIFIED);
  86. infer(matrix_coefficients, AVCOL_SPC_UNSPECIFIED);
  87. }
  88. if (current->mono_chrome) {
  89. flag(color_range);
  90. infer(subsampling_x, 1);
  91. infer(subsampling_y, 1);
  92. infer(chroma_sample_position, AV1_CSP_UNKNOWN);
  93. infer(separate_uv_delta_q, 0);
  94. } else if (current->color_primaries == AVCOL_PRI_BT709 &&
  95. current->transfer_characteristics == AVCOL_TRC_IEC61966_2_1 &&
  96. current->matrix_coefficients == AVCOL_SPC_RGB) {
  97. infer(color_range, 1);
  98. infer(subsampling_x, 0);
  99. infer(subsampling_y, 0);
  100. flag(separate_uv_delta_q);
  101. } else {
  102. flag(color_range);
  103. if (seq_profile == FF_PROFILE_AV1_MAIN) {
  104. infer(subsampling_x, 1);
  105. infer(subsampling_y, 1);
  106. } else if (seq_profile == FF_PROFILE_AV1_HIGH) {
  107. infer(subsampling_x, 0);
  108. infer(subsampling_y, 0);
  109. } else {
  110. if (priv->bit_depth == 12) {
  111. fb(1, subsampling_x);
  112. if (current->subsampling_x)
  113. fb(1, subsampling_y);
  114. else
  115. infer(subsampling_y, 0);
  116. } else {
  117. infer(subsampling_x, 1);
  118. infer(subsampling_y, 0);
  119. }
  120. }
  121. if (current->subsampling_x && current->subsampling_y) {
  122. fc(2, chroma_sample_position, AV1_CSP_UNKNOWN,
  123. AV1_CSP_COLOCATED);
  124. }
  125. flag(separate_uv_delta_q);
  126. }
  127. return 0;
  128. }
  129. static int FUNC(timing_info)(CodedBitstreamContext *ctx, RWContext *rw,
  130. AV1RawTimingInfo *current)
  131. {
  132. int err;
  133. fc(32, num_units_in_display_tick, 1, MAX_UINT_BITS(32));
  134. fc(32, time_scale, 1, MAX_UINT_BITS(32));
  135. flag(equal_picture_interval);
  136. if (current->equal_picture_interval)
  137. uvlc(num_ticks_per_picture_minus_1, 0, MAX_UINT_BITS(32) - 1);
  138. return 0;
  139. }
  140. static int FUNC(decoder_model_info)(CodedBitstreamContext *ctx, RWContext *rw,
  141. AV1RawDecoderModelInfo *current)
  142. {
  143. int err;
  144. fb(5, buffer_delay_length_minus_1);
  145. fb(32, num_units_in_decoding_tick);
  146. fb(5, buffer_removal_time_length_minus_1);
  147. fb(5, frame_presentation_time_length_minus_1);
  148. return 0;
  149. }
  150. static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
  151. AV1RawSequenceHeader *current)
  152. {
  153. int i, err;
  154. HEADER("Sequence Header");
  155. fc(3, seq_profile, FF_PROFILE_AV1_MAIN,
  156. FF_PROFILE_AV1_PROFESSIONAL);
  157. flag(still_picture);
  158. flag(reduced_still_picture_header);
  159. if (current->reduced_still_picture_header) {
  160. infer(timing_info_present_flag, 0);
  161. infer(decoder_model_info_present_flag, 0);
  162. infer(initial_display_delay_present_flag, 0);
  163. infer(operating_points_cnt_minus_1, 0);
  164. infer(operating_point_idc[0], 0);
  165. fb(5, seq_level_idx[0]);
  166. infer(seq_tier[0], 0);
  167. infer(decoder_model_present_for_this_op[0], 0);
  168. infer(initial_display_delay_present_for_this_op[0], 0);
  169. } else {
  170. flag(timing_info_present_flag);
  171. if (current->timing_info_present_flag) {
  172. CHECK(FUNC(timing_info)(ctx, rw, &current->timing_info));
  173. flag(decoder_model_info_present_flag);
  174. if (current->decoder_model_info_present_flag) {
  175. CHECK(FUNC(decoder_model_info)
  176. (ctx, rw, &current->decoder_model_info));
  177. }
  178. } else {
  179. infer(decoder_model_info_present_flag, 0);
  180. }
  181. flag(initial_display_delay_present_flag);
  182. fb(5, operating_points_cnt_minus_1);
  183. for (i = 0; i <= current->operating_points_cnt_minus_1; i++) {
  184. fbs(12, operating_point_idc[i], 1, i);
  185. fbs(5, seq_level_idx[i], 1, i);
  186. if (current->seq_level_idx[i] > 7)
  187. flags(seq_tier[i], 1, i);
  188. else
  189. infer(seq_tier[i], 0);
  190. if (current->decoder_model_info_present_flag) {
  191. flags(decoder_model_present_for_this_op[i], 1, i);
  192. if (current->decoder_model_present_for_this_op[i]) {
  193. int n = current->decoder_model_info.buffer_delay_length_minus_1 + 1;
  194. fbs(n, decoder_buffer_delay[i], 1, i);
  195. fbs(n, encoder_buffer_delay[i], 1, i);
  196. flags(low_delay_mode_flag[i], 1, i);
  197. }
  198. } else {
  199. infer(decoder_model_present_for_this_op[i], 0);
  200. }
  201. if (current->initial_display_delay_present_flag) {
  202. flags(initial_display_delay_present_for_this_op[i], 1, i);
  203. if (current->initial_display_delay_present_for_this_op[i])
  204. fbs(4, initial_display_delay_minus_1[i], 1, i);
  205. }
  206. }
  207. }
  208. fb(4, frame_width_bits_minus_1);
  209. fb(4, frame_height_bits_minus_1);
  210. fb(current->frame_width_bits_minus_1 + 1, max_frame_width_minus_1);
  211. fb(current->frame_height_bits_minus_1 + 1, max_frame_height_minus_1);
  212. if (current->reduced_still_picture_header)
  213. infer(frame_id_numbers_present_flag, 0);
  214. else
  215. flag(frame_id_numbers_present_flag);
  216. if (current->frame_id_numbers_present_flag) {
  217. fb(4, delta_frame_id_length_minus_2);
  218. fb(3, additional_frame_id_length_minus_1);
  219. }
  220. flag(use_128x128_superblock);
  221. flag(enable_filter_intra);
  222. flag(enable_intra_edge_filter);
  223. if (current->reduced_still_picture_header) {
  224. infer(enable_interintra_compound, 0);
  225. infer(enable_masked_compound, 0);
  226. infer(enable_warped_motion, 0);
  227. infer(enable_dual_filter, 0);
  228. infer(enable_order_hint, 0);
  229. infer(enable_jnt_comp, 0);
  230. infer(enable_ref_frame_mvs, 0);
  231. infer(seq_force_screen_content_tools,
  232. AV1_SELECT_SCREEN_CONTENT_TOOLS);
  233. infer(seq_force_integer_mv,
  234. AV1_SELECT_INTEGER_MV);
  235. } else {
  236. flag(enable_interintra_compound);
  237. flag(enable_masked_compound);
  238. flag(enable_warped_motion);
  239. flag(enable_dual_filter);
  240. flag(enable_order_hint);
  241. if (current->enable_order_hint) {
  242. flag(enable_jnt_comp);
  243. flag(enable_ref_frame_mvs);
  244. } else {
  245. infer(enable_jnt_comp, 0);
  246. infer(enable_ref_frame_mvs, 0);
  247. }
  248. flag(seq_choose_screen_content_tools);
  249. if (current->seq_choose_screen_content_tools)
  250. infer(seq_force_screen_content_tools,
  251. AV1_SELECT_SCREEN_CONTENT_TOOLS);
  252. else
  253. fb(1, seq_force_screen_content_tools);
  254. if (current->seq_force_screen_content_tools > 0) {
  255. flag(seq_choose_integer_mv);
  256. if (current->seq_choose_integer_mv)
  257. infer(seq_force_integer_mv,
  258. AV1_SELECT_INTEGER_MV);
  259. else
  260. fb(1, seq_force_integer_mv);
  261. } else {
  262. infer(seq_force_integer_mv, AV1_SELECT_INTEGER_MV);
  263. }
  264. if (current->enable_order_hint)
  265. fb(3, order_hint_bits_minus_1);
  266. }
  267. flag(enable_superres);
  268. flag(enable_cdef);
  269. flag(enable_restoration);
  270. CHECK(FUNC(color_config)(ctx, rw, &current->color_config,
  271. current->seq_profile));
  272. flag(film_grain_params_present);
  273. return 0;
  274. }
  275. static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *rw)
  276. {
  277. CodedBitstreamAV1Context *priv = ctx->priv_data;
  278. HEADER("Temporal Delimiter");
  279. priv->seen_frame_header = 0;
  280. return 0;
  281. }
  282. static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
  283. AV1RawFrameHeader *current)
  284. {
  285. CodedBitstreamAV1Context *priv = ctx->priv_data;
  286. const AV1RawSequenceHeader *seq = priv->sequence_header;
  287. static const uint8_t ref_frame_list[AV1_NUM_REF_FRAMES - 2] = {
  288. AV1_REF_FRAME_LAST2, AV1_REF_FRAME_LAST3, AV1_REF_FRAME_BWDREF,
  289. AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
  290. };
  291. int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
  292. int8_t shifted_order_hints[AV1_NUM_REF_FRAMES];
  293. int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
  294. int i, j;
  295. for (i = 0; i < AV1_REFS_PER_FRAME; i++)
  296. ref_frame_idx[i] = -1;
  297. ref_frame_idx[AV1_REF_FRAME_LAST - AV1_REF_FRAME_LAST] = current->last_frame_idx;
  298. ref_frame_idx[AV1_REF_FRAME_GOLDEN - AV1_REF_FRAME_LAST] = current->golden_frame_idx;
  299. for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
  300. used_frame[i] = 0;
  301. used_frame[current->last_frame_idx] = 1;
  302. used_frame[current->golden_frame_idx] = 1;
  303. cur_frame_hint = 1 << (seq->order_hint_bits_minus_1);
  304. for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
  305. shifted_order_hints[i] = cur_frame_hint +
  306. cbs_av1_get_relative_dist(seq, priv->ref[i].order_hint,
  307. priv->order_hint);
  308. latest_order_hint = shifted_order_hints[current->last_frame_idx];
  309. earliest_order_hint = shifted_order_hints[current->golden_frame_idx];
  310. ref = -1;
  311. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  312. int hint = shifted_order_hints[i];
  313. if (!used_frame[i] && hint >= cur_frame_hint &&
  314. (ref < 0 || hint >= latest_order_hint)) {
  315. ref = i;
  316. latest_order_hint = hint;
  317. }
  318. }
  319. if (ref >= 0) {
  320. ref_frame_idx[AV1_REF_FRAME_ALTREF - AV1_REF_FRAME_LAST] = ref;
  321. used_frame[ref] = 1;
  322. }
  323. ref = -1;
  324. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  325. int hint = shifted_order_hints[i];
  326. if (!used_frame[i] && hint >= cur_frame_hint &&
  327. (ref < 0 || hint < earliest_order_hint)) {
  328. ref = i;
  329. earliest_order_hint = hint;
  330. }
  331. }
  332. if (ref >= 0) {
  333. ref_frame_idx[AV1_REF_FRAME_BWDREF - AV1_REF_FRAME_LAST] = ref;
  334. used_frame[ref] = 1;
  335. }
  336. ref = -1;
  337. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  338. int hint = shifted_order_hints[i];
  339. if (!used_frame[i] && hint >= cur_frame_hint &&
  340. (ref < 0 || hint < earliest_order_hint)) {
  341. ref = i;
  342. earliest_order_hint = hint;
  343. }
  344. }
  345. if (ref >= 0) {
  346. ref_frame_idx[AV1_REF_FRAME_ALTREF2 - AV1_REF_FRAME_LAST] = ref;
  347. used_frame[ref] = 1;
  348. }
  349. for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
  350. int ref_frame = ref_frame_list[i];
  351. if (ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] < 0 ) {
  352. ref = -1;
  353. for (j = 0; j < AV1_NUM_REF_FRAMES; j++) {
  354. int hint = shifted_order_hints[j];
  355. if (!used_frame[j] && hint < cur_frame_hint &&
  356. (ref < 0 || hint >= latest_order_hint)) {
  357. ref = j;
  358. latest_order_hint = hint;
  359. }
  360. }
  361. if (ref >= 0) {
  362. ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] = ref;
  363. used_frame[ref] = 1;
  364. }
  365. }
  366. }
  367. ref = -1;
  368. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  369. int hint = shifted_order_hints[i];
  370. if (ref < 0 || hint < earliest_order_hint) {
  371. ref = i;
  372. earliest_order_hint = hint;
  373. }
  374. }
  375. for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
  376. if (ref_frame_idx[i] < 0)
  377. ref_frame_idx[i] = ref;
  378. infer(ref_frame_idx[i], ref_frame_idx[i]);
  379. }
  380. return 0;
  381. }
  382. static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
  383. AV1RawFrameHeader *current)
  384. {
  385. CodedBitstreamAV1Context *priv = ctx->priv_data;
  386. const AV1RawSequenceHeader *seq = priv->sequence_header;
  387. int denom, err;
  388. if (seq->enable_superres)
  389. flag(use_superres);
  390. else
  391. infer(use_superres, 0);
  392. if (current->use_superres) {
  393. fb(3, coded_denom);
  394. denom = current->coded_denom + AV1_SUPERRES_DENOM_MIN;
  395. } else {
  396. denom = AV1_SUPERRES_NUM;
  397. }
  398. priv->upscaled_width = priv->frame_width;
  399. priv->frame_width = (priv->upscaled_width * AV1_SUPERRES_NUM +
  400. denom / 2) / denom;
  401. return 0;
  402. }
  403. static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw,
  404. AV1RawFrameHeader *current)
  405. {
  406. CodedBitstreamAV1Context *priv = ctx->priv_data;
  407. const AV1RawSequenceHeader *seq = priv->sequence_header;
  408. int err;
  409. if (current->frame_size_override_flag) {
  410. fb(seq->frame_width_bits_minus_1 + 1, frame_width_minus_1);
  411. fb(seq->frame_height_bits_minus_1 + 1, frame_height_minus_1);
  412. } else {
  413. infer(frame_width_minus_1, seq->max_frame_width_minus_1);
  414. infer(frame_height_minus_1, seq->max_frame_height_minus_1);
  415. }
  416. priv->frame_width = current->frame_width_minus_1 + 1;
  417. priv->frame_height = current->frame_height_minus_1 + 1;
  418. CHECK(FUNC(superres_params)(ctx, rw, current));
  419. return 0;
  420. }
  421. static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw,
  422. AV1RawFrameHeader *current)
  423. {
  424. CodedBitstreamAV1Context *priv = ctx->priv_data;
  425. int err;
  426. flag(render_and_frame_size_different);
  427. if (current->render_and_frame_size_different) {
  428. fb(16, render_width_minus_1);
  429. fb(16, render_height_minus_1);
  430. } else {
  431. infer(render_width_minus_1, current->frame_width_minus_1);
  432. infer(render_height_minus_1, current->frame_height_minus_1);
  433. }
  434. priv->render_width = current->render_width_minus_1 + 1;
  435. priv->render_height = current->render_height_minus_1 + 1;
  436. return 0;
  437. }
  438. static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
  439. AV1RawFrameHeader *current)
  440. {
  441. CodedBitstreamAV1Context *priv = ctx->priv_data;
  442. int i, err;
  443. for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
  444. flags(found_ref[i], 1, i);
  445. if (current->found_ref[i]) {
  446. AV1ReferenceFrameState *ref =
  447. &priv->ref[current->ref_frame_idx[i]];
  448. if (!ref->valid) {
  449. av_log(ctx->log_ctx, AV_LOG_ERROR,
  450. "Missing reference frame needed for frame size "
  451. "(ref = %d, ref_frame_idx = %d).\n",
  452. i, current->ref_frame_idx[i]);
  453. return AVERROR_INVALIDDATA;
  454. }
  455. infer(frame_width_minus_1, ref->upscaled_width - 1);
  456. infer(frame_height_minus_1, ref->frame_height - 1);
  457. infer(render_width_minus_1, ref->render_width - 1);
  458. infer(render_height_minus_1, ref->render_height - 1);
  459. priv->upscaled_width = ref->upscaled_width;
  460. priv->frame_width = priv->upscaled_width;
  461. priv->frame_height = ref->frame_height;
  462. priv->render_width = ref->render_width;
  463. priv->render_height = ref->render_height;
  464. break;
  465. }
  466. }
  467. if (i >= AV1_REFS_PER_FRAME) {
  468. CHECK(FUNC(frame_size)(ctx, rw, current));
  469. CHECK(FUNC(render_size)(ctx, rw, current));
  470. } else {
  471. CHECK(FUNC(superres_params)(ctx, rw, current));
  472. }
  473. return 0;
  474. }
  475. static int FUNC(interpolation_filter)(CodedBitstreamContext *ctx, RWContext *rw,
  476. AV1RawFrameHeader *current)
  477. {
  478. int err;
  479. flag(is_filter_switchable);
  480. if (current->is_filter_switchable)
  481. infer(interpolation_filter,
  482. AV1_INTERPOLATION_FILTER_SWITCHABLE);
  483. else
  484. fb(2, interpolation_filter);
  485. return 0;
  486. }
  487. static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
  488. AV1RawFrameHeader *current)
  489. {
  490. CodedBitstreamAV1Context *priv = ctx->priv_data;
  491. const AV1RawSequenceHeader *seq = priv->sequence_header;
  492. int mi_cols, mi_rows, sb_cols, sb_rows, sb_shift, sb_size;
  493. int max_tile_width_sb, max_tile_height_sb, max_tile_area_sb;
  494. int min_log2_tile_cols, max_log2_tile_cols, max_log2_tile_rows;
  495. int min_log2_tiles, min_log2_tile_rows;
  496. int i, err;
  497. mi_cols = 2 * ((priv->frame_width + 7) >> 3);
  498. mi_rows = 2 * ((priv->frame_height + 7) >> 3);
  499. sb_cols = seq->use_128x128_superblock ? ((mi_cols + 31) >> 5)
  500. : ((mi_cols + 15) >> 4);
  501. sb_rows = seq->use_128x128_superblock ? ((mi_rows + 31) >> 5)
  502. : ((mi_rows + 15) >> 4);
  503. sb_shift = seq->use_128x128_superblock ? 5 : 4;
  504. sb_size = sb_shift + 2;
  505. max_tile_width_sb = AV1_MAX_TILE_WIDTH >> sb_size;
  506. max_tile_area_sb = AV1_MAX_TILE_AREA >> (2 * sb_size);
  507. min_log2_tile_cols = cbs_av1_tile_log2(max_tile_width_sb, sb_cols);
  508. max_log2_tile_cols = cbs_av1_tile_log2(1, FFMIN(sb_cols, AV1_MAX_TILE_COLS));
  509. max_log2_tile_rows = cbs_av1_tile_log2(1, FFMIN(sb_rows, AV1_MAX_TILE_ROWS));
  510. min_log2_tiles = FFMAX(min_log2_tile_cols,
  511. cbs_av1_tile_log2(max_tile_area_sb, sb_rows * sb_cols));
  512. flag(uniform_tile_spacing_flag);
  513. if (current->uniform_tile_spacing_flag) {
  514. int tile_width_sb, tile_height_sb;
  515. increment(tile_cols_log2, min_log2_tile_cols, max_log2_tile_cols);
  516. tile_width_sb = (sb_cols + (1 << current->tile_cols_log2) - 1) >>
  517. current->tile_cols_log2;
  518. current->tile_cols = (sb_cols + tile_width_sb - 1) / tile_width_sb;
  519. min_log2_tile_rows = FFMAX(min_log2_tiles - current->tile_cols_log2, 0);
  520. increment(tile_rows_log2, min_log2_tile_rows, max_log2_tile_rows);
  521. tile_height_sb = (sb_rows + (1 << current->tile_rows_log2) - 1) >>
  522. current->tile_rows_log2;
  523. current->tile_rows = (sb_rows + tile_height_sb - 1) / tile_height_sb;
  524. for (i = 0; i < current->tile_cols - 1; i++)
  525. infer(width_in_sbs_minus_1[i], tile_width_sb - 1);
  526. infer(width_in_sbs_minus_1[i],
  527. sb_cols - (current->tile_cols - 1) * tile_width_sb - 1);
  528. for (i = 0; i < current->tile_rows - 1; i++)
  529. infer(height_in_sbs_minus_1[i], tile_height_sb - 1);
  530. infer(height_in_sbs_minus_1[i],
  531. sb_rows - (current->tile_rows - 1) * tile_height_sb - 1);
  532. } else {
  533. int widest_tile_sb, start_sb, size_sb, max_width, max_height;
  534. widest_tile_sb = 0;
  535. start_sb = 0;
  536. for (i = 0; start_sb < sb_cols && i < AV1_MAX_TILE_COLS; i++) {
  537. max_width = FFMIN(sb_cols - start_sb, max_tile_width_sb);
  538. ns(max_width, width_in_sbs_minus_1[i], 1, i);
  539. size_sb = current->width_in_sbs_minus_1[i] + 1;
  540. widest_tile_sb = FFMAX(size_sb, widest_tile_sb);
  541. start_sb += size_sb;
  542. }
  543. current->tile_cols_log2 = cbs_av1_tile_log2(1, i);
  544. current->tile_cols = i;
  545. if (min_log2_tiles > 0)
  546. max_tile_area_sb = (sb_rows * sb_cols) >> (min_log2_tiles + 1);
  547. else
  548. max_tile_area_sb = sb_rows * sb_cols;
  549. max_tile_height_sb = FFMAX(max_tile_area_sb / widest_tile_sb, 1);
  550. start_sb = 0;
  551. for (i = 0; start_sb < sb_rows && i < AV1_MAX_TILE_ROWS; i++) {
  552. max_height = FFMIN(sb_rows - start_sb, max_tile_height_sb);
  553. ns(max_height, height_in_sbs_minus_1[i], 1, i);
  554. size_sb = current->height_in_sbs_minus_1[i] + 1;
  555. start_sb += size_sb;
  556. }
  557. current->tile_rows_log2 = cbs_av1_tile_log2(1, i);
  558. current->tile_rows = i;
  559. }
  560. if (current->tile_cols_log2 > 0 ||
  561. current->tile_rows_log2 > 0) {
  562. fb(current->tile_cols_log2 + current->tile_rows_log2,
  563. context_update_tile_id);
  564. fb(2, tile_size_bytes_minus1);
  565. } else {
  566. infer(context_update_tile_id, 0);
  567. }
  568. priv->tile_cols = current->tile_cols;
  569. priv->tile_rows = current->tile_rows;
  570. return 0;
  571. }
  572. static int FUNC(quantization_params)(CodedBitstreamContext *ctx, RWContext *rw,
  573. AV1RawFrameHeader *current)
  574. {
  575. CodedBitstreamAV1Context *priv = ctx->priv_data;
  576. const AV1RawSequenceHeader *seq = priv->sequence_header;
  577. int err;
  578. fb(8, base_q_idx);
  579. delta_q(delta_q_y_dc);
  580. if (priv->num_planes > 1) {
  581. if (seq->color_config.separate_uv_delta_q)
  582. flag(diff_uv_delta);
  583. else
  584. infer(diff_uv_delta, 0);
  585. delta_q(delta_q_u_dc);
  586. delta_q(delta_q_u_ac);
  587. if (current->diff_uv_delta) {
  588. delta_q(delta_q_v_dc);
  589. delta_q(delta_q_v_ac);
  590. } else {
  591. infer(delta_q_v_dc, current->delta_q_u_dc);
  592. infer(delta_q_v_ac, current->delta_q_u_ac);
  593. }
  594. } else {
  595. infer(delta_q_u_dc, 0);
  596. infer(delta_q_u_ac, 0);
  597. infer(delta_q_v_dc, 0);
  598. infer(delta_q_v_ac, 0);
  599. }
  600. flag(using_qmatrix);
  601. if (current->using_qmatrix) {
  602. fb(4, qm_y);
  603. fb(4, qm_u);
  604. if (seq->color_config.separate_uv_delta_q)
  605. fb(4, qm_v);
  606. else
  607. infer(qm_v, current->qm_u);
  608. }
  609. return 0;
  610. }
  611. static int FUNC(segmentation_params)(CodedBitstreamContext *ctx, RWContext *rw,
  612. AV1RawFrameHeader *current)
  613. {
  614. CodedBitstreamAV1Context *priv = ctx->priv_data;
  615. static const uint8_t bits[AV1_SEG_LVL_MAX] = { 8, 6, 6, 6, 6, 3, 0, 0 };
  616. static const uint8_t sign[AV1_SEG_LVL_MAX] = { 1, 1, 1, 1, 1, 0, 0, 0 };
  617. static const uint8_t default_feature_enabled[AV1_SEG_LVL_MAX] = { 0 };
  618. static const int16_t default_feature_value[AV1_SEG_LVL_MAX] = { 0 };
  619. int i, j, err;
  620. flag(segmentation_enabled);
  621. if (current->segmentation_enabled) {
  622. if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
  623. infer(segmentation_update_map, 1);
  624. infer(segmentation_temporal_update, 0);
  625. infer(segmentation_update_data, 1);
  626. } else {
  627. flag(segmentation_update_map);
  628. if (current->segmentation_update_map)
  629. flag(segmentation_temporal_update);
  630. else
  631. infer(segmentation_temporal_update, 0);
  632. flag(segmentation_update_data);
  633. }
  634. for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
  635. const uint8_t *ref_feature_enabled;
  636. const int16_t *ref_feature_value;
  637. if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
  638. ref_feature_enabled = default_feature_enabled;
  639. ref_feature_value = default_feature_value;
  640. } else {
  641. ref_feature_enabled =
  642. priv->ref[current->ref_frame_idx[current->primary_ref_frame]].feature_enabled[i];
  643. ref_feature_value =
  644. priv->ref[current->ref_frame_idx[current->primary_ref_frame]].feature_value[i];
  645. }
  646. for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
  647. if (current->segmentation_update_data) {
  648. flags(feature_enabled[i][j], 2, i, j);
  649. if (current->feature_enabled[i][j] && bits[j] > 0) {
  650. if (sign[j])
  651. sus(1 + bits[j], feature_value[i][j], 2, i, j);
  652. else
  653. fbs(bits[j], feature_value[i][j], 2, i, j);
  654. } else {
  655. infer(feature_value[i][j], 0);
  656. }
  657. } else {
  658. infer(feature_enabled[i][j], ref_feature_enabled[j]);
  659. infer(feature_value[i][j], ref_feature_value[j]);
  660. }
  661. }
  662. }
  663. } else {
  664. for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
  665. for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
  666. infer(feature_enabled[i][j], 0);
  667. infer(feature_value[i][j], 0);
  668. }
  669. }
  670. }
  671. return 0;
  672. }
  673. static int FUNC(delta_q_params)(CodedBitstreamContext *ctx, RWContext *rw,
  674. AV1RawFrameHeader *current)
  675. {
  676. int err;
  677. if (current->base_q_idx > 0)
  678. flag(delta_q_present);
  679. else
  680. infer(delta_q_present, 0);
  681. if (current->delta_q_present)
  682. fb(2, delta_q_res);
  683. return 0;
  684. }
  685. static int FUNC(delta_lf_params)(CodedBitstreamContext *ctx, RWContext *rw,
  686. AV1RawFrameHeader *current)
  687. {
  688. int err;
  689. if (current->delta_q_present) {
  690. if (!current->allow_intrabc)
  691. flag(delta_lf_present);
  692. else
  693. infer(delta_lf_present, 0);
  694. if (current->delta_lf_present) {
  695. fb(2, delta_lf_res);
  696. flag(delta_lf_multi);
  697. } else {
  698. infer(delta_lf_res, 0);
  699. infer(delta_lf_multi, 0);
  700. }
  701. } else {
  702. infer(delta_lf_present, 0);
  703. infer(delta_lf_res, 0);
  704. infer(delta_lf_multi, 0);
  705. }
  706. return 0;
  707. }
  708. static int FUNC(loop_filter_params)(CodedBitstreamContext *ctx, RWContext *rw,
  709. AV1RawFrameHeader *current)
  710. {
  711. CodedBitstreamAV1Context *priv = ctx->priv_data;
  712. static const int8_t default_loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME] =
  713. { 1, 0, 0, 0, -1, 0, -1, -1 };
  714. static const int8_t default_loop_filter_mode_deltas[2] = { 0, 0 };
  715. int i, err;
  716. if (priv->coded_lossless || current->allow_intrabc) {
  717. infer(loop_filter_level[0], 0);
  718. infer(loop_filter_level[1], 0);
  719. infer(loop_filter_ref_deltas[AV1_REF_FRAME_INTRA], 1);
  720. infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST], 0);
  721. infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST2], 0);
  722. infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST3], 0);
  723. infer(loop_filter_ref_deltas[AV1_REF_FRAME_BWDREF], 0);
  724. infer(loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN], -1);
  725. infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF], -1);
  726. infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2], -1);
  727. for (i = 0; i < 2; i++)
  728. infer(loop_filter_mode_deltas[i], 0);
  729. return 0;
  730. }
  731. fb(6, loop_filter_level[0]);
  732. fb(6, loop_filter_level[1]);
  733. if (priv->num_planes > 1) {
  734. if (current->loop_filter_level[0] ||
  735. current->loop_filter_level[1]) {
  736. fb(6, loop_filter_level[2]);
  737. fb(6, loop_filter_level[3]);
  738. }
  739. }
  740. fb(3, loop_filter_sharpness);
  741. flag(loop_filter_delta_enabled);
  742. if (current->loop_filter_delta_enabled) {
  743. const int8_t *ref_loop_filter_ref_deltas, *ref_loop_filter_mode_deltas;
  744. if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
  745. ref_loop_filter_ref_deltas = default_loop_filter_ref_deltas;
  746. ref_loop_filter_mode_deltas = default_loop_filter_mode_deltas;
  747. } else {
  748. ref_loop_filter_ref_deltas =
  749. priv->ref[current->ref_frame_idx[current->primary_ref_frame]].loop_filter_ref_deltas;
  750. ref_loop_filter_mode_deltas =
  751. priv->ref[current->ref_frame_idx[current->primary_ref_frame]].loop_filter_mode_deltas;
  752. }
  753. flag(loop_filter_delta_update);
  754. for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
  755. if (current->loop_filter_delta_update)
  756. flags(update_ref_delta[i], 1, i);
  757. else
  758. infer(update_ref_delta[i], 0);
  759. if (current->update_ref_delta[i])
  760. sus(1 + 6, loop_filter_ref_deltas[i], 1, i);
  761. else
  762. infer(loop_filter_ref_deltas[i], ref_loop_filter_ref_deltas[i]);
  763. }
  764. for (i = 0; i < 2; i++) {
  765. if (current->loop_filter_delta_update)
  766. flags(update_mode_delta[i], 1, i);
  767. else
  768. infer(update_mode_delta[i], 0);
  769. if (current->update_mode_delta[i])
  770. sus(1 + 6, loop_filter_mode_deltas[i], 1, i);
  771. else
  772. infer(loop_filter_mode_deltas[i], ref_loop_filter_mode_deltas[i]);
  773. }
  774. } else {
  775. for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++)
  776. infer(loop_filter_ref_deltas[i], default_loop_filter_ref_deltas[i]);
  777. for (i = 0; i < 2; i++)
  778. infer(loop_filter_mode_deltas[i], default_loop_filter_mode_deltas[i]);
  779. }
  780. return 0;
  781. }
  782. static int FUNC(cdef_params)(CodedBitstreamContext *ctx, RWContext *rw,
  783. AV1RawFrameHeader *current)
  784. {
  785. CodedBitstreamAV1Context *priv = ctx->priv_data;
  786. const AV1RawSequenceHeader *seq = priv->sequence_header;
  787. int i, err;
  788. if (priv->coded_lossless || current->allow_intrabc ||
  789. !seq->enable_cdef) {
  790. infer(cdef_damping_minus_3, 0);
  791. infer(cdef_bits, 0);
  792. infer(cdef_y_pri_strength[0], 0);
  793. infer(cdef_y_sec_strength[0], 0);
  794. infer(cdef_uv_pri_strength[0], 0);
  795. infer(cdef_uv_sec_strength[0], 0);
  796. return 0;
  797. }
  798. fb(2, cdef_damping_minus_3);
  799. fb(2, cdef_bits);
  800. for (i = 0; i < (1 << current->cdef_bits); i++) {
  801. fbs(4, cdef_y_pri_strength[i], 1, i);
  802. fbs(2, cdef_y_sec_strength[i], 1, i);
  803. if (priv->num_planes > 1) {
  804. fbs(4, cdef_uv_pri_strength[i], 1, i);
  805. fbs(2, cdef_uv_sec_strength[i], 1, i);
  806. }
  807. }
  808. return 0;
  809. }
  810. static int FUNC(lr_params)(CodedBitstreamContext *ctx, RWContext *rw,
  811. AV1RawFrameHeader *current)
  812. {
  813. CodedBitstreamAV1Context *priv = ctx->priv_data;
  814. const AV1RawSequenceHeader *seq = priv->sequence_header;
  815. int uses_lr, uses_chroma_lr;
  816. int i, err;
  817. if (priv->all_lossless || current->allow_intrabc ||
  818. !seq->enable_restoration) {
  819. return 0;
  820. }
  821. uses_lr = uses_chroma_lr = 0;
  822. for (i = 0; i < priv->num_planes; i++) {
  823. fbs(2, lr_type[i], 1, i);
  824. if (current->lr_type[i] != AV1_RESTORE_NONE) {
  825. uses_lr = 1;
  826. if (i > 0)
  827. uses_chroma_lr = 1;
  828. }
  829. }
  830. if (uses_lr) {
  831. if (seq->use_128x128_superblock)
  832. increment(lr_unit_shift, 1, 2);
  833. else
  834. increment(lr_unit_shift, 0, 2);
  835. if(seq->color_config.subsampling_x &&
  836. seq->color_config.subsampling_y && uses_chroma_lr) {
  837. fb(1, lr_uv_shift);
  838. } else {
  839. infer(lr_uv_shift, 0);
  840. }
  841. }
  842. return 0;
  843. }
  844. static int FUNC(read_tx_mode)(CodedBitstreamContext *ctx, RWContext *rw,
  845. AV1RawFrameHeader *current)
  846. {
  847. CodedBitstreamAV1Context *priv = ctx->priv_data;
  848. int err;
  849. if (priv->coded_lossless)
  850. infer(tx_mode, 0);
  851. else
  852. increment(tx_mode, 1, 2);
  853. return 0;
  854. }
  855. static int FUNC(frame_reference_mode)(CodedBitstreamContext *ctx, RWContext *rw,
  856. AV1RawFrameHeader *current)
  857. {
  858. int err;
  859. if (current->frame_type == AV1_FRAME_INTRA_ONLY ||
  860. current->frame_type == AV1_FRAME_KEY)
  861. infer(reference_select, 0);
  862. else
  863. flag(reference_select);
  864. return 0;
  865. }
  866. static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
  867. AV1RawFrameHeader *current)
  868. {
  869. CodedBitstreamAV1Context *priv = ctx->priv_data;
  870. const AV1RawSequenceHeader *seq = priv->sequence_header;
  871. int skip_mode_allowed;
  872. int err;
  873. if (current->frame_type == AV1_FRAME_KEY ||
  874. current->frame_type == AV1_FRAME_INTRA_ONLY ||
  875. !current->reference_select || !seq->enable_order_hint) {
  876. skip_mode_allowed = 0;
  877. } else {
  878. int forward_idx, backward_idx;
  879. int forward_hint, backward_hint;
  880. int ref_hint, dist, i;
  881. forward_idx = -1;
  882. backward_idx = -1;
  883. for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
  884. ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
  885. dist = cbs_av1_get_relative_dist(seq, ref_hint,
  886. priv->order_hint);
  887. if (dist < 0) {
  888. if (forward_idx < 0 ||
  889. cbs_av1_get_relative_dist(seq, ref_hint,
  890. forward_hint) > 0) {
  891. forward_idx = i;
  892. forward_hint = ref_hint;
  893. }
  894. } else if (dist > 0) {
  895. if (backward_idx < 0 ||
  896. cbs_av1_get_relative_dist(seq, ref_hint,
  897. backward_hint) < 0) {
  898. backward_idx = i;
  899. backward_hint = ref_hint;
  900. }
  901. }
  902. }
  903. if (forward_idx < 0) {
  904. skip_mode_allowed = 0;
  905. } else if (backward_idx >= 0) {
  906. skip_mode_allowed = 1;
  907. // Frames for skip mode are forward_idx and backward_idx.
  908. } else {
  909. int second_forward_idx;
  910. int second_forward_hint;
  911. second_forward_idx = -1;
  912. for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
  913. ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
  914. if (cbs_av1_get_relative_dist(seq, ref_hint,
  915. forward_hint) < 0) {
  916. if (second_forward_idx < 0 ||
  917. cbs_av1_get_relative_dist(seq, ref_hint,
  918. second_forward_hint) > 0) {
  919. second_forward_idx = i;
  920. second_forward_hint = ref_hint;
  921. }
  922. }
  923. }
  924. if (second_forward_idx < 0) {
  925. skip_mode_allowed = 0;
  926. } else {
  927. skip_mode_allowed = 1;
  928. // Frames for skip mode are forward_idx and second_forward_idx.
  929. }
  930. }
  931. }
  932. if (skip_mode_allowed)
  933. flag(skip_mode_present);
  934. else
  935. infer(skip_mode_present, 0);
  936. return 0;
  937. }
  938. static int FUNC(global_motion_param)(CodedBitstreamContext *ctx, RWContext *rw,
  939. AV1RawFrameHeader *current,
  940. int type, int ref, int idx)
  941. {
  942. uint32_t abs_bits, prec_bits, num_syms;
  943. int err;
  944. if (idx < 2) {
  945. if (type == AV1_WARP_MODEL_TRANSLATION) {
  946. abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS - !current->allow_high_precision_mv;
  947. prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS - !current->allow_high_precision_mv;
  948. } else {
  949. abs_bits = AV1_GM_ABS_TRANS_BITS;
  950. prec_bits = AV1_GM_TRANS_PREC_BITS;
  951. }
  952. } else {
  953. abs_bits = AV1_GM_ABS_ALPHA_BITS;
  954. prec_bits = AV1_GM_ALPHA_PREC_BITS;
  955. }
  956. num_syms = 2 * (1 << abs_bits) + 1;
  957. subexp(gm_params[ref][idx], num_syms, 2, ref, idx);
  958. // Actual gm_params value is not reconstructed here.
  959. (void)prec_bits;
  960. return 0;
  961. }
  962. static int FUNC(global_motion_params)(CodedBitstreamContext *ctx, RWContext *rw,
  963. AV1RawFrameHeader *current)
  964. {
  965. int ref, type;
  966. int err;
  967. if (current->frame_type == AV1_FRAME_KEY ||
  968. current->frame_type == AV1_FRAME_INTRA_ONLY)
  969. return 0;
  970. for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
  971. flags(is_global[ref], 1, ref);
  972. if (current->is_global[ref]) {
  973. flags(is_rot_zoom[ref], 1, ref);
  974. if (current->is_rot_zoom[ref]) {
  975. type = AV1_WARP_MODEL_ROTZOOM;
  976. } else {
  977. flags(is_translation[ref], 1, ref);
  978. type = current->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
  979. : AV1_WARP_MODEL_AFFINE;
  980. }
  981. } else {
  982. type = AV1_WARP_MODEL_IDENTITY;
  983. }
  984. if (type >= AV1_WARP_MODEL_ROTZOOM) {
  985. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 2));
  986. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 3));
  987. if (type == AV1_WARP_MODEL_AFFINE) {
  988. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 4));
  989. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 5));
  990. } else {
  991. // gm_params[ref][4] = -gm_params[ref][3]
  992. // gm_params[ref][5] = gm_params[ref][2]
  993. }
  994. }
  995. if (type >= AV1_WARP_MODEL_TRANSLATION) {
  996. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 0));
  997. CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 1));
  998. }
  999. }
  1000. return 0;
  1001. }
  1002. static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
  1003. AV1RawFrameHeader *current)
  1004. {
  1005. CodedBitstreamAV1Context *priv = ctx->priv_data;
  1006. const AV1RawSequenceHeader *seq = priv->sequence_header;
  1007. int num_pos_luma, num_pos_chroma;
  1008. int i, err;
  1009. if (!seq->film_grain_params_present ||
  1010. (!current->show_frame && !current->showable_frame))
  1011. return 0;
  1012. flag(apply_grain);
  1013. if (!current->apply_grain)
  1014. return 0;
  1015. fb(16, grain_seed);
  1016. if (current->frame_type == AV1_FRAME_INTER)
  1017. flag(update_grain);
  1018. else
  1019. infer(update_grain, 1);
  1020. if (!current->update_grain) {
  1021. fb(3, film_grain_params_ref_idx);
  1022. return 0;
  1023. }
  1024. fc(4, num_y_points, 0, 14);
  1025. for (i = 0; i < current->num_y_points; i++) {
  1026. fcs(8, point_y_value[i],
  1027. i ? current->point_y_value[i - 1] + 1 : 0,
  1028. MAX_UINT_BITS(8) - (current->num_y_points - i - 1),
  1029. 1, i);
  1030. fbs(8, point_y_scaling[i], 1, i);
  1031. }
  1032. if (seq->color_config.mono_chrome)
  1033. infer(chroma_scaling_from_luma, 0);
  1034. else
  1035. flag(chroma_scaling_from_luma);
  1036. if (seq->color_config.mono_chrome ||
  1037. current->chroma_scaling_from_luma ||
  1038. (seq->color_config.subsampling_x == 1 &&
  1039. seq->color_config.subsampling_y == 1 &&
  1040. current->num_y_points == 0)) {
  1041. infer(num_cb_points, 0);
  1042. infer(num_cr_points, 0);
  1043. } else {
  1044. fc(4, num_cb_points, 0, 10);
  1045. for (i = 0; i < current->num_cb_points; i++) {
  1046. fcs(8, point_cb_value[i],
  1047. i ? current->point_cb_value[i - 1] + 1 : 0,
  1048. MAX_UINT_BITS(8) - (current->num_cb_points - i - 1),
  1049. 1, i);
  1050. fbs(8, point_cb_scaling[i], 1, i);
  1051. }
  1052. fc(4, num_cr_points, 0, 10);
  1053. for (i = 0; i < current->num_cr_points; i++) {
  1054. fcs(8, point_cr_value[i],
  1055. i ? current->point_cr_value[i - 1] + 1 : 0,
  1056. MAX_UINT_BITS(8) - (current->num_cr_points - i - 1),
  1057. 1, i);
  1058. fbs(8, point_cr_scaling[i], 1, i);
  1059. }
  1060. }
  1061. fb(2, grain_scaling_minus_8);
  1062. fb(2, ar_coeff_lag);
  1063. num_pos_luma = 2 * current->ar_coeff_lag * (current->ar_coeff_lag + 1);
  1064. if (current->num_y_points) {
  1065. num_pos_chroma = num_pos_luma + 1;
  1066. for (i = 0; i < num_pos_luma; i++)
  1067. fbs(8, ar_coeffs_y_plus_128[i], 1, i);
  1068. } else {
  1069. num_pos_chroma = num_pos_luma;
  1070. }
  1071. if (current->chroma_scaling_from_luma || current->num_cb_points) {
  1072. for (i = 0; i < num_pos_chroma; i++)
  1073. fbs(8, ar_coeffs_cb_plus_128[i], 1, i);
  1074. }
  1075. if (current->chroma_scaling_from_luma || current->num_cr_points) {
  1076. for (i = 0; i < num_pos_chroma; i++)
  1077. fbs(8, ar_coeffs_cr_plus_128[i], 1, i);
  1078. }
  1079. fb(2, ar_coeff_shift_minus_6);
  1080. fb(2, grain_scale_shift);
  1081. if (current->num_cb_points) {
  1082. fb(8, cb_mult);
  1083. fb(8, cb_luma_mult);
  1084. fb(9, cb_offset);
  1085. }
  1086. if (current->num_cr_points) {
  1087. fb(8, cr_mult);
  1088. fb(8, cr_luma_mult);
  1089. fb(9, cr_offset);
  1090. }
  1091. flag(overlap_flag);
  1092. flag(clip_to_restricted_range);
  1093. return 0;
  1094. }
  1095. static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
  1096. AV1RawFrameHeader *current)
  1097. {
  1098. CodedBitstreamAV1Context *priv = ctx->priv_data;
  1099. const AV1RawSequenceHeader *seq;
  1100. int id_len, diff_len, all_frames, frame_is_intra, order_hint_bits;
  1101. int i, err;
  1102. if (!priv->sequence_header) {
  1103. av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
  1104. "unable to decode frame header.\n");
  1105. return AVERROR_INVALIDDATA;
  1106. }
  1107. seq = priv->sequence_header;
  1108. id_len = seq->additional_frame_id_length_minus_1 +
  1109. seq->delta_frame_id_length_minus_2 + 3;
  1110. all_frames = (1 << AV1_NUM_REF_FRAMES) - 1;
  1111. if (seq->reduced_still_picture_header) {
  1112. infer(show_existing_frame, 0);
  1113. infer(frame_type, AV1_FRAME_KEY);
  1114. infer(show_frame, 1);
  1115. infer(showable_frame, 0);
  1116. frame_is_intra = 1;
  1117. } else {
  1118. flag(show_existing_frame);
  1119. if (current->show_existing_frame) {
  1120. AV1ReferenceFrameState *ref;
  1121. fb(3, frame_to_show_map_idx);
  1122. ref = &priv->ref[current->frame_to_show_map_idx];
  1123. if (!ref->valid) {
  1124. av_log(ctx->log_ctx, AV_LOG_ERROR, "Missing reference frame needed for "
  1125. "show_existing_frame (frame_to_show_map_idx = %d).\n",
  1126. current->frame_to_show_map_idx);
  1127. return AVERROR_INVALIDDATA;
  1128. }
  1129. if (seq->decoder_model_info_present_flag &&
  1130. !seq->timing_info.equal_picture_interval) {
  1131. fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
  1132. frame_presentation_time);
  1133. }
  1134. if (seq->frame_id_numbers_present_flag)
  1135. fb(id_len, display_frame_id);
  1136. infer(frame_type, ref->frame_type);
  1137. if (current->frame_type == AV1_FRAME_KEY) {
  1138. infer(refresh_frame_flags, all_frames);
  1139. // Section 7.21
  1140. infer(current_frame_id, ref->frame_id);
  1141. priv->upscaled_width = ref->upscaled_width;
  1142. priv->frame_width = ref->frame_width;
  1143. priv->frame_height = ref->frame_height;
  1144. priv->render_width = ref->render_width;
  1145. priv->render_height = ref->render_height;
  1146. priv->bit_depth = ref->bit_depth;
  1147. priv->order_hint = ref->order_hint;
  1148. } else
  1149. infer(refresh_frame_flags, 0);
  1150. infer(frame_width_minus_1, ref->upscaled_width - 1);
  1151. infer(frame_height_minus_1, ref->frame_height - 1);
  1152. infer(render_width_minus_1, ref->render_width - 1);
  1153. infer(render_height_minus_1, ref->render_height - 1);
  1154. // Section 7.20
  1155. goto update_refs;
  1156. }
  1157. fb(2, frame_type);
  1158. frame_is_intra = (current->frame_type == AV1_FRAME_INTRA_ONLY ||
  1159. current->frame_type == AV1_FRAME_KEY);
  1160. flag(show_frame);
  1161. if (current->show_frame &&
  1162. seq->decoder_model_info_present_flag &&
  1163. !seq->timing_info.equal_picture_interval) {
  1164. fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
  1165. frame_presentation_time);
  1166. }
  1167. if (current->show_frame)
  1168. infer(showable_frame, current->frame_type != AV1_FRAME_KEY);
  1169. else
  1170. flag(showable_frame);
  1171. if (current->frame_type == AV1_FRAME_SWITCH ||
  1172. (current->frame_type == AV1_FRAME_KEY && current->show_frame))
  1173. infer(error_resilient_mode, 1);
  1174. else
  1175. flag(error_resilient_mode);
  1176. }
  1177. if (current->frame_type == AV1_FRAME_KEY && current->show_frame) {
  1178. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  1179. priv->ref[i].valid = 0;
  1180. priv->ref[i].order_hint = 0;
  1181. }
  1182. }
  1183. flag(disable_cdf_update);
  1184. if (seq->seq_force_screen_content_tools ==
  1185. AV1_SELECT_SCREEN_CONTENT_TOOLS) {
  1186. flag(allow_screen_content_tools);
  1187. } else {
  1188. infer(allow_screen_content_tools,
  1189. seq->seq_force_screen_content_tools);
  1190. }
  1191. if (current->allow_screen_content_tools) {
  1192. if (seq->seq_force_integer_mv == AV1_SELECT_INTEGER_MV)
  1193. flag(force_integer_mv);
  1194. else
  1195. infer(force_integer_mv, seq->seq_force_integer_mv);
  1196. } else {
  1197. infer(force_integer_mv, 0);
  1198. }
  1199. if (seq->frame_id_numbers_present_flag) {
  1200. fb(id_len, current_frame_id);
  1201. diff_len = seq->delta_frame_id_length_minus_2 + 2;
  1202. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  1203. if (current->current_frame_id > (1 << diff_len)) {
  1204. if (priv->ref[i].frame_id > current->current_frame_id ||
  1205. priv->ref[i].frame_id < (current->current_frame_id -
  1206. (1 << diff_len)))
  1207. priv->ref[i].valid = 0;
  1208. } else {
  1209. if (priv->ref[i].frame_id > current->current_frame_id &&
  1210. priv->ref[i].frame_id < ((1 << id_len) +
  1211. current->current_frame_id -
  1212. (1 << diff_len)))
  1213. priv->ref[i].valid = 0;
  1214. }
  1215. }
  1216. } else {
  1217. infer(current_frame_id, 0);
  1218. }
  1219. if (current->frame_type == AV1_FRAME_SWITCH)
  1220. infer(frame_size_override_flag, 1);
  1221. else if(seq->reduced_still_picture_header)
  1222. infer(frame_size_override_flag, 0);
  1223. else
  1224. flag(frame_size_override_flag);
  1225. order_hint_bits =
  1226. seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;
  1227. if (order_hint_bits > 0)
  1228. fb(order_hint_bits, order_hint);
  1229. else
  1230. infer(order_hint, 0);
  1231. priv->order_hint = current->order_hint;
  1232. if (frame_is_intra || current->error_resilient_mode)
  1233. infer(primary_ref_frame, AV1_PRIMARY_REF_NONE);
  1234. else
  1235. fb(3, primary_ref_frame);
  1236. if (seq->decoder_model_info_present_flag) {
  1237. flag(buffer_removal_time_present_flag);
  1238. if (current->buffer_removal_time_present_flag) {
  1239. for (i = 0; i <= seq->operating_points_cnt_minus_1; i++) {
  1240. if (seq->decoder_model_present_for_this_op[i]) {
  1241. int op_pt_idc = seq->operating_point_idc[i];
  1242. int in_temporal_layer = (op_pt_idc >> priv->temporal_id ) & 1;
  1243. int in_spatial_layer = (op_pt_idc >> (priv->spatial_id + 8)) & 1;
  1244. if (seq->operating_point_idc[i] == 0 ||
  1245. (in_temporal_layer && in_spatial_layer)) {
  1246. fbs(seq->decoder_model_info.buffer_removal_time_length_minus_1 + 1,
  1247. buffer_removal_time[i], 1, i);
  1248. }
  1249. }
  1250. }
  1251. }
  1252. }
  1253. if (current->frame_type == AV1_FRAME_SWITCH ||
  1254. (current->frame_type == AV1_FRAME_KEY && current->show_frame))
  1255. infer(refresh_frame_flags, all_frames);
  1256. else
  1257. fb(8, refresh_frame_flags);
  1258. if (!frame_is_intra || current->refresh_frame_flags != all_frames) {
  1259. if (seq->enable_order_hint) {
  1260. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  1261. if (current->error_resilient_mode)
  1262. fbs(order_hint_bits, ref_order_hint[i], 1, i);
  1263. else
  1264. infer(ref_order_hint[i], priv->ref[i].order_hint);
  1265. if (current->ref_order_hint[i] != priv->ref[i].order_hint)
  1266. priv->ref[i].valid = 0;
  1267. }
  1268. }
  1269. } else if (!frame_is_intra && seq->enable_order_hint) {
  1270. for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
  1271. infer(ref_order_hint[i], priv->ref[i].order_hint);
  1272. }
  1273. if (current->frame_type == AV1_FRAME_KEY ||
  1274. current->frame_type == AV1_FRAME_INTRA_ONLY) {
  1275. CHECK(FUNC(frame_size)(ctx, rw, current));
  1276. CHECK(FUNC(render_size)(ctx, rw, current));
  1277. if (current->allow_screen_content_tools &&
  1278. priv->upscaled_width == priv->frame_width)
  1279. flag(allow_intrabc);
  1280. else
  1281. infer(allow_intrabc, 0);
  1282. } else {
  1283. if (!seq->enable_order_hint) {
  1284. infer(frame_refs_short_signaling, 0);
  1285. } else {
  1286. flag(frame_refs_short_signaling);
  1287. if (current->frame_refs_short_signaling) {
  1288. fb(3, last_frame_idx);
  1289. fb(3, golden_frame_idx);
  1290. CHECK(FUNC(set_frame_refs)(ctx, rw, current));
  1291. }
  1292. }
  1293. for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
  1294. if (!current->frame_refs_short_signaling)
  1295. fbs(3, ref_frame_idx[i], 1, i);
  1296. if (seq->frame_id_numbers_present_flag) {
  1297. fbs(seq->delta_frame_id_length_minus_2 + 2,
  1298. delta_frame_id_minus1[i], 1, i);
  1299. }
  1300. }
  1301. if (current->frame_size_override_flag &&
  1302. !current->error_resilient_mode) {
  1303. CHECK(FUNC(frame_size_with_refs)(ctx, rw, current));
  1304. } else {
  1305. CHECK(FUNC(frame_size)(ctx, rw, current));
  1306. CHECK(FUNC(render_size)(ctx, rw, current));
  1307. }
  1308. if (current->force_integer_mv)
  1309. infer(allow_high_precision_mv, 0);
  1310. else
  1311. flag(allow_high_precision_mv);
  1312. CHECK(FUNC(interpolation_filter)(ctx, rw, current));
  1313. flag(is_motion_mode_switchable);
  1314. if (current->error_resilient_mode ||
  1315. !seq->enable_ref_frame_mvs)
  1316. infer(use_ref_frame_mvs, 0);
  1317. else
  1318. flag(use_ref_frame_mvs);
  1319. infer(allow_intrabc, 0);
  1320. }
  1321. if (!frame_is_intra) {
  1322. // Derive reference frame sign biases.
  1323. }
  1324. if (seq->reduced_still_picture_header || current->disable_cdf_update)
  1325. infer(disable_frame_end_update_cdf, 1);
  1326. else
  1327. flag(disable_frame_end_update_cdf);
  1328. if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
  1329. // Init non-coeff CDFs.
  1330. // Setup past independence.
  1331. } else {
  1332. // Load CDF tables from previous frame.
  1333. // Load params from previous frame.
  1334. }
  1335. if (current->use_ref_frame_mvs) {
  1336. // Perform motion field estimation process.
  1337. }
  1338. CHECK(FUNC(tile_info)(ctx, rw, current));
  1339. CHECK(FUNC(quantization_params)(ctx, rw, current));
  1340. CHECK(FUNC(segmentation_params)(ctx, rw, current));
  1341. CHECK(FUNC(delta_q_params)(ctx, rw, current));
  1342. CHECK(FUNC(delta_lf_params)(ctx, rw, current));
  1343. // Init coeff CDFs / load previous segments.
  1344. priv->coded_lossless = 1;
  1345. for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
  1346. int qindex;
  1347. if (current->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
  1348. qindex = (current->base_q_idx +
  1349. current->feature_value[i][AV1_SEG_LVL_ALT_Q]);
  1350. } else {
  1351. qindex = current->base_q_idx;
  1352. }
  1353. qindex = av_clip_uintp2(qindex, 8);
  1354. if (qindex || current->delta_q_y_dc ||
  1355. current->delta_q_u_ac || current->delta_q_u_dc ||
  1356. current->delta_q_v_ac || current->delta_q_v_dc) {
  1357. priv->coded_lossless = 0;
  1358. }
  1359. }
  1360. priv->all_lossless = priv->coded_lossless &&
  1361. priv->frame_width == priv->upscaled_width;
  1362. CHECK(FUNC(loop_filter_params)(ctx, rw, current));
  1363. CHECK(FUNC(cdef_params)(ctx, rw, current));
  1364. CHECK(FUNC(lr_params)(ctx, rw, current));
  1365. CHECK(FUNC(read_tx_mode)(ctx, rw, current));
  1366. CHECK(FUNC(frame_reference_mode)(ctx, rw, current));
  1367. CHECK(FUNC(skip_mode_params)(ctx, rw, current));
  1368. if (frame_is_intra || current->error_resilient_mode ||
  1369. !seq->enable_warped_motion)
  1370. infer(allow_warped_motion, 0);
  1371. else
  1372. flag(allow_warped_motion);
  1373. flag(reduced_tx_set);
  1374. CHECK(FUNC(global_motion_params)(ctx, rw, current));
  1375. CHECK(FUNC(film_grain_params)(ctx, rw, current));
  1376. av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame %d: size %dx%d "
  1377. "upscaled %d render %dx%d subsample %dx%d "
  1378. "bitdepth %d tiles %dx%d.\n", priv->order_hint,
  1379. priv->frame_width, priv->frame_height, priv->upscaled_width,
  1380. priv->render_width, priv->render_height,
  1381. seq->color_config.subsampling_x + 1,
  1382. seq->color_config.subsampling_y + 1, priv->bit_depth,
  1383. priv->tile_rows, priv->tile_cols);
  1384. update_refs:
  1385. for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
  1386. if (current->refresh_frame_flags & (1 << i)) {
  1387. priv->ref[i] = (AV1ReferenceFrameState) {
  1388. .valid = 1,
  1389. .frame_id = current->current_frame_id,
  1390. .upscaled_width = priv->upscaled_width,
  1391. .frame_width = priv->frame_width,
  1392. .frame_height = priv->frame_height,
  1393. .render_width = priv->render_width,
  1394. .render_height = priv->render_height,
  1395. .frame_type = current->frame_type,
  1396. .subsampling_x = seq->color_config.subsampling_x,
  1397. .subsampling_y = seq->color_config.subsampling_y,
  1398. .bit_depth = priv->bit_depth,
  1399. .order_hint = priv->order_hint,
  1400. };
  1401. memcpy(priv->ref[i].loop_filter_ref_deltas, current->loop_filter_ref_deltas,
  1402. sizeof(current->loop_filter_ref_deltas));
  1403. memcpy(priv->ref[i].loop_filter_mode_deltas, current->loop_filter_mode_deltas,
  1404. sizeof(current->loop_filter_mode_deltas));
  1405. memcpy(priv->ref[i].feature_enabled, current->feature_enabled,
  1406. sizeof(current->feature_enabled));
  1407. memcpy(priv->ref[i].feature_value, current->feature_value,
  1408. sizeof(current->feature_value));
  1409. }
  1410. }
  1411. return 0;
  1412. }
  1413. static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
  1414. AV1RawFrameHeader *current, int redundant,
  1415. AVBufferRef *rw_buffer_ref)
  1416. {
  1417. CodedBitstreamAV1Context *priv = ctx->priv_data;
  1418. int start_pos, fh_bits, fh_bytes, err;
  1419. uint8_t *fh_start;
  1420. if (priv->seen_frame_header) {
  1421. if (!redundant) {
  1422. av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated "
  1423. "frame header OBU.\n");
  1424. return AVERROR_INVALIDDATA;
  1425. } else {
  1426. GetBitContext fh;
  1427. size_t i, b;
  1428. uint32_t val;
  1429. HEADER("Redundant Frame Header");
  1430. av_assert0(priv->frame_header_ref && priv->frame_header);
  1431. init_get_bits(&fh, priv->frame_header,
  1432. priv->frame_header_size);
  1433. for (i = 0; i < priv->frame_header_size; i += 8) {
  1434. b = FFMIN(priv->frame_header_size - i, 8);
  1435. val = get_bits(&fh, b);
  1436. xf(b, frame_header_copy[i],
  1437. val, val, val, 1, i / 8);
  1438. }
  1439. }
  1440. } else {
  1441. if (redundant)
  1442. HEADER("Redundant Frame Header (used as Frame Header)");
  1443. else
  1444. HEADER("Frame Header");
  1445. #ifdef READ
  1446. start_pos = get_bits_count(rw);
  1447. #else
  1448. start_pos = put_bits_count(rw);
  1449. #endif
  1450. CHECK(FUNC(uncompressed_header)(ctx, rw, current));
  1451. if (current->show_existing_frame) {
  1452. priv->seen_frame_header = 0;
  1453. } else {
  1454. priv->seen_frame_header = 1;
  1455. av_buffer_unref(&priv->frame_header_ref);
  1456. #ifdef READ
  1457. fh_bits = get_bits_count(rw) - start_pos;
  1458. fh_start = (uint8_t*)rw->buffer + start_pos / 8;
  1459. #else
  1460. // Need to flush the bitwriter so that we can copy its output,
  1461. // but use a copy so we don't affect the caller's structure.
  1462. {
  1463. PutBitContext tmp = *rw;
  1464. flush_put_bits(&tmp);
  1465. }
  1466. fh_bits = put_bits_count(rw) - start_pos;
  1467. fh_start = rw->buf + start_pos / 8;
  1468. #endif
  1469. fh_bytes = (fh_bits + 7) / 8;
  1470. priv->frame_header_size = fh_bits;
  1471. if (rw_buffer_ref) {
  1472. priv->frame_header_ref = av_buffer_ref(rw_buffer_ref);
  1473. if (!priv->frame_header_ref)
  1474. return AVERROR(ENOMEM);
  1475. priv->frame_header = fh_start;
  1476. } else {
  1477. priv->frame_header_ref =
  1478. av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
  1479. if (!priv->frame_header_ref)
  1480. return AVERROR(ENOMEM);
  1481. priv->frame_header = priv->frame_header_ref->data;
  1482. memcpy(priv->frame_header, fh_start, fh_bytes);
  1483. }
  1484. }
  1485. }
  1486. return 0;
  1487. }
static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawTileGroup *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int num_tiles, tile_bits;
    int err;

    HEADER("Tile Group");

    num_tiles = priv->tile_cols * priv->tile_rows;
    if (num_tiles > 1)
        flag(tile_start_and_end_present_flag);
    else
        infer(tile_start_and_end_present_flag, 0);

    if (num_tiles == 1 || !current->tile_start_and_end_present_flag) {
        infer(tg_start, 0);
        infer(tg_end, num_tiles - 1);
    } else {
        tile_bits = cbs_av1_tile_log2(1, priv->tile_cols) +
                    cbs_av1_tile_log2(1, priv->tile_rows);
        fb(tile_bits, tg_start);
        fb(tile_bits, tg_end);
    }

    CHECK(FUNC(byte_alignment)(ctx, rw));

    // Reset header for next frame.
    if (current->tg_end == num_tiles - 1)
        priv->seen_frame_header = 0;

    // Tile data follows.

    return 0;
}
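
// A frame OBU combines a frame header and a single tile group (which then
// covers the whole frame) in one OBU, separated by byte alignment.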
static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrame *current,
                           AVBufferRef *rw_buffer_ref)
{
    int err;

    CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header,
                                 0, rw_buffer_ref));

    CHECK(FUNC(byte_alignment)(ctx, rw));

    CHECK(FUNC(tile_group_obu)(ctx, rw, &current->tile_group));

    return 0;
}
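
// Tile list OBUs are used for large scale tile decoding; only the fixed
// header fields are parsed here, the tile data entries that follow are
// passed through unchanged.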
static int FUNC(tile_list_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                               AV1RawTileList *current)
{
    int err;

    fb(8, output_frame_width_in_tiles_minus_1);
    fb(8, output_frame_height_in_tiles_minus_1);
    fb(16, tile_count_minus_1);

    // Tile data follows.

    return 0;
}
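
// Content light level metadata: max_cll and max_fall carry the maximum
// content light level and the maximum frame-average light level in cd/m^2,
// as defined in CTA-861.3.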
static int FUNC(metadata_hdr_cll)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawMetadataHDRCLL *current)
{
    int err;

    fb(16, max_cll);
    fb(16, max_fall);

    return 0;
}
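
// Mastering display colour volume metadata: the chromaticity coordinates
// are 0.16 fixed-point values, luminance_max is 24.8 fixed point and
// luminance_min is 18.14 fixed point.  For example, a luminance_max of
// 1.0 cd/m^2 is coded as 256 (1 << 8); shifting it left by 6 gives 16384
// (1 << 14), the same value expressed in 18.14, which is how the upper
// bound on luminance_min below is computed.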
static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataHDRMDCV *current)
{
    int err, i;

    for (i = 0; i < 3; i++) {
        fbs(16, primary_chromaticity_x[i], 1, i);
        fbs(16, primary_chromaticity_y[i], 1, i);
    }

    fb(16, white_point_chromaticity_x);
    fb(16, white_point_chromaticity_y);

    fc(32, luminance_max, 1, MAX_UINT_BITS(32));
    // luminance_min must be lower than luminance_max. Convert luminance_max from
    // 24.8 fixed point to 18.14 fixed point in order to compare them.
    fc(32, luminance_min, 0, FFMIN(((uint64_t)current->luminance_max << 6) - 1,
                                   MAX_UINT_BITS(32)));

    return 0;
}
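
// The scalability structure bounds each spatial layer's dimensions by the
// maximum frame size from the sequence header, so a sequence header must
// have been parsed before this metadata can be handled.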
static int FUNC(scalability_structure)(CodedBitstreamContext *ctx, RWContext *rw,
                                       AV1RawMetadataScalability *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq;
    int err, i, j;

    if (!priv->sequence_header) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
               "unable to parse scalability metadata.\n");
        return AVERROR_INVALIDDATA;
    }
    seq = priv->sequence_header;

    fb(2, spatial_layers_cnt_minus_1);
    flag(spatial_layer_dimensions_present_flag);
    flag(spatial_layer_description_present_flag);
    flag(temporal_group_description_present_flag);
    fc(3, scalability_structure_reserved_3bits, 0, 0);
    if (current->spatial_layer_dimensions_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++) {
            fcs(16, spatial_layer_max_width[i],
                0, seq->max_frame_width_minus_1 + 1, 1, i);
            fcs(16, spatial_layer_max_height[i],
                0, seq->max_frame_height_minus_1 + 1, 1, i);
        }
    }
    if (current->spatial_layer_description_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++)
            fbs(8, spatial_layer_ref_id[i], 1, i);
    }
    if (current->temporal_group_description_present_flag) {
        fb(8, temporal_group_size);
        for (i = 0; i < current->temporal_group_size; i++) {
            fbs(3, temporal_group_temporal_id[i], 1, i);
            flags(temporal_group_temporal_switching_up_point_flag[i], 1, i);
            flags(temporal_group_spatial_switching_up_point_flag[i], 1, i);
            fbs(3, temporal_group_ref_cnt[i], 1, i);
            for (j = 0; j < current->temporal_group_ref_cnt[i]; j++) {
                fbs(8, temporal_group_ref_pic_diff[i][j], 2, i, j);
            }
        }
    }

    return 0;
}

static int FUNC(metadata_scalability)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawMetadataScalability *current)
{
    int err;

    fb(8, scalability_mode_idc);

    if (current->scalability_mode_idc == AV1_SCALABILITY_SS)
        CHECK(FUNC(scalability_structure)(ctx, rw, current));

    return 0;
}

static int FUNC(metadata_itut_t35)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataITUTT35 *current)
{
    int err;
    size_t i;

    fb(8, itu_t_t35_country_code);
    if (current->itu_t_t35_country_code == 0xff)
        fb(8, itu_t_t35_country_code_extension_byte);

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, itu_t_t35_payload_bytes[i], current->payload[i],
           0x00, 0xff, 1, i);

    return 0;
}
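
// Timecode metadata: when full_timestamp_flag is not set, seconds, minutes
// and hours are each gated by their own presence flag, and each flag is
// only coded if the previous field was present.  time_offset_value occupies
// time_offset_length bits; when that length is zero no offset bits are
// coded and the value defaults to zero.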
static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataTimecode *current)
{
    int err;

    fb(5, counting_type);
    flag(full_timestamp_flag);
    flag(discontinuity_flag);
    flag(cnt_dropped_flag);
    fb(9, n_frames);

    if (current->full_timestamp_flag) {
        fc(6, seconds_value, 0, 59);
        fc(6, minutes_value, 0, 59);
        fc(5, hours_value,   0, 23);
    } else {
        flag(seconds_flag);
        if (current->seconds_flag) {
            fc(6, seconds_value, 0, 59);
            flag(minutes_flag);
            if (current->minutes_flag) {
                fc(6, minutes_value, 0, 59);
                flag(hours_flag);
                if (current->hours_flag)
                    fc(5, hours_value, 0, 23);
            }
        }
    }

    fb(5, time_offset_length);
    if (current->time_offset_length > 0)
        fb(current->time_offset_length, time_offset_value);
    else
        infer(time_offset_value, 0);

    return 0;
}

static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawMetadata *current)
{
    int err;

    leb128(metadata_type);

    switch (current->metadata_type) {
    case AV1_METADATA_TYPE_HDR_CLL:
        CHECK(FUNC(metadata_hdr_cll)(ctx, rw, &current->metadata.hdr_cll));
        break;
    case AV1_METADATA_TYPE_HDR_MDCV:
        CHECK(FUNC(metadata_hdr_mdcv)(ctx, rw, &current->metadata.hdr_mdcv));
        break;
    case AV1_METADATA_TYPE_SCALABILITY:
        CHECK(FUNC(metadata_scalability)(ctx, rw, &current->metadata.scalability));
        break;
    case AV1_METADATA_TYPE_ITUT_T35:
        CHECK(FUNC(metadata_itut_t35)(ctx, rw, &current->metadata.itut_t35));
        break;
    case AV1_METADATA_TYPE_TIMECODE:
        CHECK(FUNC(metadata_timecode)(ctx, rw, &current->metadata.timecode));
        break;
    default:
        // Unknown metadata type.
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

static int FUNC(padding_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawPadding *current)
{
    int i, err;

    HEADER("Padding");

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, obu_padding_byte[i], current->payload[i], 0x00, 0xff, 1, i);

    return 0;
}