/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
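
/*
 * AV1 bitstream syntax template.
 *
 * This template is included by cbs_av1.c for both the read and the write
 * pass, so each FUNC(...) below expands into a bitstream parser and a
 * bitstream writer.  Roughly (see cbs_av1.c for the exact definitions):
 * fb() codes a fixed-width field, fc() additionally range-checks it,
 * flag() is a one-bit field, fixed() requires a specific constant value,
 * the *s variants (fbs, fcs, flags, sus) carry trace subscripts, and
 * infer() assigns a value that is not present in the bitstream at all.
 */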

static int FUNC(obu_header)(CodedBitstreamContext *ctx, RWContext *rw,
                            AV1RawOBUHeader *current)
{
    int err;
    av_unused int zero = 0;

    HEADER("OBU header");

    fc(1, obu_forbidden_bit, 0, 0);

    fc(4, obu_type, 0, AV1_OBU_PADDING);
    flag(obu_extension_flag);
    flag(obu_has_size_field);

    fc(1, obu_reserved_1bit, 0, 0);

    if (current->obu_extension_flag) {
        fb(3, temporal_id);
        fb(2, spatial_id);
        fc(3, extension_header_reserved_3bits, 0, 0);
    }

    return 0;
}

static int FUNC(trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw, int nb_bits)
{
    int err;

    av_assert0(nb_bits > 0);

    fixed(1, trailing_one_bit, 1);
    --nb_bits;

    while (nb_bits > 0) {
        fixed(1, trailing_zero_bit, 0);
        --nb_bits;
    }

    return 0;
}

static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;

    while (byte_alignment(rw) != 0)
        fixed(1, zero_bit, 0);

    return 0;
}

static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawColorConfig *current, int seq_profile)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    flag(high_bitdepth);

    if (seq_profile == FF_PROFILE_AV1_PROFESSIONAL &&
        current->high_bitdepth) {
        flag(twelve_bit);
        priv->bit_depth = current->twelve_bit ? 12 : 10;
    } else {
        priv->bit_depth = current->high_bitdepth ? 10 : 8;
    }

    if (seq_profile == FF_PROFILE_AV1_HIGH)
        infer(mono_chrome, 0);
    else
        flag(mono_chrome);
    priv->num_planes = current->mono_chrome ? 1 : 3;

    flag(color_description_present_flag);
    if (current->color_description_present_flag) {
        fb(8, color_primaries);
        fb(8, transfer_characteristics);
        fb(8, matrix_coefficients);
    } else {
        infer(color_primaries, AVCOL_PRI_UNSPECIFIED);
        infer(transfer_characteristics, AVCOL_TRC_UNSPECIFIED);
        infer(matrix_coefficients, AVCOL_SPC_UNSPECIFIED);
    }

    if (current->mono_chrome) {
        flag(color_range);

        infer(subsampling_x, 1);
        infer(subsampling_y, 1);
        infer(chroma_sample_position, AV1_CSP_UNKNOWN);
        infer(separate_uv_delta_q, 0);

    } else if (current->color_primaries == AVCOL_PRI_BT709 &&
               current->transfer_characteristics == AVCOL_TRC_IEC61966_2_1 &&
               current->matrix_coefficients == AVCOL_SPC_RGB) {
        infer(color_range, 1);
        infer(subsampling_x, 0);
        infer(subsampling_y, 0);
        flag(separate_uv_delta_q);

    } else {
        flag(color_range);

        if (seq_profile == FF_PROFILE_AV1_MAIN) {
            infer(subsampling_x, 1);
            infer(subsampling_y, 1);
        } else if (seq_profile == FF_PROFILE_AV1_HIGH) {
            infer(subsampling_x, 0);
            infer(subsampling_y, 0);
        } else {
            if (priv->bit_depth == 12) {
                fb(1, subsampling_x);
                if (current->subsampling_x)
                    fb(1, subsampling_y);
                else
                    infer(subsampling_y, 0);
            } else {
                infer(subsampling_x, 1);
                infer(subsampling_y, 0);
            }
        }
        if (current->subsampling_x && current->subsampling_y) {
            fc(2, chroma_sample_position, AV1_CSP_UNKNOWN,
               AV1_CSP_COLOCATED);
        }

        flag(separate_uv_delta_q);
    }

    return 0;
}

static int FUNC(timing_info)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawTimingInfo *current)
{
    int err;

    fc(32, num_units_in_display_tick, 1, MAX_UINT_BITS(32));
    fc(32, time_scale, 1, MAX_UINT_BITS(32));

    flag(equal_picture_interval);
    if (current->equal_picture_interval)
        uvlc(num_ticks_per_picture_minus_1, 0, MAX_UINT_BITS(32) - 1);

    return 0;
}

static int FUNC(decoder_model_info)(CodedBitstreamContext *ctx, RWContext *rw,
                                    AV1RawDecoderModelInfo *current)
{
    int err;

    fb(5, buffer_delay_length_minus_1);
    fb(32, num_units_in_decoding_tick);
    fb(5, buffer_removal_time_length_minus_1);
    fb(5, frame_presentation_time_length_minus_1);

    return 0;
}

static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawSequenceHeader *current)
{
    int i, err;

    HEADER("Sequence Header");

    fc(3, seq_profile, FF_PROFILE_AV1_MAIN,
                       FF_PROFILE_AV1_PROFESSIONAL);
    flag(still_picture);
    flag(reduced_still_picture_header);

    if (current->reduced_still_picture_header) {
        infer(timing_info_present_flag, 0);
        infer(decoder_model_info_present_flag, 0);
        infer(initial_display_delay_present_flag, 0);
        infer(operating_points_cnt_minus_1, 0);
        infer(operating_point_idc[0], 0);

        fb(5, seq_level_idx[0]);

        infer(seq_tier[0], 0);
        infer(decoder_model_present_for_this_op[0], 0);
        infer(initial_display_delay_present_for_this_op[0], 0);

    } else {
        flag(timing_info_present_flag);
        if (current->timing_info_present_flag) {
            CHECK(FUNC(timing_info)(ctx, rw, &current->timing_info));

            flag(decoder_model_info_present_flag);
            if (current->decoder_model_info_present_flag) {
                CHECK(FUNC(decoder_model_info)
                          (ctx, rw, &current->decoder_model_info));
            }
        } else {
            infer(decoder_model_info_present_flag, 0);
        }

        flag(initial_display_delay_present_flag);

        fb(5, operating_points_cnt_minus_1);
        for (i = 0; i <= current->operating_points_cnt_minus_1; i++) {
            fbs(12, operating_point_idc[i], 1, i);
            fbs(5, seq_level_idx[i], 1, i);

            if (current->seq_level_idx[i] > 7)
                flags(seq_tier[i], 1, i);
            else
                infer(seq_tier[i], 0);

            if (current->decoder_model_info_present_flag) {
                flags(decoder_model_present_for_this_op[i], 1, i);
                if (current->decoder_model_present_for_this_op[i]) {
                    int n = current->decoder_model_info.buffer_delay_length_minus_1 + 1;
                    fbs(n, decoder_buffer_delay[i], 1, i);
                    fbs(n, encoder_buffer_delay[i], 1, i);
                    flags(low_delay_mode_flag[i], 1, i);
                }
            } else {
                infer(decoder_model_present_for_this_op[i], 0);
            }

            if (current->initial_display_delay_present_flag) {
                flags(initial_display_delay_present_for_this_op[i], 1, i);
                if (current->initial_display_delay_present_for_this_op[i])
                    fbs(4, initial_display_delay_minus_1[i], 1, i);
            }
        }
    }

    fb(4, frame_width_bits_minus_1);
    fb(4, frame_height_bits_minus_1);

    fb(current->frame_width_bits_minus_1 + 1, max_frame_width_minus_1);
    fb(current->frame_height_bits_minus_1 + 1, max_frame_height_minus_1);

    if (current->reduced_still_picture_header)
        infer(frame_id_numbers_present_flag, 0);
    else
        flag(frame_id_numbers_present_flag);
    if (current->frame_id_numbers_present_flag) {
        fb(4, delta_frame_id_length_minus_2);
        fb(3, additional_frame_id_length_minus_1);
    }

    flag(use_128x128_superblock);
    flag(enable_filter_intra);
    flag(enable_intra_edge_filter);

    if (current->reduced_still_picture_header) {
        infer(enable_interintra_compound, 0);
        infer(enable_masked_compound, 0);
        infer(enable_warped_motion, 0);
        infer(enable_dual_filter, 0);
        infer(enable_order_hint, 0);
        infer(enable_jnt_comp, 0);
        infer(enable_ref_frame_mvs, 0);

        infer(seq_force_screen_content_tools,
              AV1_SELECT_SCREEN_CONTENT_TOOLS);
        infer(seq_force_integer_mv,
              AV1_SELECT_INTEGER_MV);
    } else {
        flag(enable_interintra_compound);
        flag(enable_masked_compound);
        flag(enable_warped_motion);
        flag(enable_dual_filter);

        flag(enable_order_hint);
        if (current->enable_order_hint) {
            flag(enable_jnt_comp);
            flag(enable_ref_frame_mvs);
        } else {
            infer(enable_jnt_comp, 0);
            infer(enable_ref_frame_mvs, 0);
        }

        flag(seq_choose_screen_content_tools);
        if (current->seq_choose_screen_content_tools)
            infer(seq_force_screen_content_tools,
                  AV1_SELECT_SCREEN_CONTENT_TOOLS);
        else
            fb(1, seq_force_screen_content_tools);
        if (current->seq_force_screen_content_tools > 0) {
            flag(seq_choose_integer_mv);
            if (current->seq_choose_integer_mv)
                infer(seq_force_integer_mv,
                      AV1_SELECT_INTEGER_MV);
            else
                fb(1, seq_force_integer_mv);
        } else {
            infer(seq_force_integer_mv, AV1_SELECT_INTEGER_MV);
        }

        if (current->enable_order_hint)
            fb(3, order_hint_bits_minus_1);
    }

    flag(enable_superres);
    flag(enable_cdef);
    flag(enable_restoration);

    CHECK(FUNC(color_config)(ctx, rw, &current->color_config,
                             current->seq_profile));

    flag(film_grain_params_present);

    return 0;
}

static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *rw)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;

    HEADER("Temporal Delimiter");

    priv->seen_frame_header = 0;

    return 0;
}
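
/*
 * Note: superres only rescales the frame horizontally.  denom is
 * coded_denom + AV1_SUPERRES_DENOM_MIN (9..16 per the AV1 spec), so the
 * coded frame width below is the upscaled width scaled by 8/denom with
 * rounding; when use_superres is not set, denom equals AV1_SUPERRES_NUM
 * and the width is left unchanged.
 */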

static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                 AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int denom, err;

    if (seq->enable_superres)
        flag(use_superres);
    else
        infer(use_superres, 0);

    if (current->use_superres) {
        fb(3, coded_denom);
        denom = current->coded_denom + AV1_SUPERRES_DENOM_MIN;
    } else {
        denom = AV1_SUPERRES_NUM;
    }

    priv->upscaled_width = priv->frame_width;
    priv->frame_width = (priv->upscaled_width * AV1_SUPERRES_NUM +
                         denom / 2) / denom;

    return 0;
}

static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw,
                            AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int err;

    if (current->frame_size_override_flag) {
        fb(seq->frame_width_bits_minus_1 + 1, frame_width_minus_1);
        fb(seq->frame_height_bits_minus_1 + 1, frame_height_minus_1);

        priv->frame_width = current->frame_width_minus_1 + 1;
        priv->frame_height = current->frame_height_minus_1 + 1;
    } else {
        priv->frame_width = seq->max_frame_width_minus_1 + 1;
        priv->frame_height = seq->max_frame_height_minus_1 + 1;
    }

    CHECK(FUNC(superres_params)(ctx, rw, current));

    return 0;
}

static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    flag(render_and_frame_size_different);

    if (current->render_and_frame_size_different) {
        fb(16, render_width_minus_1);
        fb(16, render_height_minus_1);

        priv->render_width = current->render_width_minus_1 + 1;
        priv->render_height = current->render_height_minus_1 + 1;
    } else {
        priv->render_width = priv->upscaled_width;
        priv->render_height = priv->frame_height;
    }

    return 0;
}

static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int i, err;

    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        flags(found_ref[i], 1, i);
        if (current->found_ref[i]) {
            AV1ReferenceFrameState *ref;

            if (current->ref_frame_idx[i] < 0 ||
                !priv->ref[current->ref_frame_idx[i]].valid) {
                av_log(ctx->log_ctx, AV_LOG_ERROR,
                       "Missing reference frame needed for frame size "
                       "(ref = %d, ref_frame_idx = %d).\n",
                       i, current->ref_frame_idx[i]);
                return AVERROR_INVALIDDATA;
            }
            ref = &priv->ref[current->ref_frame_idx[i]];

            priv->upscaled_width = ref->upscaled_width;
            priv->frame_width = ref->frame_width;
            priv->frame_height = ref->frame_height;
            priv->render_width = ref->render_width;
            priv->render_height = ref->render_height;
            break;
        }
    }

    if (i >= AV1_REFS_PER_FRAME) {
        CHECK(FUNC(frame_size)(ctx, rw, current));
        CHECK(FUNC(render_size)(ctx, rw, current));
    } else {
        CHECK(FUNC(superres_params)(ctx, rw, current));
    }

    return 0;
}

static int FUNC(interpolation_filter)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int err;

    flag(is_filter_switchable);
    if (current->is_filter_switchable)
        infer(interpolation_filter,
              AV1_INTERPOLATION_FILTER_SWITCHABLE);
    else
        fb(2, interpolation_filter);

    return 0;
}
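
/*
 * Tile layout is computed in superblock units: mi_cols/mi_rows count 4x4
 * mode-info units across the frame (rounded up), and sb_cols/sb_rows round
 * those up to 64x64 or 128x128 superblocks depending on
 * use_128x128_superblock.  The log2 tile-count bounds then follow the
 * maximum tile width and maximum tile area constraints of the AV1 spec.
 */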

static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int mi_cols, mi_rows, sb_cols, sb_rows, sb_shift, sb_size;
    int max_tile_width_sb, max_tile_height_sb, max_tile_area_sb;
    int min_log2_tile_cols, max_log2_tile_cols, max_log2_tile_rows;
    int min_log2_tiles, min_log2_tile_rows;
    int i, err;

    mi_cols = 2 * ((priv->frame_width + 7) >> 3);
    mi_rows = 2 * ((priv->frame_height + 7) >> 3);

    sb_cols = seq->use_128x128_superblock ? ((mi_cols + 31) >> 5)
                                          : ((mi_cols + 15) >> 4);
    sb_rows = seq->use_128x128_superblock ? ((mi_rows + 31) >> 5)
                                          : ((mi_rows + 15) >> 4);

    sb_shift = seq->use_128x128_superblock ? 5 : 4;
    sb_size = sb_shift + 2;

    max_tile_width_sb = AV1_MAX_TILE_WIDTH >> sb_size;
    max_tile_area_sb = AV1_MAX_TILE_AREA >> (2 * sb_size);

    min_log2_tile_cols = cbs_av1_tile_log2(max_tile_width_sb, sb_cols);
    max_log2_tile_cols = cbs_av1_tile_log2(1, FFMIN(sb_cols, AV1_MAX_TILE_COLS));
    max_log2_tile_rows = cbs_av1_tile_log2(1, FFMIN(sb_rows, AV1_MAX_TILE_ROWS));
    min_log2_tiles = FFMAX(min_log2_tile_cols,
                           cbs_av1_tile_log2(max_tile_area_sb, sb_rows * sb_cols));

    flag(uniform_tile_spacing_flag);

    if (current->uniform_tile_spacing_flag) {
        int tile_width_sb, tile_height_sb;

        increment(tile_cols_log2, min_log2_tile_cols, max_log2_tile_cols);

        tile_width_sb = (sb_cols + (1 << current->tile_cols_log2) - 1) >>
                        current->tile_cols_log2;
        current->tile_cols = (sb_cols + tile_width_sb - 1) / tile_width_sb;

        min_log2_tile_rows = FFMAX(min_log2_tiles - current->tile_cols_log2, 0);

        increment(tile_rows_log2, min_log2_tile_rows, max_log2_tile_rows);

        tile_height_sb = (sb_rows + (1 << current->tile_rows_log2) - 1) >>
                         current->tile_rows_log2;
        current->tile_rows = (sb_rows + tile_height_sb - 1) / tile_height_sb;

    } else {
        int widest_tile_sb, start_sb, size_sb, max_width, max_height;

        widest_tile_sb = 0;

        start_sb = 0;
        for (i = 0; start_sb < sb_cols && i < AV1_MAX_TILE_COLS; i++) {
            max_width = FFMIN(sb_cols - start_sb, max_tile_width_sb);
            ns(max_width, width_in_sbs_minus_1[i], 1, i);
            size_sb = current->width_in_sbs_minus_1[i] + 1;
            widest_tile_sb = FFMAX(size_sb, widest_tile_sb);
            start_sb += size_sb;
        }
        current->tile_cols_log2 = cbs_av1_tile_log2(1, i);
        current->tile_cols = i;

        if (min_log2_tiles > 0)
            max_tile_area_sb = (sb_rows * sb_cols) >> (min_log2_tiles + 1);
        else
            max_tile_area_sb = sb_rows * sb_cols;
        max_tile_height_sb = FFMAX(max_tile_area_sb / widest_tile_sb, 1);

        start_sb = 0;
        for (i = 0; start_sb < sb_rows && i < AV1_MAX_TILE_ROWS; i++) {
            max_height = FFMIN(sb_rows - start_sb, max_tile_height_sb);
            ns(max_height, height_in_sbs_minus_1[i], 1, i);
            size_sb = current->height_in_sbs_minus_1[i] + 1;
            start_sb += size_sb;
        }
        current->tile_rows_log2 = cbs_av1_tile_log2(1, i);
        current->tile_rows = i;
    }

    if (current->tile_cols_log2 > 0 ||
        current->tile_rows_log2 > 0) {
        fb(current->tile_cols_log2 + current->tile_rows_log2,
           context_update_tile_id);
        fb(2, tile_size_bytes_minus1);
    } else {
        infer(context_update_tile_id, 0);
    }

    priv->tile_cols = current->tile_cols;
    priv->tile_rows = current->tile_rows;

    return 0;
}

static int FUNC(quantization_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int err;

    fb(8, base_q_idx);

    delta_q(delta_q_y_dc);

    if (priv->num_planes > 1) {
        if (seq->color_config.separate_uv_delta_q)
            flag(diff_uv_delta);
        else
            infer(diff_uv_delta, 0);

        delta_q(delta_q_u_dc);
        delta_q(delta_q_u_ac);

        if (current->diff_uv_delta) {
            delta_q(delta_q_v_dc);
            delta_q(delta_q_v_ac);
        } else {
            infer(delta_q_v_dc, current->delta_q_u_dc);
            infer(delta_q_v_ac, current->delta_q_u_ac);
        }
    } else {
        infer(delta_q_u_dc, 0);
        infer(delta_q_u_ac, 0);
        infer(delta_q_v_dc, 0);
        infer(delta_q_v_ac, 0);
    }

    flag(using_qmatrix);
    if (current->using_qmatrix) {
        fb(4, qm_y);
        fb(4, qm_u);

        if (seq->color_config.separate_uv_delta_q)
            fb(4, qm_v);
        else
            infer(qm_v, current->qm_u);
    }

    return 0;
}
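
/*
 * The bits[] and sign[] tables below mirror Segmentation_Feature_Bits and
 * Segmentation_Feature_Signed from the AV1 spec: each of the eight
 * per-segment features is coded with the given number of magnitude bits,
 * plus a sign bit where the feature is signed.
 */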

static int FUNC(segmentation_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    static const uint8_t bits[AV1_SEG_LVL_MAX] = { 8, 6, 6, 6, 6, 3, 0, 0 };
    static const uint8_t sign[AV1_SEG_LVL_MAX] = { 1, 1, 1, 1, 1, 0, 0, 0 };
    int i, j, err;

    flag(segmentation_enabled);

    if (current->segmentation_enabled) {
        if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
            infer(segmentation_update_map, 1);
            infer(segmentation_temporal_update, 0);
            infer(segmentation_update_data, 1);
        } else {
            flag(segmentation_update_map);
            if (current->segmentation_update_map)
                flag(segmentation_temporal_update);
            else
                infer(segmentation_temporal_update, 0);
            flag(segmentation_update_data);
        }

        if (current->segmentation_update_data) {
            for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
                for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
                    flags(feature_enabled[i][j], 2, i, j);

                    if (current->feature_enabled[i][j] && bits[j] > 0) {
                        if (sign[j])
                            sus(1 + bits[j], feature_value[i][j], 2, i, j);
                        else
                            fbs(bits[j], feature_value[i][j], 2, i, j);
                    } else {
                        infer(feature_value[i][j], 0);
                    }
                }
            }
        }
    } else {
        for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
            for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
                infer(feature_enabled[i][j], 0);
                infer(feature_value[i][j], 0);
            }
        }
    }

    return 0;
}

static int FUNC(delta_q_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawFrameHeader *current)
{
    int err;

    if (current->base_q_idx > 0)
        flag(delta_q_present);
    else
        infer(delta_q_present, 0);

    if (current->delta_q_present)
        fb(2, delta_q_res);

    return 0;
}

static int FUNC(delta_lf_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                 AV1RawFrameHeader *current)
{
    int err;

    if (current->delta_q_present) {
        if (!current->allow_intrabc)
            flag(delta_lf_present);
        else
            infer(delta_lf_present, 0);
        if (current->delta_lf_present) {
            fb(2, delta_lf_res);
            flag(delta_lf_multi);
        } else {
            infer(delta_lf_res, 0);
            infer(delta_lf_multi, 0);
        }
    } else {
        infer(delta_lf_present, 0);
        infer(delta_lf_res, 0);
        infer(delta_lf_multi, 0);
    }

    return 0;
}

static int FUNC(loop_filter_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                    AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int i, err;

    if (priv->coded_lossless || current->allow_intrabc) {
        infer(loop_filter_level[0], 0);
        infer(loop_filter_level[1], 0);

        infer(loop_filter_ref_deltas[AV1_REF_FRAME_INTRA], 1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST], 0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST2], 0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_LAST3], 0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_BWDREF], 0);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_GOLDEN], -1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF], -1);
        infer(loop_filter_ref_deltas[AV1_REF_FRAME_ALTREF2], -1);
        for (i = 0; i < 2; i++)
            infer(loop_filter_mode_deltas[i], 0);

        return 0;
    }

    fb(6, loop_filter_level[0]);
    fb(6, loop_filter_level[1]);

    if (priv->num_planes > 1) {
        if (current->loop_filter_level[0] ||
            current->loop_filter_level[1]) {
            fb(6, loop_filter_level[2]);
            fb(6, loop_filter_level[3]);
        }
    }

    fb(3, loop_filter_sharpness);

    flag(loop_filter_delta_enabled);
    if (current->loop_filter_delta_enabled) {
        flag(loop_filter_delta_update);
        if (current->loop_filter_delta_update) {
            for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
                flags(update_ref_delta[i], 1, i);
                if (current->update_ref_delta[i])
                    sus(1 + 6, loop_filter_ref_deltas[i], 1, i);
            }
            for (i = 0; i < 2; i++) {
                flags(update_mode_delta[i], 1, i);
                if (current->update_mode_delta[i])
                    sus(1 + 6, loop_filter_mode_deltas[i], 1, i);
            }
        }
    }

    return 0;
}

static int FUNC(cdef_params)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int i, err;

    if (priv->coded_lossless || current->allow_intrabc ||
        !seq->enable_cdef) {
        infer(cdef_damping_minus_3, 0);
        infer(cdef_bits, 0);
        infer(cdef_y_pri_strength[0], 0);
        infer(cdef_y_sec_strength[0], 0);
        infer(cdef_uv_pri_strength[0], 0);
        infer(cdef_uv_sec_strength[0], 0);

        return 0;
    }

    fb(2, cdef_damping_minus_3);
    fb(2, cdef_bits);

    for (i = 0; i < (1 << current->cdef_bits); i++) {
        fbs(4, cdef_y_pri_strength[i], 1, i);
        fbs(2, cdef_y_sec_strength[i], 1, i);

        if (priv->num_planes > 1) {
            fbs(4, cdef_uv_pri_strength[i], 1, i);
            fbs(2, cdef_uv_sec_strength[i], 1, i);
        }
    }

    return 0;
}

static int FUNC(lr_params)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int uses_lr, uses_chroma_lr;
    int i, err;

    if (priv->all_lossless || current->allow_intrabc ||
        !seq->enable_restoration) {
        return 0;
    }

    uses_lr = uses_chroma_lr = 0;
    for (i = 0; i < priv->num_planes; i++) {
        fbs(2, lr_type[i], 1, i);

        if (current->lr_type[i] != 0) {
            uses_lr = 1;
            if (i > 0)
                uses_chroma_lr = 1;
        }
    }

    if (uses_lr) {
        if (seq->use_128x128_superblock)
            increment(lr_unit_shift, 1, 2);
        else
            increment(lr_unit_shift, 0, 2);

        if (seq->color_config.subsampling_x &&
            seq->color_config.subsampling_y && uses_chroma_lr) {
            fb(1, lr_uv_shift);
        } else {
            infer(lr_uv_shift, 0);
        }
    }

    return 0;
}

static int FUNC(read_tx_mode)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int err;

    if (priv->coded_lossless)
        infer(tx_mode, 0);
    else
        increment(tx_mode, 1, 2);

    return 0;
}

static int FUNC(frame_reference_mode)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int err;

    if (current->frame_type == AV1_FRAME_INTRA_ONLY ||
        current->frame_type == AV1_FRAME_KEY)
        infer(reference_select, 0);
    else
        flag(reference_select);

    return 0;
}

static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int skip_mode_allowed;
    int err;

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY ||
        !current->reference_select || !seq->enable_order_hint) {
        skip_mode_allowed = 0;
    } else {
        int forward_idx, backward_idx;
        int forward_hint, backward_hint;
        int ref_hint, dist, i;

        forward_idx = -1;
        backward_idx = -1;
        for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
            ref_hint = priv->ref[i].order_hint;
            dist = cbs_av1_get_relative_dist(seq, ref_hint,
                                             current->order_hint);
            if (dist < 0) {
                if (forward_idx < 0 ||
                    cbs_av1_get_relative_dist(seq, ref_hint,
                                              forward_hint) > 0) {
                    forward_idx = i;
                    forward_hint = ref_hint;
                }
            } else if (dist > 0) {
                if (backward_idx < 0 ||
                    cbs_av1_get_relative_dist(seq, ref_hint,
                                              backward_hint) < 0) {
                    backward_idx = i;
                    backward_hint = ref_hint;
                }
            }
        }

        if (forward_idx < 0) {
            skip_mode_allowed = 0;
        } else if (backward_idx >= 0) {
            skip_mode_allowed = 1;
            // Frames for skip mode are forward_idx and backward_idx.
        } else {
            int second_forward_idx;
            int second_forward_hint;

            second_forward_idx = -1;
            for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
                ref_hint = priv->ref[i].order_hint;
                if (cbs_av1_get_relative_dist(seq, ref_hint,
                                              forward_hint) < 0) {
                    if (second_forward_idx < 0 ||
                        cbs_av1_get_relative_dist(seq, ref_hint,
                                                  second_forward_hint) > 0) {
                        second_forward_idx = i;
                        second_forward_hint = ref_hint;
                    }
                }
            }

            if (second_forward_idx < 0) {
                skip_mode_allowed = 0;
            } else {
                skip_mode_allowed = 1;
                // Frames for skip mode are forward_idx and second_forward_idx.
            }
        }
    }

    if (skip_mode_allowed)
        flag(skip_mode_present);
    else
        infer(skip_mode_present, 0);

    return 0;
}
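
/*
 * Note: for each reference, gm_params indices 0..1 are the translation
 * components and 2..5 the non-translational (rotation/zoom/affine)
 * components, each coded with the subexponential scheme below.  Only the
 * coded symbol is stored here; the actual warp parameter values are not
 * reconstructed in CBS.
 */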

static int FUNC(global_motion_param)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current,
                                     int type, int ref, int idx)
{
    uint32_t abs_bits, prec_bits, num_syms;
    int err;

    if (idx < 2) {
        if (type == AV1_WARP_MODEL_TRANSLATION) {
            abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS - !current->allow_high_precision_mv;
            prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS - !current->allow_high_precision_mv;
        } else {
            abs_bits = AV1_GM_ABS_TRANS_BITS;
            prec_bits = AV1_GM_TRANS_PREC_BITS;
        }
    } else {
        abs_bits = AV1_GM_ABS_ALPHA_BITS;
        prec_bits = AV1_GM_ALPHA_PREC_BITS;
    }

    num_syms = 2 * (1 << abs_bits) + 1;
    subexp(gm_params[ref][idx], num_syms, 2, ref, idx);

    // Actual gm_params value is not reconstructed here.
    (void)prec_bits;

    return 0;
}

static int FUNC(global_motion_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawFrameHeader *current)
{
    int ref, type;
    int err;

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY)
        return 0;

    for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        flags(is_global[ref], 1, ref);
        if (current->is_global[ref]) {
            flags(is_rot_zoom[ref], 1, ref);
            if (current->is_rot_zoom[ref]) {
                type = AV1_WARP_MODEL_ROTZOOM;
            } else {
                flags(is_translation[ref], 1, ref);
                type = current->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
                                                    : AV1_WARP_MODEL_AFFINE;
            }
        } else {
            type = AV1_WARP_MODEL_IDENTITY;
        }

        if (type >= AV1_WARP_MODEL_ROTZOOM) {
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 2));
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 3));
            if (type == AV1_WARP_MODEL_AFFINE) {
                CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 4));
                CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 5));
            } else {
                // gm_params[ref][4] = -gm_params[ref][3]
                // gm_params[ref][5] = gm_params[ref][2]
            }
        }
        if (type >= AV1_WARP_MODEL_TRANSLATION) {
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 0));
            CHECK(FUNC(global_motion_param)(ctx, rw, current, type, ref, 1));
        }
    }

    return 0;
}
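
/*
 * Note: the number of luma AR coefficients below is
 * 2 * ar_coeff_lag * (ar_coeff_lag + 1), i.e. the points in the causal
 * neighbourhood for the chosen lag; chroma uses one extra coefficient when
 * luma scaling points are present (num_pos_chroma).
 */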

static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq = priv->sequence_header;
    int num_pos_luma, num_pos_chroma;
    int i, err;

    if (!seq->film_grain_params_present ||
        (!current->show_frame && !current->showable_frame))
        return 0;

    flag(apply_grain);

    if (!current->apply_grain)
        return 0;

    fb(16, grain_seed);

    if (current->frame_type == AV1_FRAME_INTER)
        flag(update_grain);
    else
        infer(update_grain, 1);

    if (!current->update_grain) {
        fb(3, film_grain_params_ref_idx);
        return 0;
    }

    fb(4, num_y_points);
    for (i = 0; i < current->num_y_points; i++) {
        fbs(8, point_y_value[i], 1, i);
        fbs(8, point_y_scaling[i], 1, i);
    }

    if (seq->color_config.mono_chrome)
        infer(chroma_scaling_from_luma, 0);
    else
        flag(chroma_scaling_from_luma);

    if (seq->color_config.mono_chrome ||
        current->chroma_scaling_from_luma ||
        (seq->color_config.subsampling_x == 1 &&
         seq->color_config.subsampling_y == 1 &&
         current->num_y_points == 0)) {
        infer(num_cb_points, 0);
        infer(num_cr_points, 0);
    } else {
        fb(4, num_cb_points);
        for (i = 0; i < current->num_cb_points; i++) {
            fbs(8, point_cb_value[i], 1, i);
            fbs(8, point_cb_scaling[i], 1, i);
        }
        fb(4, num_cr_points);
        for (i = 0; i < current->num_cr_points; i++) {
            fbs(8, point_cr_value[i], 1, i);
            fbs(8, point_cr_scaling[i], 1, i);
        }
    }

    fb(2, grain_scaling_minus_8);
    fb(2, ar_coeff_lag);

    num_pos_luma = 2 * current->ar_coeff_lag * (current->ar_coeff_lag + 1);
    if (current->num_y_points) {
        num_pos_chroma = num_pos_luma + 1;
        for (i = 0; i < num_pos_luma; i++)
            fbs(8, ar_coeffs_y_plus_128[i], 1, i);
    } else {
        num_pos_chroma = num_pos_luma;
    }

    if (current->chroma_scaling_from_luma || current->num_cb_points) {
        for (i = 0; i < num_pos_chroma; i++)
            fbs(8, ar_coeffs_cb_plus_128[i], 1, i);
    }
    if (current->chroma_scaling_from_luma || current->num_cr_points) {
        for (i = 0; i < num_pos_chroma; i++)
            fbs(8, ar_coeffs_cr_plus_128[i], 1, i);
    }

    fb(2, ar_coeff_shift_minus_6);
    fb(2, grain_scale_shift);

    if (current->num_cb_points) {
        fb(8, cb_mult);
        fb(8, cb_luma_mult);
        fb(9, cb_offset);
    }
    if (current->num_cr_points) {
        fb(8, cr_mult);
        fb(8, cr_luma_mult);
        fb(9, cr_offset);
    }

    flag(overlap_flag);
    flag(clip_to_restricted_range);

    return 0;
}

static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                     AV1RawFrameHeader *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq;
    int id_len, diff_len, all_frames, frame_is_intra, order_hint_bits;
    int i, err;

    if (!priv->sequence_header) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
               "unable to decode frame header.\n");
        return AVERROR_INVALIDDATA;
    }
    seq = priv->sequence_header;

    id_len = seq->additional_frame_id_length_minus_1 +
             seq->delta_frame_id_length_minus_2 + 3;
    all_frames = (1 << AV1_NUM_REF_FRAMES) - 1;

    if (seq->reduced_still_picture_header) {
        infer(show_existing_frame, 0);
        infer(frame_type, AV1_FRAME_KEY);
        infer(show_frame, 1);
        infer(showable_frame, 0);
        frame_is_intra = 1;

    } else {
        flag(show_existing_frame);

        if (current->show_existing_frame) {
            AV1ReferenceFrameState *frame;

            fb(3, frame_to_show_map_idx);
            frame = &priv->ref[current->frame_to_show_map_idx];

            if (seq->decoder_model_info_present_flag &&
                !seq->timing_info.equal_picture_interval) {
                fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
                   frame_presentation_time);
            }

            if (seq->frame_id_numbers_present_flag)
                fb(id_len, display_frame_id);

            if (frame->frame_type == AV1_FRAME_KEY)
                infer(refresh_frame_flags, all_frames);
            else
                infer(refresh_frame_flags, 0);

            return 0;
        }

        fb(2, frame_type);
        frame_is_intra = (current->frame_type == AV1_FRAME_INTRA_ONLY ||
                          current->frame_type == AV1_FRAME_KEY);

        flag(show_frame);
        if (current->show_frame &&
            seq->decoder_model_info_present_flag &&
            !seq->timing_info.equal_picture_interval) {
            fb(seq->decoder_model_info.frame_presentation_time_length_minus_1 + 1,
               frame_presentation_time);
        }
        if (current->show_frame)
            infer(showable_frame, current->frame_type != AV1_FRAME_KEY);
        else
            flag(showable_frame);

        if (current->frame_type == AV1_FRAME_SWITCH ||
            (current->frame_type == AV1_FRAME_KEY && current->show_frame))
            infer(error_resilient_mode, 1);
        else
            flag(error_resilient_mode);
    }

    if (current->frame_type == AV1_FRAME_KEY && current->show_frame) {
        for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
            priv->ref[i].valid = 0;
            priv->ref[i].order_hint = 0;
        }
    }

    flag(disable_cdf_update);

    if (seq->seq_force_screen_content_tools ==
        AV1_SELECT_SCREEN_CONTENT_TOOLS) {
        flag(allow_screen_content_tools);
    } else {
        infer(allow_screen_content_tools,
              seq->seq_force_screen_content_tools);
    }
    if (current->allow_screen_content_tools) {
        if (seq->seq_force_integer_mv == AV1_SELECT_INTEGER_MV)
            flag(force_integer_mv);
        else
            infer(force_integer_mv, seq->seq_force_integer_mv);
    } else {
        infer(force_integer_mv, 0);
    }

    if (seq->frame_id_numbers_present_flag) {
        fb(id_len, current_frame_id);

        diff_len = seq->delta_frame_id_length_minus_2 + 2;
        for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
            if (current->current_frame_id > (1 << diff_len)) {
                if (priv->ref[i].frame_id > current->current_frame_id ||
                    priv->ref[i].frame_id < (current->current_frame_id -
                                             (1 << diff_len)))
                    priv->ref[i].valid = 0;
            } else {
                if (priv->ref[i].frame_id > current->current_frame_id &&
                    priv->ref[i].frame_id < ((1 << id_len) +
                                             current->current_frame_id -
                                             (1 << diff_len)))
                    priv->ref[i].valid = 0;
            }
        }
    } else {
        infer(current_frame_id, 0);
    }

    if (current->frame_type == AV1_FRAME_SWITCH)
        infer(frame_size_override_flag, 1);
    else if (seq->reduced_still_picture_header)
        infer(frame_size_override_flag, 0);
    else
        flag(frame_size_override_flag);

    order_hint_bits =
        seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;
    if (order_hint_bits > 0)
        fb(order_hint_bits, order_hint);
    else
        infer(order_hint, 0);

    if (frame_is_intra || current->error_resilient_mode)
        infer(primary_ref_frame, AV1_PRIMARY_REF_NONE);
    else
        fb(3, primary_ref_frame);

    if (seq->decoder_model_info_present_flag) {
        flag(buffer_removal_time_present_flag);
        if (current->buffer_removal_time_present_flag) {
            for (i = 0; i <= seq->operating_points_cnt_minus_1; i++) {
                if (seq->decoder_model_present_for_this_op[i]) {
                    int op_pt_idc = seq->operating_point_idc[i];
                    int in_temporal_layer = (op_pt_idc >> priv->temporal_id) & 1;
                    int in_spatial_layer = (op_pt_idc >> (priv->spatial_id + 8)) & 1;
                    if (seq->operating_point_idc[i] == 0 ||
                        in_temporal_layer || in_spatial_layer) {
                        fbs(seq->decoder_model_info.buffer_removal_time_length_minus_1 + 1,
                            buffer_removal_time[i], 1, i);
                    }
                }
            }
        }
    }

    if (current->frame_type == AV1_FRAME_SWITCH ||
        (current->frame_type == AV1_FRAME_KEY && current->show_frame))
        infer(refresh_frame_flags, all_frames);
    else
        fb(8, refresh_frame_flags);

    if (!frame_is_intra || current->refresh_frame_flags != all_frames) {
        if (current->error_resilient_mode && seq->enable_order_hint) {
            for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
                fbs(order_hint_bits, ref_order_hint[i], 1, i);
                if (current->ref_order_hint[i] != priv->ref[i].order_hint)
                    priv->ref[i].valid = 0;
            }
        }
    }

    if (current->frame_type == AV1_FRAME_KEY ||
        current->frame_type == AV1_FRAME_INTRA_ONLY) {
        CHECK(FUNC(frame_size)(ctx, rw, current));
        CHECK(FUNC(render_size)(ctx, rw, current));

        if (current->allow_screen_content_tools &&
            priv->upscaled_width == priv->frame_width)
            flag(allow_intrabc);
        else
            infer(allow_intrabc, 0);

    } else {
        if (!seq->enable_order_hint) {
            infer(frame_refs_short_signaling, 0);
        } else {
            flag(frame_refs_short_signaling);
            if (current->frame_refs_short_signaling) {
                fb(3, last_frame_idx);
                fb(3, golden_frame_idx);

                for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
                    if (i == 0)
                        infer(ref_frame_idx[i], current->last_frame_idx);
                    else if (i == AV1_REF_FRAME_GOLDEN -
                                  AV1_REF_FRAME_LAST)
                        infer(ref_frame_idx[i], current->golden_frame_idx);
                    else
                        infer(ref_frame_idx[i], -1);
                }
            }
        }

        for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
            if (!current->frame_refs_short_signaling)
                fbs(3, ref_frame_idx[i], 1, i);
            if (seq->frame_id_numbers_present_flag) {
                fbs(seq->delta_frame_id_length_minus_2 + 2,
                    delta_frame_id_minus1[i], 1, i);
            }
        }

        if (current->frame_size_override_flag &&
            !current->error_resilient_mode) {
            CHECK(FUNC(frame_size_with_refs)(ctx, rw, current));
        } else {
            CHECK(FUNC(frame_size)(ctx, rw, current));
            CHECK(FUNC(render_size)(ctx, rw, current));
        }

        if (current->force_integer_mv)
            infer(allow_high_precision_mv, 0);
        else
            flag(allow_high_precision_mv);

        CHECK(FUNC(interpolation_filter)(ctx, rw, current));

        flag(is_motion_mode_switchable);

        if (current->error_resilient_mode ||
            !seq->enable_ref_frame_mvs)
            infer(use_ref_frame_mvs, 0);
        else
            flag(use_ref_frame_mvs);

        infer(allow_intrabc, 0);
    }

    if (!frame_is_intra) {
        // Derive reference frame sign biases.
    }

    if (seq->reduced_still_picture_header || current->disable_cdf_update)
        infer(disable_frame_end_update_cdf, 1);
    else
        flag(disable_frame_end_update_cdf);

    if (current->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
        // Init non-coeff CDFs.
        // Setup past independence.
    } else {
        // Load CDF tables from previous frame.
        // Load params from previous frame.
    }

    if (current->use_ref_frame_mvs) {
        // Perform motion field estimation process.
    }

    CHECK(FUNC(tile_info)(ctx, rw, current));

    CHECK(FUNC(quantization_params)(ctx, rw, current));

    CHECK(FUNC(segmentation_params)(ctx, rw, current));

    CHECK(FUNC(delta_q_params)(ctx, rw, current));

    CHECK(FUNC(delta_lf_params)(ctx, rw, current));

    // Init coeff CDFs / load previous segments.

    priv->coded_lossless = 1;
    for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
        int qindex;
        if (current->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
            qindex = (current->base_q_idx +
                      current->feature_value[i][AV1_SEG_LVL_ALT_Q]);
        } else {
            qindex = current->base_q_idx;
        }
        qindex = av_clip_uintp2(qindex, 8);

        if (qindex || current->delta_q_y_dc ||
            current->delta_q_u_ac || current->delta_q_u_dc ||
            current->delta_q_v_ac || current->delta_q_v_dc) {
            priv->coded_lossless = 0;
        }
    }
    priv->all_lossless = priv->coded_lossless &&
                         priv->frame_width == priv->upscaled_width;

    CHECK(FUNC(loop_filter_params)(ctx, rw, current));

    CHECK(FUNC(cdef_params)(ctx, rw, current));

    CHECK(FUNC(lr_params)(ctx, rw, current));

    CHECK(FUNC(read_tx_mode)(ctx, rw, current));

    CHECK(FUNC(frame_reference_mode)(ctx, rw, current));

    CHECK(FUNC(skip_mode_params)(ctx, rw, current));

    if (frame_is_intra || current->error_resilient_mode ||
        !seq->enable_warped_motion)
        infer(allow_warped_motion, 0);
    else
        flag(allow_warped_motion);

    flag(reduced_tx_set);

    CHECK(FUNC(global_motion_params)(ctx, rw, current));

    CHECK(FUNC(film_grain_params)(ctx, rw, current));

    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (current->refresh_frame_flags & (1 << i)) {
            priv->ref[i] = (AV1ReferenceFrameState) {
                .valid = 1,
                .frame_id = current->current_frame_id,
                .upscaled_width = priv->upscaled_width,
                .frame_width = priv->frame_width,
                .frame_height = priv->frame_height,
                .render_width = priv->render_width,
                .render_height = priv->render_height,
                .frame_type = current->frame_type,
                .subsampling_x = seq->color_config.subsampling_x,
                .subsampling_y = seq->color_config.subsampling_y,
                .bit_depth = priv->bit_depth,
                .order_hint = current->order_hint,
            };
        }
    }

    av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame %d: size %dx%d "
           "upscaled %d render %dx%d subsample %dx%d "
           "bitdepth %d tiles %dx%d.\n", current->order_hint,
           priv->frame_width, priv->frame_height, priv->upscaled_width,
           priv->render_width, priv->render_height,
           seq->color_config.subsampling_x + 1,
           seq->color_config.subsampling_y + 1, priv->bit_depth,
           priv->tile_rows, priv->tile_cols);

    return 0;
}
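
/*
 * Note: a frame header may be repeated inside the same temporal unit as a
 * redundant frame header OBU.  It must match the header already seen, so
 * once one has been parsed the stored copy is replayed byte-for-byte below
 * via frame_header_copy rather than being parsed again.
 */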

static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawFrameHeader *current, int redundant,
                                  AVBufferRef *rw_buffer_ref)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int start_pos, fh_bits, fh_bytes, err;
    uint8_t *fh_start;

    if (priv->seen_frame_header) {
        if (!redundant) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated "
                   "frame header OBU.\n");
            return AVERROR_INVALIDDATA;
        } else {
            GetBitContext fh;
            size_t i, b;
            uint32_t val;

            HEADER("Redundant Frame Header");

            av_assert0(priv->frame_header_ref && priv->frame_header);

            init_get_bits(&fh, priv->frame_header,
                          priv->frame_header_size);
            for (i = 0; i < priv->frame_header_size; i += 8) {
                b = FFMIN(priv->frame_header_size - i, 8);
                val = get_bits(&fh, b);
                xf(b, frame_header_copy[i],
                   val, val, val, 1, i / 8);
            }
        }
    } else {
        if (redundant)
            HEADER("Redundant Frame Header (used as Frame Header)");
        else
            HEADER("Frame Header");

        priv->seen_frame_header = 1;

#ifdef READ
        start_pos = get_bits_count(rw);
#else
        start_pos = put_bits_count(rw);
#endif

        CHECK(FUNC(uncompressed_header)(ctx, rw, current));

        if (current->show_existing_frame) {
            priv->seen_frame_header = 0;
        } else {
            priv->seen_frame_header = 1;

            av_buffer_unref(&priv->frame_header_ref);

#ifdef READ
            fh_bits = get_bits_count(rw) - start_pos;
            fh_start = (uint8_t*)rw->buffer + start_pos / 8;
#else
            // Need to flush the bitwriter so that we can copy its output,
            // but use a copy so we don't affect the caller's structure.
            {
                PutBitContext tmp = *rw;
                flush_put_bits(&tmp);
            }

            fh_bits = put_bits_count(rw) - start_pos;
            fh_start = rw->buf + start_pos / 8;
#endif
            fh_bytes = (fh_bits + 7) / 8;

            priv->frame_header_size = fh_bits;

            if (rw_buffer_ref) {
                priv->frame_header_ref = av_buffer_ref(rw_buffer_ref);
                if (!priv->frame_header_ref)
                    return AVERROR(ENOMEM);
                priv->frame_header = fh_start;
            } else {
                priv->frame_header_ref =
                    av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
                if (!priv->frame_header_ref)
                    return AVERROR(ENOMEM);
                priv->frame_header = priv->frame_header_ref->data;
                memcpy(priv->frame_header, fh_start, fh_bytes);
            }
        }
    }

    return 0;
}

static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                                AV1RawTileGroup *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    int num_tiles, tile_bits;
    int err;

    HEADER("Tile Group");

    num_tiles = priv->tile_cols * priv->tile_rows;
    if (num_tiles > 1)
        flag(tile_start_and_end_present_flag);
    else
        infer(tile_start_and_end_present_flag, 0);

    if (num_tiles == 1 || !current->tile_start_and_end_present_flag) {
        infer(tg_start, 0);
        infer(tg_end, num_tiles - 1);
    } else {
        tile_bits = cbs_av1_tile_log2(1, priv->tile_cols) +
                    cbs_av1_tile_log2(1, priv->tile_rows);
        fb(tile_bits, tg_start);
        fb(tile_bits, tg_end);
    }

    CHECK(FUNC(byte_alignment)(ctx, rw));

    // Reset header for next frame.
    if (current->tg_end == num_tiles - 1)
        priv->seen_frame_header = 0;

    // Tile data follows.

    return 0;
}

static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                           AV1RawFrame *current,
                           AVBufferRef *rw_buffer_ref)
{
    int err;

    CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header,
                                 0, rw_buffer_ref));

    CHECK(FUNC(byte_alignment)(ctx, rw));

    CHECK(FUNC(tile_group_obu)(ctx, rw, &current->tile_group));

    return 0;
}

static int FUNC(tile_list_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                               AV1RawTileList *current)
{
    int err;

    fb(8, output_frame_width_in_tiles_minus_1);
    fb(8, output_frame_height_in_tiles_minus_1);
    fb(16, tile_count_minus_1);

    // Tile data follows.

    return 0;
}

static int FUNC(metadata_hdr_cll)(CodedBitstreamContext *ctx, RWContext *rw,
                                  AV1RawMetadataHDRCLL *current)
{
    int err;

    fb(16, max_cll);
    fb(16, max_fall);

    return 0;
}

static int FUNC(metadata_hdr_mdcv)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataHDRMDCV *current)
{
    int err, i;

    for (i = 0; i < 3; i++) {
        fbs(16, primary_chromaticity_x[i], 1, i);
        fbs(16, primary_chromaticity_y[i], 1, i);
    }

    fb(16, white_point_chromaticity_x);
    fb(16, white_point_chromaticity_y);

    fc(32, luminance_max, 1, MAX_UINT_BITS(32));
    // luminance_min must be lower than luminance_max. Convert luminance_max from
    // 24.8 fixed point to 18.14 fixed point in order to compare them.
    fc(32, luminance_min, 0, FFMIN(((uint64_t)current->luminance_max << 6) - 1,
                                   MAX_UINT_BITS(32)));

    return 0;
}

static int FUNC(scalability_structure)(CodedBitstreamContext *ctx, RWContext *rw,
                                       AV1RawMetadataScalability *current)
{
    CodedBitstreamAV1Context *priv = ctx->priv_data;
    const AV1RawSequenceHeader *seq;
    int err, i, j;

    if (!priv->sequence_header) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "No sequence header available: "
               "unable to parse scalability metadata.\n");
        return AVERROR_INVALIDDATA;
    }
    seq = priv->sequence_header;

    fb(2, spatial_layers_cnt_minus_1);
    flag(spatial_layer_dimensions_present_flag);
    flag(spatial_layer_description_present_flag);
    flag(temporal_group_description_present_flag);
    fc(3, scalability_structure_reserved_3bits, 0, 0);
    if (current->spatial_layer_dimensions_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++) {
            fcs(16, spatial_layer_max_width[i],
                0, seq->max_frame_width_minus_1 + 1, 1, i);
            fcs(16, spatial_layer_max_height[i],
                0, seq->max_frame_height_minus_1 + 1, 1, i);
        }
    }
    if (current->spatial_layer_description_present_flag) {
        for (i = 0; i <= current->spatial_layers_cnt_minus_1; i++)
            fbs(8, spatial_layer_ref_id[i], 1, i);
    }
    if (current->temporal_group_description_present_flag) {
        fb(8, temporal_group_size);
        for (i = 0; i < current->temporal_group_size; i++) {
            fbs(3, temporal_group_temporal_id[i], 1, i);
            flags(temporal_group_temporal_switching_up_point_flag[i], 1, i);
            flags(temporal_group_spatial_switching_up_point_flag[i], 1, i);
            fbs(3, temporal_group_ref_cnt[i], 1, i);
            for (j = 0; j < current->temporal_group_ref_cnt[i]; j++) {
                fbs(8, temporal_group_ref_pic_diff[i][j], 2, i, j);
            }
        }
    }

    return 0;
}

static int FUNC(metadata_scalability)(CodedBitstreamContext *ctx, RWContext *rw,
                                      AV1RawMetadataScalability *current)
{
    int err;

    fb(8, scalability_mode_idc);

    if (current->scalability_mode_idc == AV1_SCALABILITY_SS)
        CHECK(FUNC(scalability_structure)(ctx, rw, current));

    return 0;
}

static int FUNC(metadata_itut_t35)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataITUTT35 *current)
{
    int err;
    size_t i;

    fb(8, itu_t_t35_country_code);
    if (current->itu_t_t35_country_code == 0xff)
        fb(8, itu_t_t35_country_code_extension_byte);

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, itu_t_t35_payload_bytes[i], current->payload[i],
           0x00, 0xff, 1, i);

    return 0;
}

static int FUNC(metadata_timecode)(CodedBitstreamContext *ctx, RWContext *rw,
                                   AV1RawMetadataTimecode *current)
{
    int err;

    fb(5, counting_type);
    flag(full_timestamp_flag);
    flag(discontinuity_flag);
    flag(cnt_dropped_flag);
    fb(9, n_frames);

    if (current->full_timestamp_flag) {
        fc(6, seconds_value, 0, 59);
        fc(6, minutes_value, 0, 59);
        fc(5, hours_value, 0, 23);
    } else {
        flag(seconds_flag);
        if (current->seconds_flag) {
            fc(6, seconds_value, 0, 59);
            flag(minutes_flag);
            if (current->minutes_flag) {
                fc(6, minutes_value, 0, 59);
                flag(hours_flag);
                if (current->hours_flag)
                    fc(5, hours_value, 0, 23);
            }
        }
    }

    fb(5, time_offset_length);
    if (current->time_offset_length > 0)
        fb(current->time_offset_length, time_offset_value);
    else
        infer(time_offset_length, 0);

    return 0;
}

static int FUNC(metadata_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                              AV1RawMetadata *current)
{
    int err;

    leb128(metadata_type);

    switch (current->metadata_type) {
    case AV1_METADATA_TYPE_HDR_CLL:
        CHECK(FUNC(metadata_hdr_cll)(ctx, rw, &current->metadata.hdr_cll));
        break;
    case AV1_METADATA_TYPE_HDR_MDCV:
        CHECK(FUNC(metadata_hdr_mdcv)(ctx, rw, &current->metadata.hdr_mdcv));
        break;
    case AV1_METADATA_TYPE_SCALABILITY:
        CHECK(FUNC(metadata_scalability)(ctx, rw, &current->metadata.scalability));
        break;
    case AV1_METADATA_TYPE_ITUT_T35:
        CHECK(FUNC(metadata_itut_t35)(ctx, rw, &current->metadata.itut_t35));
        break;
    case AV1_METADATA_TYPE_TIMECODE:
        CHECK(FUNC(metadata_timecode)(ctx, rw, &current->metadata.timecode));
        break;
    default:
        // Unknown metadata type.
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

static int FUNC(padding_obu)(CodedBitstreamContext *ctx, RWContext *rw,
                             AV1RawPadding *current)
{
    int i, err;

    HEADER("Padding");

#ifdef READ
    // The payload runs up to the start of the trailing bits, but there might
    // be arbitrarily many trailing zeroes so we need to read through twice.
    current->payload_size = cbs_av1_get_payload_bytes_left(rw);

    current->payload_ref = av_buffer_alloc(current->payload_size);
    if (!current->payload_ref)
        return AVERROR(ENOMEM);
    current->payload = current->payload_ref->data;
#endif

    for (i = 0; i < current->payload_size; i++)
        xf(8, obu_padding_byte[i], current->payload[i], 0x00, 0xff, 1, i);

    return 0;
}