/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

static int FUNC(rbsp_trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;

    fixed(1, rbsp_stop_one_bit, 1);
    while (byte_alignment(rw) != 0)
        fixed(1, rbsp_alignment_zero_bit, 0);

    return 0;
}

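// Read/write the two-byte NAL unit header (ITU-T H.265 section 7.3.1.2).
// A non-negative expected_nal_unit_type pins nal_unit_type to exactly that
// value; passing -1 accepts any type in [0, 63].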
static int FUNC(nal_unit_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                 H265RawNALUnitHeader *current,
                                 int expected_nal_unit_type)
{
    int err;

    u(1, forbidden_zero_bit, 0, 0);

    if (expected_nal_unit_type >= 0)
        u(6, nal_unit_type, expected_nal_unit_type,
                            expected_nal_unit_type);
    else
        u(6, nal_unit_type, 0, 63);

    u(6, nuh_layer_id,          0, 62);
    u(3, nuh_temporal_id_plus1, 1,  7);

    return 0;
}

static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;

    fixed(1, alignment_bit_equal_to_one, 1);
    while (byte_alignment(rw) != 0)
        fixed(1, alignment_bit_equal_to_zero, 0);

    return 0;
}

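// Preserve otherwise-unparsed parameter-set extension payloads: on read,
// count the remaining RBSP bits and store them verbatim; on write, emit
// the stored bits again unchanged.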
static int FUNC(extension_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawPSExtensionData *current)
{
    int err;
    size_t k;
#ifdef READ
    GetBitContext start;
    uint8_t bit;
    start = *rw;
    for (k = 0; cbs_h2645_read_more_rbsp_data(rw); k++)
        skip_bits(rw, 1);
    current->bit_length = k;
    if (k > 0) {
        *rw = start;
        allocate(current->data, (current->bit_length + 7) / 8);
        for (k = 0; k < current->bit_length; k++) {
            xu(1, extension_data, bit, 0, 1, 0);
            current->data[k / 8] |= bit << (7 - k % 8);
        }
    }
#else
    for (k = 0; k < current->bit_length; k++)
        xu(1, extension_data, current->data[k / 8] >> (7 - k % 8), 0, 1, 0);
#endif
    return 0;
}

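// profile_tier_level( ) as in ITU-T H.265 section 7.3.3.  Sub-layer
// profile/level information is not yet supported: streams signalling it
// are rejected with AVERROR_PATCHWELCOME below.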
static int FUNC(profile_tier_level)(CodedBitstreamContext *ctx, RWContext *rw,
                                    H265RawProfileTierLevel *current,
                                    int profile_present_flag,
                                    int max_num_sub_layers_minus1)
{
    int err, i, j;

    if (profile_present_flag) {
        u(2, general_profile_space, 0, 0);
        flag(general_tier_flag);
        u(5, general_profile_idc, 0, 31);

        for (j = 0; j < 32; j++)
            flags(general_profile_compatibility_flag[j], 1, j);

        flag(general_progressive_source_flag);
        flag(general_interlaced_source_flag);
        flag(general_non_packed_constraint_flag);
        flag(general_frame_only_constraint_flag);

#define profile_compatible(x) (current->general_profile_idc == (x) || \
                               current->general_profile_compatibility_flag[x])
        if (profile_compatible(4) || profile_compatible(5) ||
            profile_compatible(6) || profile_compatible(7) ||
            profile_compatible(8) || profile_compatible(9) ||
            profile_compatible(10)) {
            flag(general_max_12bit_constraint_flag);
            flag(general_max_10bit_constraint_flag);
            flag(general_max_8bit_constraint_flag);
            flag(general_max_422chroma_constraint_flag);
            flag(general_max_420chroma_constraint_flag);
            flag(general_max_monochrome_constraint_flag);
            flag(general_intra_constraint_flag);
            flag(general_one_picture_only_constraint_flag);
            flag(general_lower_bit_rate_constraint_flag);

            if (profile_compatible(5) || profile_compatible(9) ||
                profile_compatible(10)) {
                flag(general_max_14bit_constraint_flag);
                fixed(24, general_reserved_zero_33bits, 0);
                fixed( 9, general_reserved_zero_33bits, 0);
            } else {
                fixed(24, general_reserved_zero_34bits, 0);
                fixed(10, general_reserved_zero_34bits, 0);
            }
        } else if (profile_compatible(2)) {
            fixed(7, general_reserved_zero_7bits, 0);
            flag(general_one_picture_only_constraint_flag);
            fixed(24, general_reserved_zero_35bits, 0);
            fixed(11, general_reserved_zero_35bits, 0);
        } else {
            fixed(24, general_reserved_zero_43bits, 0);
            fixed(19, general_reserved_zero_43bits, 0);
        }

        if (profile_compatible(1) || profile_compatible(2) ||
            profile_compatible(3) || profile_compatible(4) ||
            profile_compatible(5) || profile_compatible(9)) {
            flag(general_inbld_flag);
        } else {
            fixed(1, general_reserved_zero_bit, 0);
        }
#undef profile_compatible
    }

    u(8, general_level_idc, 0, 255);

    for (i = 0; i < max_num_sub_layers_minus1; i++) {
        flags(sub_layer_profile_present_flag[i], 1, i);
        flags(sub_layer_level_present_flag[i],   1, i);
    }

    if (max_num_sub_layers_minus1 > 0) {
        for (i = max_num_sub_layers_minus1; i < 8; i++)
            fixed(2, reserved_zero_2bits, 0);
    }

    for (i = 0; i < max_num_sub_layers_minus1; i++) {
        if (current->sub_layer_profile_present_flag[i])
            return AVERROR_PATCHWELCOME;
        if (current->sub_layer_level_present_flag[i])
            return AVERROR_PATCHWELCOME;
    }

    return 0;
}

static int FUNC(sub_layer_hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                          H265RawHRDParameters *hrd,
                                          int nal, int sub_layer_id)
{
    H265RawSubLayerHRDParameters *current;
    int err, i;

    if (nal)
        current = &hrd->nal_sub_layer_hrd_parameters[sub_layer_id];
    else
        current = &hrd->vcl_sub_layer_hrd_parameters[sub_layer_id];

    for (i = 0; i <= hrd->cpb_cnt_minus1[sub_layer_id]; i++) {
        ues(bit_rate_value_minus1[i], 0, UINT32_MAX - 1, 1, i);
        ues(cpb_size_value_minus1[i], 0, UINT32_MAX - 1, 1, i);
        if (hrd->sub_pic_hrd_params_present_flag) {
            ues(cpb_size_du_value_minus1[i], 0, UINT32_MAX - 1, 1, i);
            ues(bit_rate_du_value_minus1[i], 0, UINT32_MAX - 1, 1, i);
        }
        flags(cbr_flag[i], 1, i);
    }

    return 0;
}

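// hrd_parameters( ) as in ITU-T H.265 section E.2.2.  When the common
// information block is absent, the *_length_minus1 fields take their
// specified inferred value of 23.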
static int FUNC(hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawHRDParameters *current, int common_inf_present_flag,
                                int max_num_sub_layers_minus1)
{
    int err, i;

    if (common_inf_present_flag) {
        flag(nal_hrd_parameters_present_flag);
        flag(vcl_hrd_parameters_present_flag);

        if (current->nal_hrd_parameters_present_flag ||
            current->vcl_hrd_parameters_present_flag) {
            flag(sub_pic_hrd_params_present_flag);

            if (current->sub_pic_hrd_params_present_flag) {
                u(8, tick_divisor_minus2, 0, 255);
                u(5, du_cpb_removal_delay_increment_length_minus1, 0, 31);
                flag(sub_pic_cpb_params_in_pic_timing_sei_flag);
                u(5, dpb_output_delay_du_length_minus1, 0, 31);
            }

            u(4, bit_rate_scale, 0, 15);
            u(4, cpb_size_scale, 0, 15);
            if (current->sub_pic_hrd_params_present_flag)
                u(4, cpb_size_du_scale, 0, 15);

            u(5, initial_cpb_removal_delay_length_minus1, 0, 31);
            u(5, au_cpb_removal_delay_length_minus1,      0, 31);
            u(5, dpb_output_delay_length_minus1,          0, 31);
        } else {
            infer(sub_pic_hrd_params_present_flag, 0);
            infer(initial_cpb_removal_delay_length_minus1, 23);
            infer(au_cpb_removal_delay_length_minus1,      23);
            infer(dpb_output_delay_length_minus1,          23);
        }
    }

    for (i = 0; i <= max_num_sub_layers_minus1; i++) {
        flags(fixed_pic_rate_general_flag[i], 1, i);

        if (!current->fixed_pic_rate_general_flag[i])
            flags(fixed_pic_rate_within_cvs_flag[i], 1, i);
        else
            infer(fixed_pic_rate_within_cvs_flag[i], 1);

        if (current->fixed_pic_rate_within_cvs_flag[i]) {
            ues(elemental_duration_in_tc_minus1[i], 0, 2047, 1, i);
            infer(low_delay_hrd_flag[i], 0);
        } else
            flags(low_delay_hrd_flag[i], 1, i);

        if (!current->low_delay_hrd_flag[i])
            ues(cpb_cnt_minus1[i], 0, 31, 1, i);
        else
            infer(cpb_cnt_minus1[i], 0);

        if (current->nal_hrd_parameters_present_flag)
            CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 0, i));
        if (current->vcl_hrd_parameters_present_flag)
            CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 1, i));
    }

    return 0;
}

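// VUI parameters (ITU-T H.265 Annex E).  Absent optional fields are given
// their specified defaults, so consumers of the struct need not re-check
// the presence flags.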
static int FUNC(vui_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawVUI *current, const H265RawSPS *sps)
{
    int err;

    flag(aspect_ratio_info_present_flag);
    if (current->aspect_ratio_info_present_flag) {
        u(8, aspect_ratio_idc, 0, 255);
        if (current->aspect_ratio_idc == 255) {
            u(16, sar_width,  0, 65535);
            u(16, sar_height, 0, 65535);
        }
    } else {
        infer(aspect_ratio_idc, 0);
    }

    flag(overscan_info_present_flag);
    if (current->overscan_info_present_flag)
        flag(overscan_appropriate_flag);

    flag(video_signal_type_present_flag);
    if (current->video_signal_type_present_flag) {
        u(3, video_format, 0, 7);
        flag(video_full_range_flag);
        flag(colour_description_present_flag);
        if (current->colour_description_present_flag) {
            u(8, colour_primaries,         0, 255);
            u(8, transfer_characteristics, 0, 255);
            u(8, matrix_coefficients,      0, 255);
        } else {
            infer(colour_primaries,         2);
            infer(transfer_characteristics, 2);
            infer(matrix_coefficients,      2);
        }
    } else {
        infer(video_format,             5);
        infer(video_full_range_flag,    0);
        infer(colour_primaries,         2);
        infer(transfer_characteristics, 2);
        infer(matrix_coefficients,      2);
    }

    flag(chroma_loc_info_present_flag);
    if (current->chroma_loc_info_present_flag) {
        ue(chroma_sample_loc_type_top_field,    0, 5);
        ue(chroma_sample_loc_type_bottom_field, 0, 5);
    } else {
        infer(chroma_sample_loc_type_top_field,    0);
        infer(chroma_sample_loc_type_bottom_field, 0);
    }

    flag(neutral_chroma_indication_flag);
    flag(field_seq_flag);
    flag(frame_field_info_present_flag);

    flag(default_display_window_flag);
    if (current->default_display_window_flag) {
        ue(def_disp_win_left_offset,   0, 16384);
        ue(def_disp_win_right_offset,  0, 16384);
        ue(def_disp_win_top_offset,    0, 16384);
        ue(def_disp_win_bottom_offset, 0, 16384);
    }

    flag(vui_timing_info_present_flag);
    if (current->vui_timing_info_present_flag) {
        u(32, vui_num_units_in_tick, 1, UINT32_MAX);
        u(32, vui_time_scale,        1, UINT32_MAX);
        flag(vui_poc_proportional_to_timing_flag);
        if (current->vui_poc_proportional_to_timing_flag)
            ue(vui_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);

        flag(vui_hrd_parameters_present_flag);
        if (current->vui_hrd_parameters_present_flag) {
            CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters,
                                       1, sps->sps_max_sub_layers_minus1));
        }
    }

    flag(bitstream_restriction_flag);
    if (current->bitstream_restriction_flag) {
        flag(tiles_fixed_structure_flag);
        flag(motion_vectors_over_pic_boundaries_flag);
        flag(restricted_ref_pic_lists_flag);
        ue(min_spatial_segmentation_idc,  0, 4095);
        ue(max_bytes_per_pic_denom,       0, 16);
        ue(max_bits_per_min_cu_denom,     0, 16);
        ue(log2_max_mv_length_horizontal, 0, 16);
        ue(log2_max_mv_length_vertical,   0, 16);
    } else {
        infer(tiles_fixed_structure_flag,             0);
        infer(motion_vectors_over_pic_boundaries_flag, 1);
        infer(min_spatial_segmentation_idc,  0);
        infer(max_bytes_per_pic_denom,       2);
        infer(max_bits_per_min_cu_denom,     1);
        infer(log2_max_mv_length_horizontal, 15);
        infer(log2_max_mv_length_vertical,   15);
    }

    return 0;
}

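// Video Parameter Set (ITU-T H.265 section 7.3.2.1).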
static int FUNC(vps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawVPS *current)
{
    int err, i, j;

    HEADER("Video Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_VPS));

    u(4, vps_video_parameter_set_id, 0, 15);

    flag(vps_base_layer_internal_flag);
    flag(vps_base_layer_available_flag);

    u(6, vps_max_layers_minus1,     0, HEVC_MAX_LAYERS - 1);
    u(3, vps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
    flag(vps_temporal_id_nesting_flag);

    if (current->vps_max_sub_layers_minus1 == 0 &&
        current->vps_temporal_id_nesting_flag != 1) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
               "vps_temporal_id_nesting_flag must be 1 if "
               "vps_max_sub_layers_minus1 is 0.\n");
        return AVERROR_INVALIDDATA;
    }

    fixed(16, vps_reserved_0xffff_16bits, 0xffff);

    CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
                                   1, current->vps_max_sub_layers_minus1));

    flag(vps_sub_layer_ordering_info_present_flag);
    for (i = (current->vps_sub_layer_ordering_info_present_flag ?
              0 : current->vps_max_sub_layers_minus1);
         i <= current->vps_max_sub_layers_minus1; i++) {
        ues(vps_max_dec_pic_buffering_minus1[i],
            0, HEVC_MAX_DPB_SIZE - 1, 1, i);
        ues(vps_max_num_reorder_pics[i],
            0, current->vps_max_dec_pic_buffering_minus1[i], 1, i);
        ues(vps_max_latency_increase_plus1[i],
            0, UINT32_MAX - 1, 1, i);
    }
    if (!current->vps_sub_layer_ordering_info_present_flag) {
        for (i = 0; i < current->vps_max_sub_layers_minus1; i++) {
            infer(vps_max_dec_pic_buffering_minus1[i],
                  current->vps_max_dec_pic_buffering_minus1[current->vps_max_sub_layers_minus1]);
            infer(vps_max_num_reorder_pics[i],
                  current->vps_max_num_reorder_pics[current->vps_max_sub_layers_minus1]);
            infer(vps_max_latency_increase_plus1[i],
                  current->vps_max_latency_increase_plus1[current->vps_max_sub_layers_minus1]);
        }
    }

    u(6, vps_max_layer_id,        0, HEVC_MAX_LAYERS - 1);
    ue(vps_num_layer_sets_minus1, 0, HEVC_MAX_LAYER_SETS - 1);
    for (i = 1; i <= current->vps_num_layer_sets_minus1; i++) {
        for (j = 0; j <= current->vps_max_layer_id; j++)
            flags(layer_id_included_flag[i][j], 2, i, j);
    }
    for (j = 0; j <= current->vps_max_layer_id; j++)
        infer(layer_id_included_flag[0][j], j == 0);

    flag(vps_timing_info_present_flag);
    if (current->vps_timing_info_present_flag) {
        u(32, vps_num_units_in_tick, 1, UINT32_MAX);
        u(32, vps_time_scale,        1, UINT32_MAX);
        flag(vps_poc_proportional_to_timing_flag);
        if (current->vps_poc_proportional_to_timing_flag)
            ue(vps_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);
        ue(vps_num_hrd_parameters, 0, current->vps_num_layer_sets_minus1 + 1);
        for (i = 0; i < current->vps_num_hrd_parameters; i++) {
            ues(hrd_layer_set_idx[i],
                current->vps_base_layer_internal_flag ? 0 : 1,
                current->vps_num_layer_sets_minus1, 1, i);
            if (i > 0)
                flags(cprms_present_flag[i], 1, i);
            else
                infer(cprms_present_flag[0], 1);
            CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters[i],
                                       current->cprms_present_flag[i],
                                       current->vps_max_sub_layers_minus1));
        }
    }

    flag(vps_extension_flag);
    if (current->vps_extension_flag)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));
    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

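// Short-term reference picture set (ITU-T H.265 section 7.3.7).  The set
// is always stored in the delta-step form used when prediction is off:
// e.g. delta_poc_s0_minus1[] = { 0, 3 } encodes the negative POC deltas
// { -1, -5 }, each entry adding delta_poc_s0_minus1[i] + 1 to the running
// distance from the current picture.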
static int FUNC(st_ref_pic_set)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawSTRefPicSet *current, int st_rps_idx,
                                const H265RawSPS *sps)
{
    int err, i, j;

    if (st_rps_idx != 0)
        flag(inter_ref_pic_set_prediction_flag);
    else
        infer(inter_ref_pic_set_prediction_flag, 0);

    if (current->inter_ref_pic_set_prediction_flag) {
        unsigned int ref_rps_idx, num_delta_pocs;
        const H265RawSTRefPicSet *ref;
        int delta_rps, d_poc;
        int ref_delta_poc_s0[HEVC_MAX_REFS], ref_delta_poc_s1[HEVC_MAX_REFS];
        int delta_poc_s0[HEVC_MAX_REFS], delta_poc_s1[HEVC_MAX_REFS];
        uint8_t used_by_curr_pic_s0[HEVC_MAX_REFS],
                used_by_curr_pic_s1[HEVC_MAX_REFS];

        if (st_rps_idx == sps->num_short_term_ref_pic_sets)
            ue(delta_idx_minus1, 0, st_rps_idx - 1);
        else
            infer(delta_idx_minus1, 0);

        ref_rps_idx = st_rps_idx - (current->delta_idx_minus1 + 1);
        ref = &sps->st_ref_pic_set[ref_rps_idx];
        num_delta_pocs = ref->num_negative_pics + ref->num_positive_pics;

        flag(delta_rps_sign);
        ue(abs_delta_rps_minus1, 0, INT16_MAX);
        delta_rps = (1 - 2 * current->delta_rps_sign) *
            (current->abs_delta_rps_minus1 + 1);

        for (j = 0; j <= num_delta_pocs; j++) {
            flags(used_by_curr_pic_flag[j], 1, j);
            if (!current->used_by_curr_pic_flag[j])
                flags(use_delta_flag[j], 1, j);
            else
                infer(use_delta_flag[j], 1);
        }

        // Since the stored form of an RPS here is actually the delta-step
        // form used when inter_ref_pic_set_prediction_flag is not set, we
        // need to reconstruct that here in order to be able to refer to
        // the RPS later (which is required for parsing, because we don't
        // even know what syntax elements appear without it).  Therefore,
        // this code takes the delta-step form of the reference set, turns
        // it into the delta-array form, applies the prediction process of
        // 7.4.8, converts the result back to the delta-step form, and
        // stores that as the current set for future use.  Note that the
        // inferences here mean that writers using prediction will need
        // to fill in the delta-step values correctly as well - since the
        // whole RPS prediction process is somewhat overly sophisticated,
        // this hopefully forms a useful check for them to ensure their
        // predicted form actually matches what was intended rather than
        // an onerous additional requirement.

        d_poc = 0;
        for (i = 0; i < ref->num_negative_pics; i++) {
            d_poc -= ref->delta_poc_s0_minus1[i] + 1;
            ref_delta_poc_s0[i] = d_poc;
        }
        d_poc = 0;
        for (i = 0; i < ref->num_positive_pics; i++) {
            d_poc += ref->delta_poc_s1_minus1[i] + 1;
            ref_delta_poc_s1[i] = d_poc;
        }

        i = 0;
        for (j = ref->num_positive_pics - 1; j >= 0; j--) {
            d_poc = ref_delta_poc_s1[j] + delta_rps;
            if (d_poc < 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
                delta_poc_s0[i] = d_poc;
                used_by_curr_pic_s0[i++] =
                    current->used_by_curr_pic_flag[ref->num_negative_pics + j];
            }
        }
        if (delta_rps < 0 && current->use_delta_flag[num_delta_pocs]) {
            delta_poc_s0[i] = delta_rps;
            used_by_curr_pic_s0[i++] =
                current->used_by_curr_pic_flag[num_delta_pocs];
        }
        for (j = 0; j < ref->num_negative_pics; j++) {
            d_poc = ref_delta_poc_s0[j] + delta_rps;
            if (d_poc < 0 && current->use_delta_flag[j]) {
                delta_poc_s0[i] = d_poc;
                used_by_curr_pic_s0[i++] = current->used_by_curr_pic_flag[j];
            }
        }

        infer(num_negative_pics, i);
        for (i = 0; i < current->num_negative_pics; i++) {
            infer(delta_poc_s0_minus1[i],
                  -(delta_poc_s0[i] - (i == 0 ? 0 : delta_poc_s0[i - 1])) - 1);
            infer(used_by_curr_pic_s0_flag[i], used_by_curr_pic_s0[i]);
        }

        i = 0;
        for (j = ref->num_negative_pics - 1; j >= 0; j--) {
            d_poc = ref_delta_poc_s0[j] + delta_rps;
            if (d_poc > 0 && current->use_delta_flag[j]) {
                delta_poc_s1[i] = d_poc;
                used_by_curr_pic_s1[i++] = current->used_by_curr_pic_flag[j];
            }
        }
        if (delta_rps > 0 && current->use_delta_flag[num_delta_pocs]) {
            delta_poc_s1[i] = delta_rps;
            used_by_curr_pic_s1[i++] =
                current->used_by_curr_pic_flag[num_delta_pocs];
        }
        for (j = 0; j < ref->num_positive_pics; j++) {
            d_poc = ref_delta_poc_s1[j] + delta_rps;
            if (d_poc > 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
                delta_poc_s1[i] = d_poc;
                used_by_curr_pic_s1[i++] =
                    current->used_by_curr_pic_flag[ref->num_negative_pics + j];
            }
        }

        infer(num_positive_pics, i);
        for (i = 0; i < current->num_positive_pics; i++) {
            infer(delta_poc_s1_minus1[i],
                  delta_poc_s1[i] - (i == 0 ? 0 : delta_poc_s1[i - 1]) - 1);
            infer(used_by_curr_pic_s1_flag[i], used_by_curr_pic_s1[i]);
        }
    } else {
        ue(num_negative_pics, 0, 15);
        ue(num_positive_pics, 0, 15 - current->num_negative_pics);
        for (i = 0; i < current->num_negative_pics; i++) {
            ues(delta_poc_s0_minus1[i], 0, INT16_MAX, 1, i);
            flags(used_by_curr_pic_s0_flag[i], 1, i);
        }
        for (i = 0; i < current->num_positive_pics; i++) {
            ues(delta_poc_s1_minus1[i], 0, INT16_MAX, 1, i);
            flags(used_by_curr_pic_s1_flag[i], 1, i);
        }
    }

    return 0;
}

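// Scaling list data (ITU-T H.265 section 7.3.4).  For the 32x32 size
// class (sizeId == 3) only matrix IDs 0 and 3 are coded, hence the step
// of 3, and at most 64 delta coefficients are sent per list.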
static int FUNC(scaling_list_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawScalingList *current)
{
    int sizeId, matrixId;
    int err, n, i;

    for (sizeId = 0; sizeId < 4; sizeId++) {
        for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3 ? 3 : 1)) {
            flags(scaling_list_pred_mode_flag[sizeId][matrixId],
                  2, sizeId, matrixId);
            if (!current->scaling_list_pred_mode_flag[sizeId][matrixId]) {
                ues(scaling_list_pred_matrix_id_delta[sizeId][matrixId],
                    0, sizeId == 3 ? matrixId / 3 : matrixId,
                    2, sizeId, matrixId);
            } else {
                n = FFMIN(64, 1 << (4 + (sizeId << 1)));
                if (sizeId > 1) {
                    ses(scaling_list_dc_coef_minus8[sizeId - 2][matrixId], -7, +247,
                        2, sizeId - 2, matrixId);
                }
                for (i = 0; i < n; i++) {
                    ses(scaling_list_delta_coeff[sizeId][matrixId][i],
                        -128, +127, 3, sizeId, matrixId, i);
                }
            }
        }
    }

    return 0;
}

static int FUNC(sps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                     H265RawSPS *current)
{
    int err;

    flag(transform_skip_rotation_enabled_flag);
    flag(transform_skip_context_enabled_flag);
    flag(implicit_rdpcm_enabled_flag);
    flag(explicit_rdpcm_enabled_flag);
    flag(extended_precision_processing_flag);
    flag(intra_smoothing_disabled_flag);
    flag(high_precision_offsets_enabled_flag);
    flag(persistent_rice_adaptation_enabled_flag);
    flag(cabac_bypass_alignment_enabled_flag);

    return 0;
}

static int FUNC(sps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawSPS *current)
{
    int err, comp, i;

    flag(sps_curr_pic_ref_enabled_flag);

    flag(palette_mode_enabled_flag);
    if (current->palette_mode_enabled_flag) {
        ue(palette_max_size, 0, 64);
        ue(delta_palette_max_predictor_size, 0, 128);

        flag(sps_palette_predictor_initializer_present_flag);
        if (current->sps_palette_predictor_initializer_present_flag) {
            ue(sps_num_palette_predictor_initializer_minus1, 0, 128);
            for (comp = 0; comp < (current->chroma_format_idc ? 3 : 1); comp++) {
                int bit_depth = comp == 0 ? current->bit_depth_luma_minus8 + 8
                                          : current->bit_depth_chroma_minus8 + 8;
                for (i = 0; i <= current->sps_num_palette_predictor_initializer_minus1; i++)
                    us(bit_depth, sps_palette_predictor_initializers[comp][i],
                       0, MAX_UINT_BITS(bit_depth), 2, comp, i);
            }
        }
    }

    u(2, motion_vector_resolution_control_idc, 0, 2);
    flag(intra_boundary_filtering_disable_flag);

    return 0;
}

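// Sequence Parameter Set (ITU-T H.265 section 7.3.2.2).  Also derives
// MinCbLog2SizeY / CtbLog2SizeY and rejects picture dimensions that are
// not multiples of the minimum coding block size.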
static int FUNC(sps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawSPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawVPS *vps;
    int err, i;
    unsigned int min_cb_log2_size_y, ctb_log2_size_y,
                 min_cb_size_y, min_tb_log2_size_y;

    HEADER("Sequence Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_SPS));

    u(4, sps_video_parameter_set_id, 0, 15);
    h265->active_vps = vps = h265->vps[current->sps_video_parameter_set_id];

    u(3, sps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
    flag(sps_temporal_id_nesting_flag);

    if (vps) {
        if (current->sps_max_sub_layers_minus1 > vps->vps_max_sub_layers_minus1) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
                   "sps_max_sub_layers_minus1 (%d) must be less than or equal to "
                   "vps_max_sub_layers_minus1 (%d).\n",
                   current->sps_max_sub_layers_minus1,
                   vps->vps_max_sub_layers_minus1);
            return AVERROR_INVALIDDATA;
        }
        if (vps->vps_temporal_id_nesting_flag &&
            !current->sps_temporal_id_nesting_flag) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
                   "sps_temporal_id_nesting_flag must be 1 if "
                   "vps_temporal_id_nesting_flag is 1.\n");
            return AVERROR_INVALIDDATA;
        }
    }

    CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
                                   1, current->sps_max_sub_layers_minus1));

    ue(sps_seq_parameter_set_id, 0, 15);

    ue(chroma_format_idc, 0, 3);
    if (current->chroma_format_idc == 3)
        flag(separate_colour_plane_flag);
    else
        infer(separate_colour_plane_flag, 0);

    ue(pic_width_in_luma_samples,  1, HEVC_MAX_WIDTH);
    ue(pic_height_in_luma_samples, 1, HEVC_MAX_HEIGHT);

    flag(conformance_window_flag);
    if (current->conformance_window_flag) {
        ue(conf_win_left_offset,   0, current->pic_width_in_luma_samples);
        ue(conf_win_right_offset,  0, current->pic_width_in_luma_samples);
        ue(conf_win_top_offset,    0, current->pic_height_in_luma_samples);
        ue(conf_win_bottom_offset, 0, current->pic_height_in_luma_samples);
    } else {
        infer(conf_win_left_offset,   0);
        infer(conf_win_right_offset,  0);
        infer(conf_win_top_offset,    0);
        infer(conf_win_bottom_offset, 0);
    }

    ue(bit_depth_luma_minus8,   0, 8);
    ue(bit_depth_chroma_minus8, 0, 8);

    ue(log2_max_pic_order_cnt_lsb_minus4, 0, 12);

    flag(sps_sub_layer_ordering_info_present_flag);
    for (i = (current->sps_sub_layer_ordering_info_present_flag ?
              0 : current->sps_max_sub_layers_minus1);
         i <= current->sps_max_sub_layers_minus1; i++) {
        ues(sps_max_dec_pic_buffering_minus1[i],
            0, HEVC_MAX_DPB_SIZE - 1, 1, i);
        ues(sps_max_num_reorder_pics[i],
            0, current->sps_max_dec_pic_buffering_minus1[i], 1, i);
        ues(sps_max_latency_increase_plus1[i],
            0, UINT32_MAX - 1, 1, i);
    }
    if (!current->sps_sub_layer_ordering_info_present_flag) {
        for (i = 0; i < current->sps_max_sub_layers_minus1; i++) {
            infer(sps_max_dec_pic_buffering_minus1[i],
                  current->sps_max_dec_pic_buffering_minus1[current->sps_max_sub_layers_minus1]);
            infer(sps_max_num_reorder_pics[i],
                  current->sps_max_num_reorder_pics[current->sps_max_sub_layers_minus1]);
            infer(sps_max_latency_increase_plus1[i],
                  current->sps_max_latency_increase_plus1[current->sps_max_sub_layers_minus1]);
        }
    }

    ue(log2_min_luma_coding_block_size_minus3,   0, 3);
    min_cb_log2_size_y = current->log2_min_luma_coding_block_size_minus3 + 3;

    ue(log2_diff_max_min_luma_coding_block_size, 0, 3);
    ctb_log2_size_y = min_cb_log2_size_y +
        current->log2_diff_max_min_luma_coding_block_size;

    min_cb_size_y = 1 << min_cb_log2_size_y;
    if (current->pic_width_in_luma_samples  % min_cb_size_y ||
        current->pic_height_in_luma_samples % min_cb_size_y) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid dimensions: %ux%u not divisible "
               "by MinCbSizeY = %u.\n", current->pic_width_in_luma_samples,
               current->pic_height_in_luma_samples, min_cb_size_y);
        return AVERROR_INVALIDDATA;
    }

    ue(log2_min_luma_transform_block_size_minus2, 0, min_cb_log2_size_y - 3);
    min_tb_log2_size_y = current->log2_min_luma_transform_block_size_minus2 + 2;

    ue(log2_diff_max_min_luma_transform_block_size,
       0, FFMIN(ctb_log2_size_y, 5) - min_tb_log2_size_y);

    ue(max_transform_hierarchy_depth_inter,
       0, ctb_log2_size_y - min_tb_log2_size_y);
    ue(max_transform_hierarchy_depth_intra,
       0, ctb_log2_size_y - min_tb_log2_size_y);

    flag(scaling_list_enabled_flag);
    if (current->scaling_list_enabled_flag) {
        flag(sps_scaling_list_data_present_flag);
        if (current->sps_scaling_list_data_present_flag)
            CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));
    } else {
        infer(sps_scaling_list_data_present_flag, 0);
    }

    flag(amp_enabled_flag);
    flag(sample_adaptive_offset_enabled_flag);

    flag(pcm_enabled_flag);
    if (current->pcm_enabled_flag) {
        u(4, pcm_sample_bit_depth_luma_minus1,
          0, current->bit_depth_luma_minus8 + 8 - 1);
        u(4, pcm_sample_bit_depth_chroma_minus1,
          0, current->bit_depth_chroma_minus8 + 8 - 1);

        ue(log2_min_pcm_luma_coding_block_size_minus3,
           FFMIN(min_cb_log2_size_y, 5) - 3, FFMIN(ctb_log2_size_y, 5) - 3);
        ue(log2_diff_max_min_pcm_luma_coding_block_size,
           0, FFMIN(ctb_log2_size_y, 5) - (current->log2_min_pcm_luma_coding_block_size_minus3 + 3));

        flag(pcm_loop_filter_disabled_flag);
    }

    ue(num_short_term_ref_pic_sets, 0, HEVC_MAX_SHORT_TERM_REF_PIC_SETS);
    for (i = 0; i < current->num_short_term_ref_pic_sets; i++)
        CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->st_ref_pic_set[i], i, current));

    flag(long_term_ref_pics_present_flag);
    if (current->long_term_ref_pics_present_flag) {
        ue(num_long_term_ref_pics_sps, 0, HEVC_MAX_LONG_TERM_REF_PICS);
        for (i = 0; i < current->num_long_term_ref_pics_sps; i++) {
            us(current->log2_max_pic_order_cnt_lsb_minus4 + 4,
               lt_ref_pic_poc_lsb_sps[i],
               0, MAX_UINT_BITS(current->log2_max_pic_order_cnt_lsb_minus4 + 4), 1, i);
            flags(used_by_curr_pic_lt_sps_flag[i], 1, i);
        }
    }

    flag(sps_temporal_mvp_enabled_flag);
    flag(strong_intra_smoothing_enabled_flag);

    flag(vui_parameters_present_flag);
    if (current->vui_parameters_present_flag)
        CHECK(FUNC(vui_parameters)(ctx, rw, &current->vui, current));

    flag(sps_extension_present_flag);
    if (current->sps_extension_present_flag) {
        flag(sps_range_extension_flag);
        flag(sps_multilayer_extension_flag);
        flag(sps_3d_extension_flag);
        flag(sps_scc_extension_flag);
        u(4, sps_extension_4bits, 0, MAX_UINT_BITS(4));
    }

    if (current->sps_range_extension_flag)
        CHECK(FUNC(sps_range_extension)(ctx, rw, current));
    if (current->sps_multilayer_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->sps_3d_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->sps_scc_extension_flag)
        CHECK(FUNC(sps_scc_extension)(ctx, rw, current));
    if (current->sps_extension_4bits)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

static int FUNC(pps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                     H265RawPPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps = h265->active_sps;
    int err, i;

    if (current->transform_skip_enabled_flag)
        ue(log2_max_transform_skip_block_size_minus2, 0, 3);
    flag(cross_component_prediction_enabled_flag);

    flag(chroma_qp_offset_list_enabled_flag);
    if (current->chroma_qp_offset_list_enabled_flag) {
        ue(diff_cu_chroma_qp_offset_depth,
           0, sps->log2_diff_max_min_luma_coding_block_size);
        ue(chroma_qp_offset_list_len_minus1, 0, 5);
        for (i = 0; i <= current->chroma_qp_offset_list_len_minus1; i++) {
            ses(cb_qp_offset_list[i], -12, +12, 1, i);
            ses(cr_qp_offset_list[i], -12, +12, 1, i);
        }
    }

    ue(log2_sao_offset_scale_luma,   0, FFMAX(0, sps->bit_depth_luma_minus8   - 2));
    ue(log2_sao_offset_scale_chroma, 0, FFMAX(0, sps->bit_depth_chroma_minus8 - 2));

    return 0;
}

static int FUNC(pps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawPPS *current)
{
    int err, comp, i;

    flag(pps_curr_pic_ref_enabled_flag);

    flag(residual_adaptive_colour_transform_enabled_flag);
    if (current->residual_adaptive_colour_transform_enabled_flag) {
        flag(pps_slice_act_qp_offsets_present_flag);
        se(pps_act_y_qp_offset_plus5,  -7, +17);
        se(pps_act_cb_qp_offset_plus5, -7, +17);
        se(pps_act_cr_qp_offset_plus3, -9, +15);
    } else {
        infer(pps_slice_act_qp_offsets_present_flag, 0);
        infer(pps_act_y_qp_offset_plus5,  0);
        infer(pps_act_cb_qp_offset_plus5, 0);
        infer(pps_act_cr_qp_offset_plus3, 0);
    }

    flag(pps_palette_predictor_initializer_present_flag);
    if (current->pps_palette_predictor_initializer_present_flag) {
        ue(pps_num_palette_predictor_initializer, 0, 128);
        if (current->pps_num_palette_predictor_initializer > 0) {
            flag(monochrome_palette_flag);
            ue(luma_bit_depth_entry_minus8, 0, 8);
            if (!current->monochrome_palette_flag)
                ue(chroma_bit_depth_entry_minus8, 0, 8);
            for (comp = 0; comp < (current->monochrome_palette_flag ? 1 : 3); comp++) {
                int bit_depth = comp == 0 ? current->luma_bit_depth_entry_minus8 + 8
                                          : current->chroma_bit_depth_entry_minus8 + 8;
                for (i = 0; i < current->pps_num_palette_predictor_initializer; i++)
                    us(bit_depth, pps_palette_predictor_initializers[comp][i],
                       0, MAX_UINT_BITS(bit_depth), 2, comp, i);
            }
        }
    }

    return 0;
}

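// Picture Parameter Set (ITU-T H.265 section 7.3.2.3).  The referenced
// SPS must already have been parsed; it becomes the active SPS here.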
static int FUNC(pps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawPPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps;
    int err, i;

    HEADER("Picture Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_PPS));

    ue(pps_pic_parameter_set_id, 0, 63);
    ue(pps_seq_parameter_set_id, 0, 15);
    sps = h265->sps[current->pps_seq_parameter_set_id];
    if (!sps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
               current->pps_seq_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_sps = sps;

    flag(dependent_slice_segments_enabled_flag);
    flag(output_flag_present_flag);
    u(3, num_extra_slice_header_bits, 0, 7);
    flag(sign_data_hiding_enabled_flag);
    flag(cabac_init_present_flag);

    ue(num_ref_idx_l0_default_active_minus1, 0, 14);
    ue(num_ref_idx_l1_default_active_minus1, 0, 14);

    se(init_qp_minus26, -(26 + 6 * sps->bit_depth_luma_minus8), +25);

    flag(constrained_intra_pred_flag);
    flag(transform_skip_enabled_flag);

    flag(cu_qp_delta_enabled_flag);
    if (current->cu_qp_delta_enabled_flag)
        ue(diff_cu_qp_delta_depth,
           0, sps->log2_diff_max_min_luma_coding_block_size);
    else
        infer(diff_cu_qp_delta_depth, 0);

    se(pps_cb_qp_offset, -12, +12);
    se(pps_cr_qp_offset, -12, +12);
    flag(pps_slice_chroma_qp_offsets_present_flag);

    flag(weighted_pred_flag);
    flag(weighted_bipred_flag);

    flag(transquant_bypass_enabled_flag);
    flag(tiles_enabled_flag);
    flag(entropy_coding_sync_enabled_flag);

    if (current->tiles_enabled_flag) {
        ue(num_tile_columns_minus1, 0, HEVC_MAX_TILE_COLUMNS);
        ue(num_tile_rows_minus1,    0, HEVC_MAX_TILE_ROWS);
        flag(uniform_spacing_flag);
        if (!current->uniform_spacing_flag) {
            for (i = 0; i < current->num_tile_columns_minus1; i++)
                ues(column_width_minus1[i], 0, sps->pic_width_in_luma_samples, 1, i);
            for (i = 0; i < current->num_tile_rows_minus1; i++)
                ues(row_height_minus1[i], 0, sps->pic_height_in_luma_samples, 1, i);
        }
        flag(loop_filter_across_tiles_enabled_flag);
    } else {
        infer(num_tile_columns_minus1, 0);
        infer(num_tile_rows_minus1,    0);
    }

    flag(pps_loop_filter_across_slices_enabled_flag);

    flag(deblocking_filter_control_present_flag);
    if (current->deblocking_filter_control_present_flag) {
        flag(deblocking_filter_override_enabled_flag);
        flag(pps_deblocking_filter_disabled_flag);
        if (!current->pps_deblocking_filter_disabled_flag) {
            se(pps_beta_offset_div2, -6, +6);
            se(pps_tc_offset_div2,   -6, +6);
        } else {
            infer(pps_beta_offset_div2, 0);
            infer(pps_tc_offset_div2,   0);
        }
    } else {
        infer(deblocking_filter_override_enabled_flag, 0);
        infer(pps_deblocking_filter_disabled_flag,     0);
        infer(pps_beta_offset_div2, 0);
        infer(pps_tc_offset_div2,   0);
    }

    flag(pps_scaling_list_data_present_flag);
    if (current->pps_scaling_list_data_present_flag)
        CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));

    flag(lists_modification_present_flag);

    ue(log2_parallel_merge_level_minus2,
       0, (sps->log2_min_luma_coding_block_size_minus3 + 3 +
           sps->log2_diff_max_min_luma_coding_block_size - 2));

    flag(slice_segment_header_extension_present_flag);

    flag(pps_extension_present_flag);
    if (current->pps_extension_present_flag) {
        flag(pps_range_extension_flag);
        flag(pps_multilayer_extension_flag);
        flag(pps_3d_extension_flag);
        flag(pps_scc_extension_flag);
        u(4, pps_extension_4bits, 0, MAX_UINT_BITS(4));
    }

    if (current->pps_range_extension_flag)
        CHECK(FUNC(pps_range_extension)(ctx, rw, current));
    if (current->pps_multilayer_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->pps_3d_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->pps_scc_extension_flag)
        CHECK(FUNC(pps_scc_extension)(ctx, rw, current));
    if (current->pps_extension_4bits)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

static int FUNC(aud)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawAUD *current)
{
    int err;

    HEADER("Access Unit Delimiter");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_AUD));

    u(3, pic_type, 0, 2);

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

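// Reference picture list modification (ITU-T H.265 section 7.3.6.2).
// Each list entry is coded in Ceil(Log2(NumPicTotalCurr)) bits, which is
// what the av_log2() expression below computes.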
static int FUNC(ref_pic_lists_modification)(CodedBitstreamContext *ctx, RWContext *rw,
                                            H265RawSliceHeader *current,
                                            unsigned int num_pic_total_curr)
{
    unsigned int entry_size;
    int err, i;

    entry_size = av_log2(num_pic_total_curr - 1) + 1;

    flag(ref_pic_list_modification_flag_l0);
    if (current->ref_pic_list_modification_flag_l0) {
        for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++)
            us(entry_size, list_entry_l0[i], 0, num_pic_total_curr - 1, 1, i);
    }

    if (current->slice_type == HEVC_SLICE_B) {
        flag(ref_pic_list_modification_flag_l1);
        if (current->ref_pic_list_modification_flag_l1) {
            for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++)
                us(entry_size, list_entry_l1[i], 0, num_pic_total_curr - 1, 1, i);
        }
    }

    return 0;
}

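// Weighted-prediction table (ITU-T H.265 section 7.3.6.3).  The "if (1)"
// tests stand in for the spec condition that the reference picture is not
// the current picture in the same layer, which cannot be evaluated here
// without constructing the full reference picture lists.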
static int FUNC(pred_weight_table)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawSliceHeader *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps = h265->active_sps;
    int err, i, j;
    int chroma = !sps->separate_colour_plane_flag &&
                  sps->chroma_format_idc != 0;

    ue(luma_log2_weight_denom, 0, 7);
    if (chroma)
        se(delta_chroma_log2_weight_denom, -7, 7);
    else
        infer(delta_chroma_log2_weight_denom, 0);

    for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
        if (1 /* is not same POC and same layer_id */)
            flags(luma_weight_l0_flag[i], 1, i);
        else
            infer(luma_weight_l0_flag[i], 0);
    }
    if (chroma) {
        for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
            if (1 /* is not same POC and same layer_id */)
                flags(chroma_weight_l0_flag[i], 1, i);
            else
                infer(chroma_weight_l0_flag[i], 0);
        }
    }

    for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
        if (current->luma_weight_l0_flag[i]) {
            ses(delta_luma_weight_l0[i], -128, +127, 1, i);
            ses(luma_offset_l0[i],
                -(1 << (sps->bit_depth_luma_minus8 + 8 - 1)),
                ((1 << (sps->bit_depth_luma_minus8 + 8 - 1)) - 1), 1, i);
        } else {
            infer(delta_luma_weight_l0[i], 0);
            infer(luma_offset_l0[i],       0);
        }
        if (current->chroma_weight_l0_flag[i]) {
            for (j = 0; j < 2; j++) {
                ses(delta_chroma_weight_l0[i][j], -128, +127, 2, i, j);
                ses(chroma_offset_l0[i][j],
                    -(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)),
                    ((4 << (sps->bit_depth_chroma_minus8 + 8 - 1)) - 1), 2, i, j);
            }
        } else {
            for (j = 0; j < 2; j++) {
                infer(delta_chroma_weight_l0[i][j], 0);
                infer(chroma_offset_l0[i][j],       0);
            }
        }
    }

    if (current->slice_type == HEVC_SLICE_B) {
        for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
            if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
                flags(luma_weight_l1_flag[i], 1, i);
            else
                infer(luma_weight_l1_flag[i], 0);
        }
        if (chroma) {
            for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
                if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
                    flags(chroma_weight_l1_flag[i], 1, i);
                else
                    infer(chroma_weight_l1_flag[i], 0);
            }
        }

        for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
            if (current->luma_weight_l1_flag[i]) {
                ses(delta_luma_weight_l1[i], -128, +127, 1, i);
                ses(luma_offset_l1[i],
                    -(1 << (sps->bit_depth_luma_minus8 + 8 - 1)),
                    ((1 << (sps->bit_depth_luma_minus8 + 8 - 1)) - 1), 1, i);
            } else {
                infer(delta_luma_weight_l1[i], 0);
                infer(luma_offset_l1[i],       0);
            }
            if (current->chroma_weight_l1_flag[i]) {
                for (j = 0; j < 2; j++) {
                    ses(delta_chroma_weight_l1[i][j], -128, +127, 2, i, j);
                    ses(chroma_offset_l1[i][j],
                        -(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)),
                        ((4 << (sps->bit_depth_chroma_minus8 + 8 - 1)) - 1), 2, i, j);
                }
            } else {
                for (j = 0; j < 2; j++) {
                    infer(delta_chroma_weight_l1[i][j], 0);
                    infer(chroma_offset_l1[i][j],       0);
                }
            }
        }
    }

    return 0;
}

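// Slice segment header (ITU-T H.265 section 7.3.6.1).  NumPicTotalCurr is
// accumulated while parsing the reference picture set information, since
// it decides whether ref_pic_lists_modification() is present and how its
// entries are sized.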
static int FUNC(slice_segment_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                      H265RawSliceHeader *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps;
    const H265RawPPS *pps;
    unsigned int min_cb_log2_size_y, ctb_log2_size_y, ctb_size_y;
    unsigned int pic_width_in_ctbs_y, pic_height_in_ctbs_y, pic_size_in_ctbs_y;
    unsigned int num_pic_total_curr = 0;
    int err, i;

    HEADER("Slice Segment Header");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, -1));

    flag(first_slice_segment_in_pic_flag);

    if (current->nal_unit_header.nal_unit_type >= HEVC_NAL_BLA_W_LP &&
        current->nal_unit_header.nal_unit_type <= HEVC_NAL_IRAP_VCL23)
        flag(no_output_of_prior_pics_flag);

    ue(slice_pic_parameter_set_id, 0, 63);
    pps = h265->pps[current->slice_pic_parameter_set_id];
    if (!pps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "PPS id %d not available.\n",
               current->slice_pic_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_pps = pps;

    sps = h265->sps[pps->pps_seq_parameter_set_id];
    if (!sps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
               pps->pps_seq_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_sps = sps;

    min_cb_log2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
    ctb_log2_size_y = min_cb_log2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
    ctb_size_y = 1 << ctb_log2_size_y;
    pic_width_in_ctbs_y =
        (sps->pic_width_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_height_in_ctbs_y =
        (sps->pic_height_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_size_in_ctbs_y = pic_width_in_ctbs_y * pic_height_in_ctbs_y;

    if (!current->first_slice_segment_in_pic_flag) {
        unsigned int address_size = av_log2(pic_size_in_ctbs_y - 1) + 1;

        if (pps->dependent_slice_segments_enabled_flag)
            flag(dependent_slice_segment_flag);
        else
            infer(dependent_slice_segment_flag, 0);

        u(address_size, slice_segment_address, 0, pic_size_in_ctbs_y - 1);
    } else {
        infer(dependent_slice_segment_flag, 0);
    }

    if (!current->dependent_slice_segment_flag) {
        for (i = 0; i < pps->num_extra_slice_header_bits; i++)
            flags(slice_reserved_flag[i], 1, i);

        ue(slice_type, 0, 2);

        if (pps->output_flag_present_flag)
            flag(pic_output_flag);

        if (sps->separate_colour_plane_flag)
            u(2, colour_plane_id, 0, 2);

        if (current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_W_RADL &&
            current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_N_LP) {
            const H265RawSTRefPicSet *rps;

            u(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, slice_pic_order_cnt_lsb,
              0, MAX_UINT_BITS(sps->log2_max_pic_order_cnt_lsb_minus4 + 4));

            flag(short_term_ref_pic_set_sps_flag);
            if (!current->short_term_ref_pic_set_sps_flag) {
                CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->short_term_ref_pic_set,
                                           sps->num_short_term_ref_pic_sets, sps));
                rps = &current->short_term_ref_pic_set;
            } else if (sps->num_short_term_ref_pic_sets > 1) {
                unsigned int idx_size = av_log2(sps->num_short_term_ref_pic_sets - 1) + 1;
                u(idx_size, short_term_ref_pic_set_idx,
                  0, sps->num_short_term_ref_pic_sets - 1);
                rps = &sps->st_ref_pic_set[current->short_term_ref_pic_set_idx];
            } else {
                infer(short_term_ref_pic_set_idx, 0);
                rps = &sps->st_ref_pic_set[0];
            }

            num_pic_total_curr = 0;
            for (i = 0; i < rps->num_negative_pics; i++)
                if (rps->used_by_curr_pic_s0_flag[i])
                    ++num_pic_total_curr;
            for (i = 0; i < rps->num_positive_pics; i++)
                if (rps->used_by_curr_pic_s1_flag[i])
                    ++num_pic_total_curr;

            if (sps->long_term_ref_pics_present_flag) {
                unsigned int idx_size;

                if (sps->num_long_term_ref_pics_sps > 0) {
                    ue(num_long_term_sps, 0, sps->num_long_term_ref_pics_sps);
                    idx_size = av_log2(sps->num_long_term_ref_pics_sps - 1) + 1;
                } else {
                    infer(num_long_term_sps, 0);
                    idx_size = 0;
                }
                ue(num_long_term_pics, 0, HEVC_MAX_LONG_TERM_REF_PICS);

                for (i = 0; i < current->num_long_term_sps +
                                current->num_long_term_pics; i++) {
                    if (i < current->num_long_term_sps) {
                        if (sps->num_long_term_ref_pics_sps > 1)
                            us(idx_size, lt_idx_sps[i],
                               0, sps->num_long_term_ref_pics_sps - 1, 1, i);
                        if (sps->used_by_curr_pic_lt_sps_flag[current->lt_idx_sps[i]])
                            ++num_pic_total_curr;
                    } else {
                        us(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, poc_lsb_lt[i],
                           0, MAX_UINT_BITS(sps->log2_max_pic_order_cnt_lsb_minus4 + 4), 1, i);
                        flags(used_by_curr_pic_lt_flag[i], 1, i);
                        if (current->used_by_curr_pic_lt_flag[i])
                            ++num_pic_total_curr;
                    }
                    flags(delta_poc_msb_present_flag[i], 1, i);
                    if (current->delta_poc_msb_present_flag[i])
                        ues(delta_poc_msb_cycle_lt[i], 0, UINT32_MAX - 1, 1, i);
                    else
                        infer(delta_poc_msb_cycle_lt[i], 0);
                }
            }

            if (sps->sps_temporal_mvp_enabled_flag)
                flag(slice_temporal_mvp_enabled_flag);
            else
                infer(slice_temporal_mvp_enabled_flag, 0);

            if (pps->pps_curr_pic_ref_enabled_flag)
                ++num_pic_total_curr;
        }

        if (sps->sample_adaptive_offset_enabled_flag) {
            flag(slice_sao_luma_flag);
            if (!sps->separate_colour_plane_flag && sps->chroma_format_idc != 0)
                flag(slice_sao_chroma_flag);
            else
                infer(slice_sao_chroma_flag, 0);
        } else {
            infer(slice_sao_luma_flag,   0);
            infer(slice_sao_chroma_flag, 0);
        }

        if (current->slice_type == HEVC_SLICE_P ||
            current->slice_type == HEVC_SLICE_B) {
            flag(num_ref_idx_active_override_flag);
            if (current->num_ref_idx_active_override_flag) {
                ue(num_ref_idx_l0_active_minus1, 0, 14);
                if (current->slice_type == HEVC_SLICE_B)
                    ue(num_ref_idx_l1_active_minus1, 0, 14);
                else
                    infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            } else {
                infer(num_ref_idx_l0_active_minus1, pps->num_ref_idx_l0_default_active_minus1);
                infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            }

            if (pps->lists_modification_present_flag && num_pic_total_curr > 1)
                CHECK(FUNC(ref_pic_lists_modification)(ctx, rw, current,
                                                       num_pic_total_curr));

            if (current->slice_type == HEVC_SLICE_B)
                flag(mvd_l1_zero_flag);
            if (pps->cabac_init_present_flag)
                flag(cabac_init_flag);
            else
                infer(cabac_init_flag, 0);
            if (current->slice_temporal_mvp_enabled_flag) {
                if (current->slice_type == HEVC_SLICE_B)
                    flag(collocated_from_l0_flag);
                else
                    infer(collocated_from_l0_flag, 1);
                if (current->collocated_from_l0_flag) {
                    if (current->num_ref_idx_l0_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l0_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                } else {
                    if (current->num_ref_idx_l1_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l1_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                }
            }

            if ((pps->weighted_pred_flag   && current->slice_type == HEVC_SLICE_P) ||
                (pps->weighted_bipred_flag && current->slice_type == HEVC_SLICE_B))
                CHECK(FUNC(pred_weight_table)(ctx, rw, current));

            ue(five_minus_max_num_merge_cand, 0, 4);
            if (sps->motion_vector_resolution_control_idc == 2)
                flag(use_integer_mv_flag);
            else
                infer(use_integer_mv_flag, sps->motion_vector_resolution_control_idc);
        }

        se(slice_qp_delta,
           - 6 * sps->bit_depth_luma_minus8 - (pps->init_qp_minus26 + 26),
           + 51 - (pps->init_qp_minus26 + 26));
        if (pps->pps_slice_chroma_qp_offsets_present_flag) {
            se(slice_cb_qp_offset, -12, +12);
            se(slice_cr_qp_offset, -12, +12);
        } else {
            infer(slice_cb_qp_offset, 0);
            infer(slice_cr_qp_offset, 0);
        }
        if (pps->pps_slice_act_qp_offsets_present_flag) {
            se(slice_act_y_qp_offset,
               -12 - (pps->pps_act_y_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_y_qp_offset_plus5 - 5));
            se(slice_act_cb_qp_offset,
               -12 - (pps->pps_act_cb_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_cb_qp_offset_plus5 - 5));
            se(slice_act_cr_qp_offset,
               -12 - (pps->pps_act_cr_qp_offset_plus3 - 3),
               +12 - (pps->pps_act_cr_qp_offset_plus3 - 3));
        } else {
            infer(slice_act_y_qp_offset,  0);
            infer(slice_act_cb_qp_offset, 0);
            infer(slice_act_cr_qp_offset, 0);
        }
        if (pps->chroma_qp_offset_list_enabled_flag)
            flag(cu_chroma_qp_offset_enabled_flag);
        else
            infer(cu_chroma_qp_offset_enabled_flag, 0);

        if (pps->deblocking_filter_override_enabled_flag)
            flag(deblocking_filter_override_flag);
        else
            infer(deblocking_filter_override_flag, 0);
        if (current->deblocking_filter_override_flag) {
            flag(slice_deblocking_filter_disabled_flag);
            if (!current->slice_deblocking_filter_disabled_flag) {
                se(slice_beta_offset_div2, -6, +6);
                se(slice_tc_offset_div2,   -6, +6);
            } else {
                infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
                infer(slice_tc_offset_div2,   pps->pps_tc_offset_div2);
            }
        } else {
            infer(slice_deblocking_filter_disabled_flag,
                  pps->pps_deblocking_filter_disabled_flag);
            infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
            infer(slice_tc_offset_div2,   pps->pps_tc_offset_div2);
        }
        if (pps->pps_loop_filter_across_slices_enabled_flag &&
            (current->slice_sao_luma_flag || current->slice_sao_chroma_flag ||
             !current->slice_deblocking_filter_disabled_flag))
            flag(slice_loop_filter_across_slices_enabled_flag);
        else
            infer(slice_loop_filter_across_slices_enabled_flag,
                  pps->pps_loop_filter_across_slices_enabled_flag);
    }

    if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) {
        unsigned int num_entry_point_offsets_limit;
        if (!pps->tiles_enabled_flag && pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit = pic_height_in_ctbs_y - 1;
        else if (pps->tiles_enabled_flag && !pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * (pps->num_tile_rows_minus1 + 1);
        else
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * pic_height_in_ctbs_y - 1;
        ue(num_entry_point_offsets, 0, num_entry_point_offsets_limit);

        if (current->num_entry_point_offsets > HEVC_MAX_ENTRY_POINT_OFFSETS) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Too many entry points: "
                   "%"PRIu16".\n", current->num_entry_point_offsets);
            return AVERROR_PATCHWELCOME;
        }

        if (current->num_entry_point_offsets > 0) {
            ue(offset_len_minus1, 0, 31);
            for (i = 0; i < current->num_entry_point_offsets; i++)
                us(current->offset_len_minus1 + 1, entry_point_offset_minus1[i],
                   0, MAX_UINT_BITS(current->offset_len_minus1 + 1), 1, i);
        }
    }

    if (pps->slice_segment_header_extension_present_flag) {
        ue(slice_segment_header_extension_length, 0, 256);
        for (i = 0; i < current->slice_segment_header_extension_length; i++)
            us(8, slice_segment_header_extension_data_byte[i], 0x00, 0xff, 1, i);
    }

    CHECK(FUNC(byte_alignment)(ctx, rw));

    return 0;
}

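// Mastering display colour volume SEI (payload type 137).  Chromaticity
// coordinates are in increments of 0.00002 (50000 corresponds to 1.0) and
// luminance values are in units of 0.0001 candelas per square metre.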
static int FUNC(sei_mastering_display)(CodedBitstreamContext *ctx, RWContext *rw,
                                       H265RawSEIMasteringDisplayColourVolume *current)
{
    int err, c;

    for (c = 0; c < 3; c++) {
        us(16, display_primaries_x[c], 0, 50000, 1, c);
        us(16, display_primaries_y[c], 0, 50000, 1, c);
    }

    u(16, white_point_x, 0, 50000);
    u(16, white_point_y, 0, 50000);

    u(32, max_display_mastering_luminance,
      1, MAX_UINT_BITS(32));
    u(32, min_display_mastering_luminance,
      0, current->max_display_mastering_luminance - 1);

    return 0;
}

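// Content light level SEI (payload type 144).  Both values are in units
// of one candela per square metre; zero means the value is unknown.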
static int FUNC(sei_content_light_level)(CodedBitstreamContext *ctx, RWContext *rw,
                                         H265RawSEIContentLightLevelInfo *current)
{
    int err;

    u(16, max_content_light_level,     0, MAX_UINT_BITS(16));
    u(16, max_pic_average_light_level, 0, MAX_UINT_BITS(16));

    return 0;
}

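// A single SEI payload body.  Unknown payload types are kept as opaque
// bytes; on write, the payload size is recomputed from the number of bits
// actually emitted.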
static int FUNC(sei_payload)(CodedBitstreamContext *ctx, RWContext *rw,
                             H265RawSEIPayload *current)
{
    int err, i;
    int start_position, end_position;

#ifdef READ
    start_position = get_bits_count(rw);
#else
    start_position = put_bits_count(rw);
#endif

    switch (current->payload_type) {
    case HEVC_SEI_TYPE_MASTERING_DISPLAY_INFO:
        CHECK(FUNC(sei_mastering_display)
              (ctx, rw, &current->payload.mastering_display));
        break;
    case HEVC_SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO:
        CHECK(FUNC(sei_content_light_level)
              (ctx, rw, &current->payload.content_light_level));
        break;
    default:
        {
#ifdef READ
            current->payload.other.data_length = current->payload_size;
#endif
            allocate(current->payload.other.data, current->payload.other.data_length);
            for (i = 0; i < current->payload_size; i++)
                xu(8, payload_byte[i], current->payload.other.data[i], 0, 255,
                   1, i);
        }
    }

    if (byte_alignment(rw)) {
        fixed(1, bit_equal_to_one, 1);
        while (byte_alignment(rw))
            fixed(1, bit_equal_to_zero, 0);
    }

#ifdef READ
    end_position = get_bits_count(rw);
    if (end_position < start_position + 8 * current->payload_size) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Incorrect SEI payload length: "
               "header %"PRIu32" bits, actually %d bits.\n",
               8 * current->payload_size,
               end_position - start_position);
        return AVERROR_INVALIDDATA;
    }
#else
    end_position = put_bits_count(rw);
    current->payload_size = (end_position - start_position) >> 3;
#endif

    return 0;
}

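// SEI NAL unit (ITU-T H.265 section 7.3.5).  Payload type and size use
// the standard SEI byte coding: a run of 0xff bytes each adding 255,
// terminated by a byte in [0, 254] - e.g. the value 300 is coded as
// 0xff 0x2d (255 + 45).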
static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawSEI *current)
{
    int err, k;

    HEADER("Supplemental Enhancement Information");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header,
                                HEVC_NAL_SEI_PREFIX));

#ifdef READ
    for (k = 0; k < H265_MAX_SEI_PAYLOADS; k++) {
        uint32_t payload_type = 0;
        uint32_t payload_size = 0;
        uint32_t tmp;

        while (show_bits(rw, 8) == 0xff) {
            fixed(8, ff_byte, 0xff);
            payload_type += 255;
        }
        xu(8, last_payload_type_byte, tmp, 0, 254, 0);
        payload_type += tmp;

        while (show_bits(rw, 8) == 0xff) {
            fixed(8, ff_byte, 0xff);
            payload_size += 255;
        }
        xu(8, last_payload_size_byte, tmp, 0, 254, 0);
        payload_size += tmp;

        current->payload[k].payload_type = payload_type;
        current->payload[k].payload_size = payload_size;

        CHECK(FUNC(sei_payload)(ctx, rw, &current->payload[k]));

        if (!cbs_h2645_read_more_rbsp_data(rw))
            break;
    }
    if (k >= H265_MAX_SEI_PAYLOADS) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Too many payloads in "
               "SEI message: found %d.\n", k);
        return AVERROR_INVALIDDATA;
    }
    current->payload_count = k + 1;
#else
    for (k = 0; k < current->payload_count; k++) {
        PutBitContext start_state;
        uint32_t tmp;
        int need_size, i;

        // Somewhat clumsy: we write the payload twice when
        // we don't know the size in advance.  This will mess
        // with trace output, but is otherwise harmless.
        start_state = *rw;
        need_size = !current->payload[k].payload_size;
        for (i = 0; i < 1 + need_size; i++) {
            *rw = start_state;

            tmp = current->payload[k].payload_type;
            while (tmp >= 255) {
                fixed(8, ff_byte, 0xff);
                tmp -= 255;
            }
            xu(8, last_payload_type_byte, tmp, 0, 254, 0);

            tmp = current->payload[k].payload_size;
            while (tmp >= 255) {
                fixed(8, ff_byte, 0xff);
                tmp -= 255;
            }
            xu(8, last_payload_size_byte, tmp, 0, 254, 0);

            CHECK(FUNC(sei_payload)(ctx, rw, &current->payload[k]));
        }
    }
#endif

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}