/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
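
/*
 * Syntax template for H.265 (HEVC) raw bitstream structures.
 *
 * A rough sketch of how this template works, based on the cbs_h2645
 * infrastructure: the file is #included twice, once with READ and once
 * with WRITE defined, so each FUNC(name) expands to a separate reader
 * and writer for the same syntax.  The macros u(width, name, min, max),
 * ue(name, min, max), se(name, min, max) and flag(name) transfer the
 * named syntax element between the bitstream and current->name, checking
 * the given range; the x-prefixed forms (xu/xse) take an explicit lvalue
 * instead of a struct member; infer(name, value) supplies the value of
 * an element which is not present in the bitstream.
 */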
static int FUNC(rbsp_trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;
    av_unused int one = 1, zero = 0;
    xu(1, rbsp_stop_one_bit, one, 1, 1);
    while (byte_alignment(rw) != 0)
        xu(1, rbsp_alignment_zero_bit, zero, 0, 0);

    return 0;
}

static int FUNC(nal_unit_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                 H265RawNALUnitHeader *current,
                                 int expected_nal_unit_type)
{
    int err;

    u(1, forbidden_zero_bit, 0, 0);

    if (expected_nal_unit_type >= 0)
        u(6, nal_unit_type, expected_nal_unit_type,
                            expected_nal_unit_type);
    else
        u(6, nal_unit_type, 0, 63);

    u(6, nuh_layer_id,          0, 62);
    u(3, nuh_temporal_id_plus1, 1,  7);

    return 0;
}

static int FUNC(byte_alignment)(CodedBitstreamContext *ctx, RWContext *rw)
{
    int err;
    av_unused int one = 1, zero = 0;
    xu(1, alignment_bit_equal_to_one, one, 1, 1);
    while (byte_alignment(rw) != 0)
        xu(1, alignment_bit_equal_to_zero, zero, 0, 0);

    return 0;
}
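
/*
 * The length of the extension payload is not coded explicitly: on the
 * read side we first scan ahead counting bits until more_rbsp_data()
 * reports the end of the payload, then rewind and capture that many
 * bits into current->data.  On the write side the bit length is already
 * known, so the payload is emitted directly.
 */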
static int FUNC(extension_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawPSExtensionData *current)
{
    int err;
    size_t k;
#ifdef READ
    BitstreamContext start;
    uint8_t bit;
    start = *rw;
    for (k = 0; cbs_h2645_read_more_rbsp_data(rw); k++);
    current->bit_length = k;
    if (k > 0) {
        *rw = start;
        allocate(current->data, (current->bit_length + 7) / 8);
        for (k = 0; k < current->bit_length; k++) {
            xu(1, extension_data, bit, 0, 1);
            current->data[k / 8] |= bit << (7 - k % 8);
        }
    }
#else
    for (k = 0; k < current->bit_length; k++)
        xu(1, extension_data, current->data[k / 8] >> (7 - k % 8), 0, 1);
#endif
    return 0;
}

static int FUNC(profile_tier_level)(CodedBitstreamContext *ctx, RWContext *rw,
                                    H265RawProfileTierLevel *current,
                                    int profile_present_flag,
                                    int max_num_sub_layers_minus1)
{
    av_unused unsigned int zero = 0;
    int err, i, j;

    if (profile_present_flag) {
        u(2, general_profile_space, 0, 0);
        flag(general_tier_flag);
        u(5, general_profile_idc, 0, 31);

        for (j = 0; j < 32; j++)
            flag(general_profile_compatibility_flag[j]);

        flag(general_progressive_source_flag);
        flag(general_interlaced_source_flag);
        flag(general_non_packed_constraint_flag);
        flag(general_frame_only_constraint_flag);

#define profile_compatible(x) (current->general_profile_idc == (x) || \
                               current->general_profile_compatibility_flag[x])
        if (profile_compatible(4) || profile_compatible(5) ||
            profile_compatible(6) || profile_compatible(7) ||
            profile_compatible(8) || profile_compatible(9) ||
            profile_compatible(10)) {
            flag(general_max_12bit_constraint_flag);
            flag(general_max_10bit_constraint_flag);
            flag(general_max_8bit_constraint_flag);
            flag(general_max_422chroma_constraint_flag);
            flag(general_max_420chroma_constraint_flag);
            flag(general_max_monochrome_constraint_flag);
            flag(general_intra_constraint_flag);
            flag(general_one_picture_only_constraint_flag);
            flag(general_lower_bit_rate_constraint_flag);

            if (profile_compatible(5) || profile_compatible(9) ||
                profile_compatible(10)) {
                flag(general_max_14bit_constraint_flag);
                xu(24, general_reserved_zero_33bits, zero, 0, 0);
                xu( 9, general_reserved_zero_33bits, zero, 0, 0);
            } else {
                xu(24, general_reserved_zero_34bits, zero, 0, 0);
                xu(10, general_reserved_zero_34bits, zero, 0, 0);
            }
        } else {
            xu(24, general_reserved_zero_43bits, zero, 0, 0);
            xu(19, general_reserved_zero_43bits, zero, 0, 0);
        }

        if (profile_compatible(1) || profile_compatible(2) ||
            profile_compatible(3) || profile_compatible(4) ||
            profile_compatible(5) || profile_compatible(9)) {
            flag(general_inbld_flag);
        } else {
            xu(1, general_reserved_zero_bit, zero, 0, 0);
        }
#undef profile_compatible
    }

    u(8, general_level_idc, 0, 255);

    for (i = 0; i < max_num_sub_layers_minus1; i++) {
        flag(sub_layer_profile_present_flag[i]);
        flag(sub_layer_level_present_flag[i]);
    }

    if (max_num_sub_layers_minus1 > 0) {
        for (i = max_num_sub_layers_minus1; i < 8; i++) {
            av_unused int zero = 0;
            xu(2, reserved_zero_2bits, zero, 0, 0);
        }
    }

    for (i = 0; i < max_num_sub_layers_minus1; i++) {
        if (current->sub_layer_profile_present_flag[i])
            return AVERROR_PATCHWELCOME;
        if (current->sub_layer_level_present_flag[i])
            return AVERROR_PATCHWELCOME;
    }

    return 0;
}
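
// The NAL and VCL sub-layer HRD parameters share the same syntax; the
// nal argument selects which of the two arrays in the containing
// H265RawHRDParameters structure is transferred.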
static int FUNC(sub_layer_hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                          H265RawHRDParameters *hrd,
                                          int nal, int sub_layer_id)
{
    H265RawSubLayerHRDParameters *current;
    int err, i;

    if (nal)
        current = &hrd->nal_sub_layer_hrd_parameters[sub_layer_id];
    else
        current = &hrd->vcl_sub_layer_hrd_parameters[sub_layer_id];

    for (i = 0; i <= hrd->cpb_cnt_minus1[sub_layer_id]; i++) {
        ue(bit_rate_value_minus1[i], 0, UINT32_MAX - 1);
        ue(cpb_size_value_minus1[i], 0, UINT32_MAX - 1);
        if (hrd->sub_pic_hrd_params_present_flag) {
            ue(cpb_size_du_value_minus1[i], 0, UINT32_MAX - 1);
            ue(bit_rate_du_value_minus1[i], 0, UINT32_MAX - 1);
        }
        flag(cbr_flag[i]);
    }

    return 0;
}

static int FUNC(hrd_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawHRDParameters *current, int common_inf_present_flag,
                                int max_num_sub_layers_minus1)
{
    int err, i;

    if (common_inf_present_flag) {
        flag(nal_hrd_parameters_present_flag);
        flag(vcl_hrd_parameters_present_flag);

        if (current->nal_hrd_parameters_present_flag ||
            current->vcl_hrd_parameters_present_flag) {
            flag(sub_pic_hrd_params_present_flag);
            if (current->sub_pic_hrd_params_present_flag) {
                u(8, tick_divisor_minus2, 0, 255);
                u(5, du_cpb_removal_delay_increment_length_minus1, 0, 31);
                flag(sub_pic_cpb_params_in_pic_timing_sei_flag);
                u(5, dpb_output_delay_du_length_minus1, 0, 31);
            }

            u(4, bit_rate_scale, 0, 15);
            u(4, cpb_size_scale, 0, 15);
            if (current->sub_pic_hrd_params_present_flag)
                u(4, cpb_size_du_scale, 0, 15);

            u(5, initial_cpb_removal_delay_length_minus1, 0, 31);
            u(5, au_cpb_removal_delay_length_minus1,      0, 31);
            u(5, dpb_output_delay_length_minus1,          0, 31);
        } else {
            infer(sub_pic_hrd_params_present_flag, 0);
            infer(initial_cpb_removal_delay_length_minus1, 23);
            infer(au_cpb_removal_delay_length_minus1,      23);
            infer(dpb_output_delay_length_minus1,          23);
        }
    }

    for (i = 0; i <= max_num_sub_layers_minus1; i++) {
        flag(fixed_pic_rate_general_flag[i]);

        if (!current->fixed_pic_rate_general_flag[i])
            flag(fixed_pic_rate_within_cvs_flag[i]);
        else
            infer(fixed_pic_rate_within_cvs_flag[i], 1);

        if (current->fixed_pic_rate_within_cvs_flag[i]) {
            ue(elemental_duration_in_tc_minus1[i], 0, 2047);
            infer(low_delay_hrd_flag[i], 0);
        } else
            flag(low_delay_hrd_flag[i]);

        if (!current->low_delay_hrd_flag[i])
            ue(cpb_cnt_minus1[i], 0, 31);
        else
            infer(cpb_cnt_minus1[i], 0);

        if (current->nal_hrd_parameters_present_flag)
            CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 0, i));
        if (current->vcl_hrd_parameters_present_flag)
            CHECK(FUNC(sub_layer_hrd_parameters)(ctx, rw, current, 1, i));
    }

    return 0;
}

static int FUNC(vui_parameters)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawVUI *current, const H265RawSPS *sps)
{
    int err;

    flag(aspect_ratio_info_present_flag);
    if (current->aspect_ratio_info_present_flag) {
        u(8, aspect_ratio_idc, 0, 255);
        if (current->aspect_ratio_idc == 255) {
            u(16, sar_width,  0, 65535);
            u(16, sar_height, 0, 65535);
        }
    } else {
        infer(aspect_ratio_idc, 0);
    }

    flag(overscan_info_present_flag);
    if (current->overscan_info_present_flag)
        flag(overscan_appropriate_flag);

    flag(video_signal_type_present_flag);
    if (current->video_signal_type_present_flag) {
        u(3, video_format, 0, 7);
        flag(video_full_range_flag);
        flag(colour_description_present_flag);
        if (current->colour_description_present_flag) {
            u(8, colour_primaries,         0, 255);
            u(8, transfer_characteristics, 0, 255);
            u(8, matrix_coefficients,      0, 255);
        } else {
            infer(colour_primaries,         2);
            infer(transfer_characteristics, 2);
            infer(matrix_coefficients,      2);
        }
    } else {
        infer(video_format,             5);
        infer(video_full_range_flag,    0);
        infer(colour_primaries,         2);
        infer(transfer_characteristics, 2);
        infer(matrix_coefficients,      2);
    }

    flag(chroma_loc_info_present_flag);
    if (current->chroma_loc_info_present_flag) {
        ue(chroma_sample_loc_type_top_field,    0, 5);
        ue(chroma_sample_loc_type_bottom_field, 0, 5);
    } else {
        infer(chroma_sample_loc_type_top_field,    0);
        infer(chroma_sample_loc_type_bottom_field, 0);
    }

    flag(neutral_chroma_indication_flag);
    flag(field_seq_flag);
    flag(frame_field_info_present_flag);

    flag(default_display_window_flag);
    if (current->default_display_window_flag) {
        ue(def_disp_win_left_offset,   0, 16384);
        ue(def_disp_win_right_offset,  0, 16384);
        ue(def_disp_win_top_offset,    0, 16384);
        ue(def_disp_win_bottom_offset, 0, 16384);
    }

    flag(vui_timing_info_present_flag);
    if (current->vui_timing_info_present_flag) {
        u(32, vui_num_units_in_tick, 1, UINT32_MAX);
        u(32, vui_time_scale,        1, UINT32_MAX);
        flag(vui_poc_proportional_to_timing_flag);
        if (current->vui_poc_proportional_to_timing_flag)
            ue(vui_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);

        flag(vui_hrd_parameters_present_flag);
        if (current->vui_hrd_parameters_present_flag) {
            CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters,
                                       1, sps->sps_max_sub_layers_minus1));
        }
    }

    flag(bitstream_restriction_flag);
    if (current->bitstream_restriction_flag) {
        flag(tiles_fixed_structure_flag);
        flag(motion_vectors_over_pic_boundaries_flag);
        flag(restricted_ref_pic_lists_flag);
        ue(min_spatial_segmentation_idc,  0, 4095);
        ue(max_bytes_per_pic_denom,       0, 16);
        ue(max_bits_per_min_cu_denom,     0, 16);
        ue(log2_max_mv_length_horizontal, 0, 16);
        ue(log2_max_mv_length_vertical,   0, 16);
    } else {
        infer(tiles_fixed_structure_flag, 0);
        infer(motion_vectors_over_pic_boundaries_flag, 1);
        infer(min_spatial_segmentation_idc,  0);
        infer(max_bytes_per_pic_denom,       2);
        infer(max_bits_per_min_cu_denom,     1);
        infer(log2_max_mv_length_horizontal, 15);
        infer(log2_max_mv_length_vertical,   15);
    }

    return 0;
}

static int FUNC(vps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawVPS *current)
{
    int err, i, j;

    HEADER("Video Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_VPS));

    u(4, vps_video_parameter_set_id, 0, 15);

    flag(vps_base_layer_internal_flag);
    flag(vps_base_layer_available_flag);
    u(6, vps_max_layers_minus1,     0, HEVC_MAX_LAYERS - 1);
    u(3, vps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
    flag(vps_temporal_id_nesting_flag);

    if (current->vps_max_sub_layers_minus1 == 0 &&
        current->vps_temporal_id_nesting_flag != 1) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
               "vps_temporal_id_nesting_flag must be 1 if "
               "vps_max_sub_layers_minus1 is 0.\n");
        return AVERROR_INVALIDDATA;
    }

    {
        av_unused uint16_t ffff = 0xffff;
        xu(16, vps_reserved_0xffff_16bits, ffff, 0xffff, 0xffff);
    }

    CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
                                   1, current->vps_max_sub_layers_minus1));

    flag(vps_sub_layer_ordering_info_present_flag);
    for (i = (current->vps_sub_layer_ordering_info_present_flag ?
              0 : current->vps_max_sub_layers_minus1);
         i <= current->vps_max_sub_layers_minus1; i++) {
        ue(vps_max_dec_pic_buffering_minus1[i], 0, HEVC_MAX_DPB_SIZE - 1);
        ue(vps_max_num_reorder_pics[i],         0, current->vps_max_dec_pic_buffering_minus1[i]);
        ue(vps_max_latency_increase_plus1[i],   0, UINT32_MAX - 1);
    }
    if (!current->vps_sub_layer_ordering_info_present_flag) {
        for (i = 0; i < current->vps_max_sub_layers_minus1; i++) {
            infer(vps_max_dec_pic_buffering_minus1[i],
                  current->vps_max_dec_pic_buffering_minus1[current->vps_max_sub_layers_minus1]);
            infer(vps_max_num_reorder_pics[i],
                  current->vps_max_num_reorder_pics[current->vps_max_sub_layers_minus1]);
            infer(vps_max_latency_increase_plus1[i],
                  current->vps_max_latency_increase_plus1[current->vps_max_sub_layers_minus1]);
        }
    }

    u(6, vps_max_layer_id,        0, HEVC_MAX_LAYERS - 1);
    ue(vps_num_layer_sets_minus1, 0, HEVC_MAX_LAYER_SETS - 1);
    for (i = 1; i <= current->vps_num_layer_sets_minus1; i++) {
        for (j = 0; j <= current->vps_max_layer_id; j++)
            flag(layer_id_included_flag[i][j]);
    }
    for (j = 0; j <= current->vps_max_layer_id; j++)
        infer(layer_id_included_flag[0][j], j == 0);

    flag(vps_timing_info_present_flag);
    if (current->vps_timing_info_present_flag) {
        u(32, vps_num_units_in_tick, 1, UINT32_MAX);
        u(32, vps_time_scale,        1, UINT32_MAX);
        flag(vps_poc_proportional_to_timing_flag);
        if (current->vps_poc_proportional_to_timing_flag)
            ue(vps_num_ticks_poc_diff_one_minus1, 0, UINT32_MAX - 1);
        ue(vps_num_hrd_parameters, 0, current->vps_num_layer_sets_minus1 + 1);
        for (i = 0; i < current->vps_num_hrd_parameters; i++) {
            ue(hrd_layer_set_idx[i],
               current->vps_base_layer_internal_flag ? 0 : 1,
               current->vps_num_layer_sets_minus1);
            if (i > 0)
                flag(cprms_present_flag[i]);
            else
                infer(cprms_present_flag[0], 1);

            CHECK(FUNC(hrd_parameters)(ctx, rw, &current->hrd_parameters[i],
                                       current->cprms_present_flag[i],
                                       current->vps_max_sub_layers_minus1));
        }
    }

    flag(vps_extension_flag);
    if (current->vps_extension_flag)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));
    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}
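
// st_rps_idx is the index of this RPS within the SPS list; when it
// equals sps->num_short_term_ref_pic_sets, the set is the one coded
// directly in a slice segment header rather than in the SPS.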
static int FUNC(st_ref_pic_set)(CodedBitstreamContext *ctx, RWContext *rw,
                                H265RawSTRefPicSet *current, int st_rps_idx,
                                const H265RawSPS *sps)
{
    int err, i, j;

    if (st_rps_idx != 0)
        flag(inter_ref_pic_set_prediction_flag);
    else
        infer(inter_ref_pic_set_prediction_flag, 0);

    if (current->inter_ref_pic_set_prediction_flag) {
        unsigned int ref_rps_idx, num_delta_pocs;
        const H265RawSTRefPicSet *ref;
        int delta_rps, d_poc;
        int ref_delta_poc_s0[HEVC_MAX_REFS], ref_delta_poc_s1[HEVC_MAX_REFS];
        int delta_poc_s0[HEVC_MAX_REFS], delta_poc_s1[HEVC_MAX_REFS];
        uint8_t used_by_curr_pic_s0[HEVC_MAX_REFS],
                used_by_curr_pic_s1[HEVC_MAX_REFS];

        if (st_rps_idx == sps->num_short_term_ref_pic_sets)
            ue(delta_idx_minus1, 0, st_rps_idx - 1);
        else
            infer(delta_idx_minus1, 0);

        ref_rps_idx = st_rps_idx - (current->delta_idx_minus1 + 1);
        ref = &sps->st_ref_pic_set[ref_rps_idx];
        num_delta_pocs = ref->num_negative_pics + ref->num_positive_pics;

        flag(delta_rps_sign);
        ue(abs_delta_rps_minus1, 0, INT16_MAX);
        delta_rps = (1 - 2 * current->delta_rps_sign) *
            (current->abs_delta_rps_minus1 + 1);

        for (j = 0; j <= num_delta_pocs; j++) {
            flag(used_by_curr_pic_flag[j]);
            if (!current->used_by_curr_pic_flag[j])
                flag(use_delta_flag[j]);
            else
                infer(use_delta_flag[j], 1);
        }

        // Since the stored form of an RPS here is actually the delta-step
        // form used when inter_ref_pic_set_prediction_flag is not set, we
        // need to reconstruct that here in order to be able to refer to
        // the RPS later (which is required for parsing, because we don't
        // even know what syntax elements appear without it).  Therefore,
        // this code takes the delta-step form of the reference set, turns
        // it into the delta-array form, applies the prediction process of
        // 7.4.8, converts the result back to the delta-step form, and
        // stores that as the current set for future use.  Note that the
        // inferences here mean that writers using prediction will need
        // to fill in the delta-step values correctly as well - since the
        // whole RPS prediction process is somewhat overly sophisticated,
        // this hopefully forms a useful check for them to ensure their
        // predicted form actually matches what was intended rather than
        // an onerous additional requirement.
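        //
        // Worked example (illustration only): take a reference set with a
        // single negative picture at d_poc -1 (num_negative_pics = 1,
        // delta_poc_s0_minus1[0] = 0), no positive pictures, delta_rps = -1,
        // and all use_delta / used_by_curr_pic flags set.  The S0 loops
        // below first insert delta_rps itself (d_poc -1) and then the
        // shifted reference picture (-1 + -1 = -2), giving
        // delta_poc_s0 = { -1, -2 }; converting back to step form infers
        // num_negative_pics = 2 and delta_poc_s0_minus1 = { 0, 0 }.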
        d_poc = 0;
        for (i = 0; i < ref->num_negative_pics; i++) {
            d_poc -= ref->delta_poc_s0_minus1[i] + 1;
            ref_delta_poc_s0[i] = d_poc;
        }
        d_poc = 0;
        for (i = 0; i < ref->num_positive_pics; i++) {
            d_poc += ref->delta_poc_s1_minus1[i] + 1;
            ref_delta_poc_s1[i] = d_poc;
        }

        i = 0;
        for (j = ref->num_positive_pics - 1; j >= 0; j--) {
            d_poc = ref_delta_poc_s1[j] + delta_rps;
            if (d_poc < 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
                delta_poc_s0[i] = d_poc;
                used_by_curr_pic_s0[i++] =
                    current->used_by_curr_pic_flag[ref->num_negative_pics + j];
            }
        }
        if (delta_rps < 0 && current->use_delta_flag[num_delta_pocs]) {
            delta_poc_s0[i] = delta_rps;
            used_by_curr_pic_s0[i++] =
                current->used_by_curr_pic_flag[num_delta_pocs];
        }
        for (j = 0; j < ref->num_negative_pics; j++) {
            d_poc = ref_delta_poc_s0[j] + delta_rps;
            if (d_poc < 0 && current->use_delta_flag[j]) {
                delta_poc_s0[i] = d_poc;
                used_by_curr_pic_s0[i++] = current->used_by_curr_pic_flag[j];
            }
        }

        infer(num_negative_pics, i);
        for (i = 0; i < current->num_negative_pics; i++) {
            infer(delta_poc_s0_minus1[i],
                  -(delta_poc_s0[i] - (i == 0 ? 0 : delta_poc_s0[i - 1])) - 1);
            infer(used_by_curr_pic_s0_flag[i], used_by_curr_pic_s0[i]);
        }

        i = 0;
        for (j = ref->num_negative_pics - 1; j >= 0; j--) {
            d_poc = ref_delta_poc_s0[j] + delta_rps;
            if (d_poc > 0 && current->use_delta_flag[j]) {
                delta_poc_s1[i] = d_poc;
                used_by_curr_pic_s1[i++] = current->used_by_curr_pic_flag[j];
            }
        }
        if (delta_rps > 0 && current->use_delta_flag[num_delta_pocs]) {
            delta_poc_s1[i] = delta_rps;
            used_by_curr_pic_s1[i++] =
                current->used_by_curr_pic_flag[num_delta_pocs];
        }
        for (j = 0; j < ref->num_positive_pics; j++) {
            d_poc = ref_delta_poc_s1[j] + delta_rps;
            if (d_poc > 0 && current->use_delta_flag[ref->num_negative_pics + j]) {
                delta_poc_s1[i] = d_poc;
                used_by_curr_pic_s1[i++] =
                    current->used_by_curr_pic_flag[ref->num_negative_pics + j];
            }
        }

        infer(num_positive_pics, i);
        for (i = 0; i < current->num_positive_pics; i++) {
            infer(delta_poc_s1_minus1[i],
                  delta_poc_s1[i] - (i == 0 ? 0 : delta_poc_s1[i - 1]) - 1);
            infer(used_by_curr_pic_s1_flag[i], used_by_curr_pic_s1[i]);
        }
    } else {
        ue(num_negative_pics, 0, 15);
        ue(num_positive_pics, 0, 15 - current->num_negative_pics);
        for (i = 0; i < current->num_negative_pics; i++) {
            ue(delta_poc_s0_minus1[i], 0, INT16_MAX);
            flag(used_by_curr_pic_s0_flag[i]);
        }
        for (i = 0; i < current->num_positive_pics; i++) {
            ue(delta_poc_s1_minus1[i], 0, INT16_MAX);
            flag(used_by_curr_pic_s1_flag[i]);
        }
    }

    return 0;
}
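
// Only the delta values are stored here, not the reconstructed scaling
// lists.  For the 32x32 size (sizeId 3) only matrixId 0 and 3 are coded,
// hence the step of 3 in the inner loop below.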
static int FUNC(scaling_list_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawScalingList *current)
{
    int sizeId, matrixId;
    int err, n, i;

    for (sizeId = 0; sizeId < 4; sizeId++) {
        for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3 ? 3 : 1)) {
            flag(scaling_list_pred_mode_flag[sizeId][matrixId]);
            if (!current->scaling_list_pred_mode_flag[sizeId][matrixId]) {
                ue(scaling_list_pred_matrix_id_delta[sizeId][matrixId],
                   0, sizeId == 3 ? matrixId / 3 : matrixId);
            } else {
                n = FFMIN(64, 1 << (4 + (sizeId << 1)));
                if (sizeId > 1)
                    se(scaling_list_dc_coef_minus8[sizeId - 2][matrixId], -7, +247);
                for (i = 0; i < n; i++) {
                    xse(scaling_list_delta_coeff,
                        current->scaling_list_delta_coeff[sizeId][matrixId][i],
                        -128, +127);
                }
            }
        }
    }

    return 0;
}

static int FUNC(sps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                     H265RawSPS *current)
{
    int err;

    flag(transform_skip_rotation_enabled_flag);
    flag(transform_skip_context_enabled_flag);
    flag(implicit_rdpcm_enabled_flag);
    flag(explicit_rdpcm_enabled_flag);
    flag(extended_precision_processing_flag);
    flag(intra_smoothing_disabled_flag);
    flag(high_precision_offsets_enabled_flag);
    flag(persistent_rice_adaptation_enabled_flag);
    flag(cabac_bypass_alignment_enabled_flag);

    return 0;
}

static int FUNC(sps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawSPS *current)
{
    int err, comp, i;

    flag(sps_curr_pic_ref_enabled_flag);

    flag(palette_mode_enabled_flag);
    if (current->palette_mode_enabled_flag) {
        ue(palette_max_size, 0, 64);
        ue(delta_palette_max_predictor_size, 0, 128);

        flag(sps_palette_predictor_initializer_present_flag);
        if (current->sps_palette_predictor_initializer_present_flag) {
            ue(sps_num_palette_predictor_initializer_minus1, 0, 128);
            for (comp = 0; comp < (current->chroma_format_idc ? 3 : 1); comp++) {
                int bit_depth = comp == 0 ? current->bit_depth_luma_minus8 + 8
                                          : current->bit_depth_chroma_minus8 + 8;
                for (i = 0; i <= current->sps_num_palette_predictor_initializer_minus1; i++)
                    u(bit_depth, sps_palette_predictor_initializers[comp][i],
                      0, (1 << bit_depth) - 1);
            }
        }
    }

    u(2, motion_vector_resolution_control_idc, 0, 2);
    flag(intra_boundary_filtering_disable_flag);

    return 0;
}

static int FUNC(sps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawSPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawVPS *vps;
    int err, i;
    unsigned int min_cb_log2_size_y, ctb_log2_size_y,
                 min_cb_size_y, min_tb_log2_size_y;

    HEADER("Sequence Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_SPS));

    u(4, sps_video_parameter_set_id, 0, 15);
    h265->active_vps = vps = h265->vps[current->sps_video_parameter_set_id];

    u(3, sps_max_sub_layers_minus1, 0, HEVC_MAX_SUB_LAYERS - 1);
    flag(sps_temporal_id_nesting_flag);

    if (vps) {
        if (current->sps_max_sub_layers_minus1 > vps->vps_max_sub_layers_minus1) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
                   "sps_max_sub_layers_minus1 (%d) must be less than or equal to "
                   "vps_max_sub_layers_minus1 (%d).\n",
                   current->sps_max_sub_layers_minus1,
                   vps->vps_max_sub_layers_minus1);
            return AVERROR_INVALIDDATA;
        }
        if (vps->vps_temporal_id_nesting_flag &&
            !current->sps_temporal_id_nesting_flag) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid stream: "
                   "sps_temporal_id_nesting_flag must be 1 if "
                   "vps_temporal_id_nesting_flag is 1.\n");
            return AVERROR_INVALIDDATA;
        }
    }

    CHECK(FUNC(profile_tier_level)(ctx, rw, &current->profile_tier_level,
                                   1, current->sps_max_sub_layers_minus1));

    ue(sps_seq_parameter_set_id, 0, 15);

    ue(chroma_format_idc, 0, 3);
    if (current->chroma_format_idc == 3)
        flag(separate_colour_plane_flag);
    else
        infer(separate_colour_plane_flag, 0);

    ue(pic_width_in_luma_samples,  1, HEVC_MAX_WIDTH);
    ue(pic_height_in_luma_samples, 1, HEVC_MAX_HEIGHT);

    flag(conformance_window_flag);
    if (current->conformance_window_flag) {
        ue(conf_win_left_offset,   0, current->pic_width_in_luma_samples);
        ue(conf_win_right_offset,  0, current->pic_width_in_luma_samples);
        ue(conf_win_top_offset,    0, current->pic_height_in_luma_samples);
        ue(conf_win_bottom_offset, 0, current->pic_height_in_luma_samples);
    } else {
        infer(conf_win_left_offset,   0);
        infer(conf_win_right_offset,  0);
        infer(conf_win_top_offset,    0);
        infer(conf_win_bottom_offset, 0);
    }

    ue(bit_depth_luma_minus8,   0, 8);
    ue(bit_depth_chroma_minus8, 0, 8);

    ue(log2_max_pic_order_cnt_lsb_minus4, 0, 12);

    flag(sps_sub_layer_ordering_info_present_flag);
    for (i = (current->sps_sub_layer_ordering_info_present_flag ?
              0 : current->sps_max_sub_layers_minus1);
         i <= current->sps_max_sub_layers_minus1; i++) {
        ue(sps_max_dec_pic_buffering_minus1[i], 0, HEVC_MAX_DPB_SIZE - 1);
        ue(sps_max_num_reorder_pics[i],         0, current->sps_max_dec_pic_buffering_minus1[i]);
        ue(sps_max_latency_increase_plus1[i],   0, UINT32_MAX - 1);
    }
    if (!current->sps_sub_layer_ordering_info_present_flag) {
        for (i = 0; i < current->sps_max_sub_layers_minus1; i++) {
            infer(sps_max_dec_pic_buffering_minus1[i],
                  current->sps_max_dec_pic_buffering_minus1[current->sps_max_sub_layers_minus1]);
            infer(sps_max_num_reorder_pics[i],
                  current->sps_max_num_reorder_pics[current->sps_max_sub_layers_minus1]);
            infer(sps_max_latency_increase_plus1[i],
                  current->sps_max_latency_increase_plus1[current->sps_max_sub_layers_minus1]);
        }
    }

    ue(log2_min_luma_coding_block_size_minus3, 0, 3);
    min_cb_log2_size_y = current->log2_min_luma_coding_block_size_minus3 + 3;

    ue(log2_diff_max_min_luma_coding_block_size, 0, 3);
    ctb_log2_size_y = min_cb_log2_size_y +
        current->log2_diff_max_min_luma_coding_block_size;

    min_cb_size_y = 1 << min_cb_log2_size_y;
    if (current->pic_width_in_luma_samples  % min_cb_size_y ||
        current->pic_height_in_luma_samples % min_cb_size_y) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid dimensions: %ux%u not divisible "
               "by MinCbSizeY = %u.\n", current->pic_width_in_luma_samples,
               current->pic_height_in_luma_samples, min_cb_size_y);
        return AVERROR_INVALIDDATA;
    }

    ue(log2_min_luma_transform_block_size_minus2, 0, min_cb_log2_size_y - 3);
    min_tb_log2_size_y = current->log2_min_luma_transform_block_size_minus2 + 2;

    ue(log2_diff_max_min_luma_transform_block_size,
       0, FFMIN(ctb_log2_size_y, 5) - min_tb_log2_size_y);

    ue(max_transform_hierarchy_depth_inter,
       0, ctb_log2_size_y - min_tb_log2_size_y);
    ue(max_transform_hierarchy_depth_intra,
       0, ctb_log2_size_y - min_tb_log2_size_y);

    flag(scaling_list_enabled_flag);
    if (current->scaling_list_enabled_flag) {
        flag(sps_scaling_list_data_present_flag);
        if (current->sps_scaling_list_data_present_flag)
            CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));
    } else {
        infer(sps_scaling_list_data_present_flag, 0);
    }

    flag(amp_enabled_flag);
    flag(sample_adaptive_offset_enabled_flag);

    flag(pcm_enabled_flag);
    if (current->pcm_enabled_flag) {
        u(4, pcm_sample_bit_depth_luma_minus1,
          0, current->bit_depth_luma_minus8 + 8 - 1);
        u(4, pcm_sample_bit_depth_chroma_minus1,
          0, current->bit_depth_chroma_minus8 + 8 - 1);

        ue(log2_min_pcm_luma_coding_block_size_minus3,
           FFMIN(min_cb_log2_size_y, 5) - 3, FFMIN(ctb_log2_size_y, 5) - 3);
        ue(log2_diff_max_min_pcm_luma_coding_block_size,
           0, FFMIN(ctb_log2_size_y, 5) - (current->log2_min_pcm_luma_coding_block_size_minus3 + 3));

        flag(pcm_loop_filter_disabled_flag);
    }

    ue(num_short_term_ref_pic_sets, 0, HEVC_MAX_SHORT_TERM_REF_PIC_SETS);
    for (i = 0; i < current->num_short_term_ref_pic_sets; i++)
        CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->st_ref_pic_set[i], i, current));

    flag(long_term_ref_pics_present_flag);
    if (current->long_term_ref_pics_present_flag) {
        ue(num_long_term_ref_pics_sps, 0, HEVC_MAX_LONG_TERM_REF_PICS);
        for (i = 0; i < current->num_long_term_ref_pics_sps; i++) {
            u(current->log2_max_pic_order_cnt_lsb_minus4 + 4,
              lt_ref_pic_poc_lsb_sps[i],
              0, (1 << (current->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);
            flag(used_by_curr_pic_lt_sps_flag[i]);
        }
    }

    flag(sps_temporal_mvp_enabled_flag);
    flag(strong_intra_smoothing_enabled_flag);

    flag(vui_parameters_present_flag);
    if (current->vui_parameters_present_flag)
        CHECK(FUNC(vui_parameters)(ctx, rw, &current->vui, current));

    flag(sps_extension_present_flag);
    if (current->sps_extension_present_flag) {
        flag(sps_range_extension_flag);
        flag(sps_multilayer_extension_flag);
        flag(sps_3d_extension_flag);
        flag(sps_scc_extension_flag);
        u(4, sps_extension_4bits, 0, (1 << 4) - 1);
    }

    if (current->sps_range_extension_flag)
        CHECK(FUNC(sps_range_extension)(ctx, rw, current));
    if (current->sps_multilayer_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->sps_3d_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->sps_scc_extension_flag)
        CHECK(FUNC(sps_scc_extension)(ctx, rw, current));
    if (current->sps_extension_4bits)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}
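
// The range-extension limits below depend on the active SPS, which
// FUNC(pps) resolves and stores in the codec context before calling
// this function.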
static int FUNC(pps_range_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                     H265RawPPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps = h265->active_sps;
    int err, i;

    if (current->transform_skip_enabled_flag)
        ue(log2_max_transform_skip_block_size_minus2, 0, 4);
    flag(cross_component_prediction_enabled_flag);

    flag(chroma_qp_offset_list_enabled_flag);
    if (current->chroma_qp_offset_list_enabled_flag) {
        ue(diff_cu_chroma_qp_offset_depth,
           0, sps->log2_diff_max_min_luma_coding_block_size);
        ue(chroma_qp_offset_list_len_minus1, 0, 5);
        for (i = 0; i <= current->chroma_qp_offset_list_len_minus1; i++) {
            se(cb_qp_offset_list[i], -12, +12);
            se(cr_qp_offset_list[i], -12, +12);
        }
    }

    ue(log2_sao_offset_scale_luma,   0, FFMAX(0, sps->bit_depth_luma_minus8   - 2));
    ue(log2_sao_offset_scale_chroma, 0, FFMAX(0, sps->bit_depth_chroma_minus8 - 2));

    return 0;
}

static int FUNC(pps_scc_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawPPS *current)
{
    int err, comp, i;

    flag(pps_curr_pic_ref_enabled_flag);

    flag(residual_adaptive_colour_transform_enabled_flag);
    if (current->residual_adaptive_colour_transform_enabled_flag) {
        flag(pps_slice_act_qp_offsets_present_flag);
        se(pps_act_y_qp_offset_plus5,  -7, +17);
        se(pps_act_cb_qp_offset_plus5, -7, +17);
        se(pps_act_cr_qp_offset_plus3, -9, +15);
    } else {
        infer(pps_slice_act_qp_offsets_present_flag, 0);
        infer(pps_act_y_qp_offset_plus5,  0);
        infer(pps_act_cb_qp_offset_plus5, 0);
        infer(pps_act_cr_qp_offset_plus3, 0);
    }

    flag(pps_palette_predictor_initializer_present_flag);
    if (current->pps_palette_predictor_initializer_present_flag) {
        ue(pps_num_palette_predictor_initializer, 0, 128);
        if (current->pps_num_palette_predictor_initializer > 0) {
            flag(monochrome_palette_flag);
            ue(luma_bit_depth_entry_minus8, 0, 8);
            if (!current->monochrome_palette_flag)
                ue(chroma_bit_depth_entry_minus8, 0, 8);
            for (comp = 0; comp < (current->monochrome_palette_flag ? 1 : 3); comp++) {
                int bit_depth = comp == 0 ? current->luma_bit_depth_entry_minus8 + 8
                                          : current->chroma_bit_depth_entry_minus8 + 8;
                for (i = 0; i < current->pps_num_palette_predictor_initializer; i++)
                    u(bit_depth, pps_palette_predictor_initializers[comp][i],
                      0, (1 << bit_depth) - 1);
            }
        }
    }

    return 0;
}

static int FUNC(pps)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawPPS *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps;
    int err, i;

    HEADER("Picture Parameter Set");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_PPS));

    ue(pps_pic_parameter_set_id, 0, 63);
    ue(pps_seq_parameter_set_id, 0, 15);
    sps = h265->sps[current->pps_seq_parameter_set_id];
    if (!sps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
               current->pps_seq_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_sps = sps;

    flag(dependent_slice_segments_enabled_flag);
    flag(output_flag_present_flag);
    u(3, num_extra_slice_header_bits, 0, 7);
    flag(sign_data_hiding_enabled_flag);
    flag(cabac_init_present_flag);

    ue(num_ref_idx_l0_default_active_minus1, 0, 14);
    ue(num_ref_idx_l1_default_active_minus1, 0, 14);

    se(init_qp_minus26, -(26 + 6 * sps->bit_depth_luma_minus8), +25);

    flag(constrained_intra_pred_flag);
    flag(transform_skip_enabled_flag);

    flag(cu_qp_delta_enabled_flag);
    if (current->cu_qp_delta_enabled_flag)
        ue(diff_cu_qp_delta_depth,
           0, sps->log2_diff_max_min_luma_coding_block_size);
    else
        infer(diff_cu_qp_delta_depth, 0);

    se(pps_cb_qp_offset, -12, +12);
    se(pps_cr_qp_offset, -12, +12);
    flag(pps_slice_chroma_qp_offsets_present_flag);

    flag(weighted_pred_flag);
    flag(weighted_bipred_flag);

    flag(transquant_bypass_enabled_flag);
    flag(tiles_enabled_flag);
    flag(entropy_coding_sync_enabled_flag);

    if (current->tiles_enabled_flag) {
        ue(num_tile_columns_minus1, 0, HEVC_MAX_TILE_COLUMNS);
        ue(num_tile_rows_minus1,    0, HEVC_MAX_TILE_ROWS);
        flag(uniform_spacing_flag);
        if (!current->uniform_spacing_flag) {
            for (i = 0; i < current->num_tile_columns_minus1; i++)
                ue(column_width_minus1[i], 0, sps->pic_width_in_luma_samples);
            for (i = 0; i < current->num_tile_rows_minus1; i++)
                ue(row_height_minus1[i], 0, sps->pic_height_in_luma_samples);
        }
        flag(loop_filter_across_tiles_enabled_flag);
    } else {
        infer(num_tile_columns_minus1, 0);
        infer(num_tile_rows_minus1,    0);
    }

    flag(pps_loop_filter_across_slices_enabled_flag);
    flag(deblocking_filter_control_present_flag);
    if (current->deblocking_filter_control_present_flag) {
        flag(deblocking_filter_override_enabled_flag);
        flag(pps_deblocking_filter_disabled_flag);
        if (!current->pps_deblocking_filter_disabled_flag) {
            se(pps_beta_offset_div2, -6, +6);
            se(pps_tc_offset_div2,   -6, +6);
        } else {
            infer(pps_beta_offset_div2, 0);
            infer(pps_tc_offset_div2,   0);
        }
    } else {
        infer(deblocking_filter_override_enabled_flag, 0);
        infer(pps_deblocking_filter_disabled_flag,     0);
        infer(pps_beta_offset_div2, 0);
        infer(pps_tc_offset_div2,   0);
    }

    flag(pps_scaling_list_data_present_flag);
    if (current->pps_scaling_list_data_present_flag)
        CHECK(FUNC(scaling_list_data)(ctx, rw, &current->scaling_list));

    flag(lists_modification_present_flag);

    ue(log2_parallel_merge_level_minus2,
       0, (sps->log2_min_luma_coding_block_size_minus3 + 3 +
           sps->log2_diff_max_min_luma_coding_block_size - 2));

    flag(slice_segment_header_extension_present_flag);

    flag(pps_extension_present_flag);
    if (current->pps_extension_present_flag) {
        flag(pps_range_extension_flag);
        flag(pps_multilayer_extension_flag);
        flag(pps_3d_extension_flag);
        flag(pps_scc_extension_flag);
        u(4, pps_extension_4bits, 0, (1 << 4) - 1);
    }

    if (current->pps_range_extension_flag)
        CHECK(FUNC(pps_range_extension)(ctx, rw, current));
    if (current->pps_multilayer_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->pps_3d_extension_flag)
        return AVERROR_PATCHWELCOME;
    if (current->pps_scc_extension_flag)
        CHECK(FUNC(pps_scc_extension)(ctx, rw, current));
    if (current->pps_extension_4bits)
        CHECK(FUNC(extension_data)(ctx, rw, &current->extension_data));

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

static int FUNC(aud)(CodedBitstreamContext *ctx, RWContext *rw,
                     H265RawAUD *current)
{
    int err;

    HEADER("Access Unit Delimiter");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, HEVC_NAL_AUD));

    u(3, pic_type, 0, 2);

    CHECK(FUNC(rbsp_trailing_bits)(ctx, rw));

    return 0;
}

static int FUNC(ref_pic_lists_modification)(CodedBitstreamContext *ctx, RWContext *rw,
                                            H265RawSliceHeader *current,
                                            unsigned int num_pic_total_curr)
{
    unsigned int entry_size;
    int err, i;

    entry_size = av_log2(num_pic_total_curr - 1) + 1;

    flag(ref_pic_list_modification_flag_l0);
    if (current->ref_pic_list_modification_flag_l0) {
        for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++)
            u(entry_size, list_entry_l0[i], 0, num_pic_total_curr - 1);
    }

    if (current->slice_type == HEVC_SLICE_B) {
        flag(ref_pic_list_modification_flag_l1);
        if (current->ref_pic_list_modification_flag_l1) {
            for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++)
                u(entry_size, list_entry_l1[i], 0, num_pic_total_curr - 1);
        }
    }

    return 0;
}
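
// The "if (1 /* ... */)" conditions below stand in for conditions in
// the specification on each reference picture (whether it has the same
// POC as the current picture and lies in the same layer) which depend
// on decoder state not tracked here, so the flags are always coded
// explicitly.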
static int FUNC(pred_weight_table)(CodedBitstreamContext *ctx, RWContext *rw,
                                   H265RawSliceHeader *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps = h265->active_sps;
    int err, i, j;
    int chroma = !sps->separate_colour_plane_flag &&
                  sps->chroma_format_idc != 0;

    ue(luma_log2_weight_denom, 0, 7);
    if (chroma)
        se(delta_chroma_log2_weight_denom, -7, 7);
    else
        infer(delta_chroma_log2_weight_denom, 0);

    for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
        if (1 /* is not same POC and same layer_id */)
            flag(luma_weight_l0_flag[i]);
        else
            infer(luma_weight_l0_flag[i], 0);
    }
    if (chroma) {
        for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
            if (1 /* is not same POC and same layer_id */)
                flag(chroma_weight_l0_flag[i]);
            else
                infer(chroma_weight_l0_flag[i], 0);
        }
    }

    for (i = 0; i <= current->num_ref_idx_l0_active_minus1; i++) {
        if (current->luma_weight_l0_flag[i]) {
            se(delta_luma_weight_l0[i], -128, +127);
            se(luma_offset_l0[i],
               -(1 << (sps->bit_depth_luma_minus8 + 8 - 1)),
               +(1 << (sps->bit_depth_luma_minus8 + 8 - 1)) - 1);
        } else {
            infer(delta_luma_weight_l0[i], 0);
            infer(luma_offset_l0[i], 0);
        }
        if (current->chroma_weight_l0_flag[i]) {
            for (j = 0; j < 2; j++) {
                se(delta_chroma_weight_l0[i][j], -128, +127);
                se(chroma_offset_l0[i][j],
                   -(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)),
                   +(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)) - 1);
            }
        } else {
            for (j = 0; j < 2; j++) {
                infer(delta_chroma_weight_l0[i][j], 0);
                infer(chroma_offset_l0[i][j], 0);
            }
        }
    }

    if (current->slice_type == HEVC_SLICE_B) {
        for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
            if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
                flag(luma_weight_l1_flag[i]);
            else
                infer(luma_weight_l1_flag[i], 0);
        }
        if (chroma) {
            for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
                if (1 /* RefPicList1[i] is not CurrPic, nor is it in a different layer */)
                    flag(chroma_weight_l1_flag[i]);
                else
                    infer(chroma_weight_l1_flag[i], 0);
            }
        }

        for (i = 0; i <= current->num_ref_idx_l1_active_minus1; i++) {
            if (current->luma_weight_l1_flag[i]) {
                se(delta_luma_weight_l1[i], -128, +127);
                se(luma_offset_l1[i],
                   -(1 << (sps->bit_depth_luma_minus8 + 8 - 1)),
                   +(1 << (sps->bit_depth_luma_minus8 + 8 - 1)) - 1);
            } else {
                infer(delta_luma_weight_l1[i], 0);
                infer(luma_offset_l1[i], 0);
            }
            if (current->chroma_weight_l1_flag[i]) {
                for (j = 0; j < 2; j++) {
                    se(delta_chroma_weight_l1[i][j], -128, +127);
                    se(chroma_offset_l1[i][j],
                       -(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)),
                       +(4 << (sps->bit_depth_chroma_minus8 + 8 - 1)) - 1);
                }
            } else {
                for (j = 0; j < 2; j++) {
                    infer(delta_chroma_weight_l1[i][j], 0);
                    infer(chroma_offset_l1[i][j], 0);
                }
            }
        }
    }

    return 0;
}
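
// The CTB grid dimensions derived at the top of this function determine
// the size in bits of slice_segment_address and the limit on
// num_entry_point_offsets.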
static int FUNC(slice_segment_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                      H265RawSliceHeader *current)
{
    CodedBitstreamH265Context *h265 = ctx->priv_data;
    const H265RawSPS *sps;
    const H265RawPPS *pps;
    unsigned int min_cb_log2_size_y, ctb_log2_size_y, ctb_size_y;
    unsigned int pic_width_in_ctbs_y, pic_height_in_ctbs_y, pic_size_in_ctbs_y;
    unsigned int num_pic_total_curr = 0;
    int err, i;

    HEADER("Slice Segment Header");

    CHECK(FUNC(nal_unit_header)(ctx, rw, &current->nal_unit_header, -1));

    flag(first_slice_segment_in_pic_flag);

    if (current->nal_unit_header.nal_unit_type >= HEVC_NAL_BLA_W_LP &&
        current->nal_unit_header.nal_unit_type <= HEVC_NAL_IRAP_VCL23)
        flag(no_output_of_prior_pics_flag);

    ue(slice_pic_parameter_set_id, 0, 63);

    pps = h265->pps[current->slice_pic_parameter_set_id];
    if (!pps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "PPS id %d not available.\n",
               current->slice_pic_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_pps = pps;

    sps = h265->sps[pps->pps_seq_parameter_set_id];
    if (!sps) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "SPS id %d not available.\n",
               pps->pps_seq_parameter_set_id);
        return AVERROR_INVALIDDATA;
    }
    h265->active_sps = sps;

    min_cb_log2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
    ctb_log2_size_y = min_cb_log2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
    ctb_size_y = 1 << ctb_log2_size_y;
    pic_width_in_ctbs_y =
        (sps->pic_width_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_height_in_ctbs_y =
        (sps->pic_height_in_luma_samples + ctb_size_y - 1) / ctb_size_y;
    pic_size_in_ctbs_y = pic_width_in_ctbs_y * pic_height_in_ctbs_y;

    if (!current->first_slice_segment_in_pic_flag) {
        unsigned int address_size = av_log2(pic_size_in_ctbs_y - 1) + 1;

        if (pps->dependent_slice_segments_enabled_flag)
            flag(dependent_slice_segment_flag);
        else
            infer(dependent_slice_segment_flag, 0);

        u(address_size, slice_segment_address, 0, pic_size_in_ctbs_y - 1);
    } else {
        infer(dependent_slice_segment_flag, 0);
    }

    if (!current->dependent_slice_segment_flag) {
        for (i = 0; i < pps->num_extra_slice_header_bits; i++)
            flag(slice_reserved_flag[i]);

        ue(slice_type, 0, 2);

        if (pps->output_flag_present_flag)
            flag(pic_output_flag);

        if (sps->separate_colour_plane_flag)
            u(2, colour_plane_id, 0, 2);

        if (current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_W_RADL &&
            current->nal_unit_header.nal_unit_type != HEVC_NAL_IDR_N_LP) {
            const H265RawSTRefPicSet *rps;

            u(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, slice_pic_order_cnt_lsb,
              0, (1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);

            flag(short_term_ref_pic_set_sps_flag);
            if (!current->short_term_ref_pic_set_sps_flag) {
                CHECK(FUNC(st_ref_pic_set)(ctx, rw, &current->short_term_ref_pic_set,
                                           sps->num_short_term_ref_pic_sets, sps));
                rps = &current->short_term_ref_pic_set;
            } else if (sps->num_short_term_ref_pic_sets > 1) {
                unsigned int idx_size = av_log2(sps->num_short_term_ref_pic_sets - 1) + 1;
                u(idx_size, short_term_ref_pic_set_idx,
                  0, sps->num_short_term_ref_pic_sets - 1);
                rps = &sps->st_ref_pic_set[current->short_term_ref_pic_set_idx];
            } else {
                infer(short_term_ref_pic_set_idx, 0);
                rps = &sps->st_ref_pic_set[0];
            }

            num_pic_total_curr = 0;
            for (i = 0; i < rps->num_negative_pics; i++)
                if (rps->used_by_curr_pic_s0_flag[i])
                    ++num_pic_total_curr;
            for (i = 0; i < rps->num_positive_pics; i++)
                if (rps->used_by_curr_pic_s1_flag[i])
                    ++num_pic_total_curr;

            if (sps->long_term_ref_pics_present_flag) {
                unsigned int idx_size;

                if (sps->num_long_term_ref_pics_sps > 0) {
                    ue(num_long_term_sps, 0, sps->num_long_term_ref_pics_sps);
                    idx_size = av_log2(sps->num_long_term_ref_pics_sps - 1) + 1;
                } else {
                    infer(num_long_term_sps, 0);
                    idx_size = 0;
                }
                ue(num_long_term_pics, 0, HEVC_MAX_LONG_TERM_REF_PICS);

                for (i = 0; i < current->num_long_term_sps +
                                current->num_long_term_pics; i++) {
                    if (i < current->num_long_term_sps) {
                        if (sps->num_long_term_ref_pics_sps > 1)
                            u(idx_size, lt_idx_sps[i],
                              0, sps->num_long_term_ref_pics_sps - 1);
                        if (sps->used_by_curr_pic_lt_sps_flag[current->lt_idx_sps[i]])
                            ++num_pic_total_curr;
                    } else {
                        u(sps->log2_max_pic_order_cnt_lsb_minus4 + 4, poc_lsb_lt[i],
                          0, (1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1);
                        flag(used_by_curr_pic_lt_flag[i]);
                        if (current->used_by_curr_pic_lt_flag[i])
                            ++num_pic_total_curr;
                    }
                    flag(delta_poc_msb_present_flag[i]);
                    if (current->delta_poc_msb_present_flag[i])
                        ue(delta_poc_msb_cycle_lt[i], 0, UINT32_MAX - 1);
                    else
                        infer(delta_poc_msb_cycle_lt[i], 0);
                }
            }

            if (sps->sps_temporal_mvp_enabled_flag)
                flag(slice_temporal_mvp_enabled_flag);
            else
                infer(slice_temporal_mvp_enabled_flag, 0);

            if (pps->pps_curr_pic_ref_enabled_flag)
                ++num_pic_total_curr;
        }

        if (sps->sample_adaptive_offset_enabled_flag) {
            flag(slice_sao_luma_flag);
            if (!sps->separate_colour_plane_flag && sps->chroma_format_idc != 0)
                flag(slice_sao_chroma_flag);
            else
                infer(slice_sao_chroma_flag, 0);
        } else {
            infer(slice_sao_luma_flag,   0);
            infer(slice_sao_chroma_flag, 0);
        }

        if (current->slice_type == HEVC_SLICE_P ||
            current->slice_type == HEVC_SLICE_B) {
            flag(num_ref_idx_active_override_flag);
            if (current->num_ref_idx_active_override_flag) {
                ue(num_ref_idx_l0_active_minus1, 0, 14);
                if (current->slice_type == HEVC_SLICE_B)
                    ue(num_ref_idx_l1_active_minus1, 0, 14);
                else
                    infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            } else {
                infer(num_ref_idx_l0_active_minus1, pps->num_ref_idx_l0_default_active_minus1);
                infer(num_ref_idx_l1_active_minus1, pps->num_ref_idx_l1_default_active_minus1);
            }

            if (pps->lists_modification_present_flag && num_pic_total_curr > 1)
                CHECK(FUNC(ref_pic_lists_modification)(ctx, rw, current,
                                                       num_pic_total_curr));

            if (current->slice_type == HEVC_SLICE_B)
                flag(mvd_l1_zero_flag);
            if (pps->cabac_init_present_flag)
                flag(cabac_init_flag);
            else
                infer(cabac_init_flag, 0);
            if (current->slice_temporal_mvp_enabled_flag) {
                if (current->slice_type == HEVC_SLICE_B)
                    flag(collocated_from_l0_flag);
                else
                    infer(collocated_from_l0_flag, 1);
                if (current->collocated_from_l0_flag) {
                    if (current->num_ref_idx_l0_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l0_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                } else {
                    if (current->num_ref_idx_l1_active_minus1 > 0)
                        ue(collocated_ref_idx, 0, current->num_ref_idx_l1_active_minus1);
                    else
                        infer(collocated_ref_idx, 0);
                }
            }

            if ((pps->weighted_pred_flag   && current->slice_type == HEVC_SLICE_P) ||
                (pps->weighted_bipred_flag && current->slice_type == HEVC_SLICE_B))
                CHECK(FUNC(pred_weight_table)(ctx, rw, current));

            ue(five_minus_max_num_merge_cand, 0, 4);
            if (sps->motion_vector_resolution_control_idc == 2)
                flag(use_integer_mv_flag);
            else
                infer(use_integer_mv_flag, sps->motion_vector_resolution_control_idc);
        }

        se(slice_qp_delta,
           -6 * sps->bit_depth_luma_minus8 - (pps->init_qp_minus26 + 26),
           +51 - (pps->init_qp_minus26 + 26));
        if (pps->pps_slice_chroma_qp_offsets_present_flag) {
            se(slice_cb_qp_offset, -12, +12);
            se(slice_cr_qp_offset, -12, +12);
        } else {
            infer(slice_cb_qp_offset, 0);
            infer(slice_cr_qp_offset, 0);
        }
        if (pps->pps_slice_act_qp_offsets_present_flag) {
            se(slice_act_y_qp_offset,
               -12 - (pps->pps_act_y_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_y_qp_offset_plus5 - 5));
            se(slice_act_cb_qp_offset,
               -12 - (pps->pps_act_cb_qp_offset_plus5 - 5),
               +12 - (pps->pps_act_cb_qp_offset_plus5 - 5));
            se(slice_act_cr_qp_offset,
               -12 - (pps->pps_act_cr_qp_offset_plus3 - 3),
               +12 - (pps->pps_act_cr_qp_offset_plus3 - 3));
        } else {
            infer(slice_act_y_qp_offset,  0);
            infer(slice_act_cb_qp_offset, 0);
            infer(slice_act_cr_qp_offset, 0);
        }
        if (pps->chroma_qp_offset_list_enabled_flag)
            flag(cu_chroma_qp_offset_enabled_flag);
        else
            infer(cu_chroma_qp_offset_enabled_flag, 0);

        if (pps->deblocking_filter_override_enabled_flag)
            flag(deblocking_filter_override_flag);
        else
            infer(deblocking_filter_override_flag, 0);
        if (current->deblocking_filter_override_flag) {
            flag(slice_deblocking_filter_disabled_flag);
            if (!current->slice_deblocking_filter_disabled_flag) {
                se(slice_beta_offset_div2, -6, +6);
                se(slice_tc_offset_div2,   -6, +6);
            } else {
                infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
                infer(slice_tc_offset_div2,   pps->pps_tc_offset_div2);
            }
        } else {
            infer(slice_deblocking_filter_disabled_flag,
                  pps->pps_deblocking_filter_disabled_flag);
            infer(slice_beta_offset_div2, pps->pps_beta_offset_div2);
            infer(slice_tc_offset_div2,   pps->pps_tc_offset_div2);
        }
        if (pps->pps_loop_filter_across_slices_enabled_flag &&
            (current->slice_sao_luma_flag || current->slice_sao_chroma_flag ||
             !current->slice_deblocking_filter_disabled_flag))
            flag(slice_loop_filter_across_slices_enabled_flag);
        else
            infer(slice_loop_filter_across_slices_enabled_flag,
                  pps->pps_loop_filter_across_slices_enabled_flag);
    }

    if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) {
        unsigned int num_entry_point_offsets_limit;
        if (!pps->tiles_enabled_flag && pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit = pic_height_in_ctbs_y - 1;
        else if (pps->tiles_enabled_flag && !pps->entropy_coding_sync_enabled_flag)
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * (pps->num_tile_rows_minus1 + 1);
        else
            num_entry_point_offsets_limit =
                (pps->num_tile_columns_minus1 + 1) * pic_height_in_ctbs_y - 1;
        ue(num_entry_point_offsets, 0, num_entry_point_offsets_limit);

        if (current->num_entry_point_offsets > HEVC_MAX_ENTRY_POINT_OFFSETS) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Too many entry points: "
                   "%"PRIu16".\n", current->num_entry_point_offsets);
            return AVERROR_PATCHWELCOME;
        }

        if (current->num_entry_point_offsets > 0) {
            ue(offset_len_minus1, 0, 31);
            for (i = 0; i < current->num_entry_point_offsets; i++)
                u(current->offset_len_minus1 + 1, entry_point_offset_minus1[i],
                  0, (1 << (current->offset_len_minus1 + 1)) - 1);
        }
    }

    if (pps->slice_segment_header_extension_present_flag) {
        ue(slice_segment_header_extension_length, 0, 256);
        for (i = 0; i < current->slice_segment_header_extension_length; i++)
            u(8, slice_segment_header_extension_data_byte[i], 0x00, 0xff);
    }

    CHECK(FUNC(byte_alignment)(ctx, rw));

    return 0;
}