/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "bytestream.h"
#include "get_bits.h"
#include "golomb.h"
#include "h264.h"
#include "h264_parse.h"
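
/* Parse the pred_weight_table() portion of a slice header and fill in the
 * explicit weighted-prediction weights and offsets for both reference
 * lists; entries without an explicit weight keep the default weight
 * (1 << log2_weight_denom) and a zero offset. */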
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps,
                              const int *ref_count, int slice_type_nos,
                              H264PredWeightTable *pwt)
{
    int list, i;
    int luma_def, chroma_def;

    pwt->use_weight             = 0;
    pwt->use_weight_chroma      = 0;
    pwt->luma_log2_weight_denom = get_ue_golomb(gb);
    if (sps->chroma_format_idc)
        pwt->chroma_log2_weight_denom = get_ue_golomb(gb);
    luma_def   = 1 << pwt->luma_log2_weight_denom;
    chroma_def = 1 << pwt->chroma_log2_weight_denom;

    for (list = 0; list < 2; list++) {
        pwt->luma_weight_flag[list]   = 0;
        pwt->chroma_weight_flag[list] = 0;
        for (i = 0; i < ref_count[list]; i++) {
            int luma_weight_flag, chroma_weight_flag;

            luma_weight_flag = get_bits1(gb);
            if (luma_weight_flag) {
                pwt->luma_weight[i][list][0] = get_se_golomb(gb);
                pwt->luma_weight[i][list][1] = get_se_golomb(gb);
                if (pwt->luma_weight[i][list][0] != luma_def ||
                    pwt->luma_weight[i][list][1] != 0) {
                    pwt->use_weight             = 1;
                    pwt->luma_weight_flag[list] = 1;
                }
            } else {
                pwt->luma_weight[i][list][0] = luma_def;
                pwt->luma_weight[i][list][1] = 0;
            }

            if (sps->chroma_format_idc) {
                chroma_weight_flag = get_bits1(gb);
                if (chroma_weight_flag) {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = get_se_golomb(gb);
                        pwt->chroma_weight[i][list][j][1] = get_se_golomb(gb);
                        if (pwt->chroma_weight[i][list][j][0] != chroma_def ||
                            pwt->chroma_weight[i][list][j][1] != 0) {
                            pwt->use_weight_chroma        = 1;
                            pwt->chroma_weight_flag[list] = 1;
                        }
                    }
                } else {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = chroma_def;
                        pwt->chroma_weight[i][list][j][1] = 0;
                    }
                }
            }
        }
        if (slice_type_nos != AV_PICTURE_TYPE_B)
            break;
    }
    pwt->use_weight = pwt->use_weight || pwt->use_weight_chroma;
    return 0;
}

/**
 * Check if the top & left blocks are available if needed and
 * change the dc mode so it only uses the available blocks.
 */
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx,
                                     int top_samples_available, int left_samples_available)
{
    static const int8_t top[12] = {
        -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
    };
    static const int8_t left[12] = {
        0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
    };
    int i;

    if (!(top_samples_available & 0x8000)) {
        for (i = 0; i < 4; i++) {
            int status = top[pred_mode_cache[scan8[0] + i]];
            if (status < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "top block unavailable for requested intra4x4 mode %d\n",
                       status);
                return AVERROR_INVALIDDATA;
            } else if (status) {
                pred_mode_cache[scan8[0] + i] = status;
            }
        }
    }

    if ((left_samples_available & 0x8888) != 0x8888) {
        static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
        for (i = 0; i < 4; i++)
            if (!(left_samples_available & mask[i])) {
                int status = left[pred_mode_cache[scan8[0] + 8 * i]];
                if (status < 0) {
                    av_log(logctx, AV_LOG_ERROR,
                           "left block unavailable for requested intra4x4 mode %d\n",
                           status);
                    return AVERROR_INVALIDDATA;
                } else if (status) {
                    pred_mode_cache[scan8[0] + 8 * i] = status;
                }
            }
    }

    return 0;
}

/**
 * Check if the top & left blocks are available if needed and
 * change the dc mode so it only uses the available blocks.
 */
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available,
                                  int left_samples_available,
                                  int mode, int is_chroma)
{
    static const int8_t top[4]  = { LEFT_DC_PRED8x8, 1, -1, -1 };
    static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

    if (mode > 3U) {
        av_log(logctx, AV_LOG_ERROR,
               "out of range intra chroma pred mode\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(top_samples_available & 0x8000)) {
        mode = top[mode];
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "top block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if ((left_samples_available & 0x8080) != 0x8080) {
        mode = left[mode];
        if (is_chroma && (left_samples_available & 0x8080)) {
            // mad cow disease mode, aka MBAFF + constrained_intra_pred
            mode = ALZHEIMER_DC_L0T_PRED8x8 +
                   (!(left_samples_available & 0x8000)) +
                   2 * (mode == DC_128_PRED8x8);
        }
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "left block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
    }

    return mode;
}
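
/* Parse the reference count part of the slice header: start from the PPS
 * defaults and apply num_ref_idx_active_override_flag if present. Invalid
 * counts reset both lists and return AVERROR_INVALIDDATA. */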
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2],
                            GetBitContext *gb, const PPS *pps,
                            int slice_type_nos, int picture_structure)
{
    int list_count;
    int num_ref_idx_active_override_flag, max_refs;

    // set defaults, might be overridden a few lines later
    ref_count[0] = pps->ref_count[0];
    ref_count[1] = pps->ref_count[1];

    if (slice_type_nos != AV_PICTURE_TYPE_I) {
        num_ref_idx_active_override_flag = get_bits1(gb);

        if (num_ref_idx_active_override_flag) {
            ref_count[0] = get_ue_golomb(gb) + 1;
            if (ref_count[0] < 1)
                goto fail;
            if (slice_type_nos == AV_PICTURE_TYPE_B) {
                ref_count[1] = get_ue_golomb(gb) + 1;
                if (ref_count[1] < 1)
                    goto fail;
            }
        }

        if (slice_type_nos == AV_PICTURE_TYPE_B)
            list_count = 2;
        else
            list_count = 1;
    } else {
        list_count   = 0;
        ref_count[0] = ref_count[1] = 0;
    }

    max_refs = picture_structure == PICT_FRAME ? 16 : 32;

    if (ref_count[0] > max_refs || ref_count[1] > max_refs)
        goto fail;

    *plist_count = list_count;

    return 0;
fail:
    *plist_count = 0;
    ref_count[0] = 0;
    ref_count[1] = 0;
    return AVERROR_INVALIDDATA;
}
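
/* Compute the picture order count (POC) of the current picture for the
 * three poc_type modes of the SPS and store the per-field and overall
 * values. */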
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
                     const SPS *sps, H264POCContext *pc,
                     int picture_structure, int nal_ref_idc)
{
    const int max_frame_num = 1 << sps->log2_max_frame_num;
    int field_poc[2];

    pc->frame_num_offset = pc->prev_frame_num_offset;
    if (pc->frame_num < pc->prev_frame_num)
        pc->frame_num_offset += max_frame_num;

    if (sps->poc_type == 0) {
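        /* poc_type 0: pic_order_cnt_lsb is coded in the slice header;
         * reconstruct the MSB part by detecting wrap-around against the
         * previous picture's POC LSB. */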
        const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;

        if (pc->poc_lsb < pc->prev_poc_lsb &&
            pc->prev_poc_lsb - pc->poc_lsb >= max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb + max_poc_lsb;
        else if (pc->poc_lsb > pc->prev_poc_lsb &&
                 pc->prev_poc_lsb - pc->poc_lsb < -max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb - max_poc_lsb;
        else
            pc->poc_msb = pc->prev_poc_msb;

        field_poc[0] =
        field_poc[1] = pc->poc_msb + pc->poc_lsb;
        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc_bottom;
    } else if (sps->poc_type == 1) {
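        /* poc_type 1: the POC is derived from frame_num using the expected
         * per-cycle deltas signalled in the SPS, plus the per-slice
         * delta_poc values. */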
        int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
        int i;

        if (sps->poc_cycle_length != 0)
            abs_frame_num = pc->frame_num_offset + pc->frame_num;
        else
            abs_frame_num = 0;

        if (nal_ref_idc == 0 && abs_frame_num > 0)
            abs_frame_num--;

        expected_delta_per_poc_cycle = 0;
        for (i = 0; i < sps->poc_cycle_length; i++)
            // FIXME integrate during sps parse
            expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];

        if (abs_frame_num > 0) {
            int poc_cycle_cnt          = (abs_frame_num - 1) / sps->poc_cycle_length;
            int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;

            expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
            for (i = 0; i <= frame_num_in_poc_cycle; i++)
                expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
        } else
            expectedpoc = 0;

        if (nal_ref_idc == 0)
            expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;

        field_poc[0] = expectedpoc + pc->delta_poc[0];
        field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;

        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc[1];
    } else {
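        /* poc_type 2: the POC follows directly from frame_num; non-reference
         * pictures get the preceding odd value. */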
        int poc = 2 * (pc->frame_num_offset + pc->frame_num);

        if (!nal_ref_idc)
            poc--;

        field_poc[0] = poc;
        field_poc[1] = poc;
    }

    if (picture_structure != PICT_BOTTOM_FIELD)
        pic_field_poc[0] = field_poc[0];
    if (picture_structure != PICT_TOP_FIELD)
        pic_field_poc[1] = field_poc[1];
    *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);

    return 0;
}
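
/* Split the extradata into NAL units and decode any SPS or PPS found;
 * all other NAL unit types are ignored with a verbose log message. */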
static int decode_extradata_ps(const uint8_t *data, int size, H264ParamSets *ps,
                               int is_avc, void *logctx)
{
    H2645Packet pkt = { 0 };
    int i, ret = 0;

    ret = ff_h2645_packet_split(&pkt, data, size, logctx, is_avc, 2, AV_CODEC_ID_H264);
    if (ret < 0)
        goto fail;

    for (i = 0; i < pkt.nb_nals; i++) {
        H2645NAL *nal = &pkt.nals[i];
        switch (nal->type) {
        case NAL_SPS:
            ret = ff_h264_decode_seq_parameter_set(&nal->gb, logctx, ps);
            if (ret < 0)
                goto fail;
            break;
        case NAL_PPS:
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, logctx, ps,
                                                       nal->size_bits);
            if (ret < 0)
                goto fail;
            break;
        default:
            av_log(logctx, AV_LOG_VERBOSE, "Ignoring NAL type %d in extradata\n",
                   nal->type);
            break;
        }
    }

fail:
    ff_h2645_packet_uninit(&pkt);
    return ret;
}

/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);
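
        /* Re-insert emulation prevention bytes: whenever the next three
         * bytes form a value <= 3 (i.e. 00 00 0x), write 00 00 03 and leave
         * the third input byte for the next iteration, so the unescaped RBSP
         * becomes a valid NAL unit payload again. */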
        while (bytestream2_get_bytes_left(&gbc)) {
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        av_freep(&escaped_buf);
        if (ret < 0)
            return ret;
    }

    return 0;
}
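
/* Decode SPS/PPS from the codec extradata, accepting both avcC (mp4-style,
 * first byte 1) and raw Annex B extradata. Sets *is_avc and, for avcC, the
 * NAL length field size used for all following NAL units. */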
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps,
                             int *is_avc, int *nal_length_size,
                             int err_recognition, void *logctx)
{
    int ret;

    if (data[0] == 1) {
        int i, cnt, nalsize;
        const uint8_t *p = data;

        *is_avc = 1;

        if (size < 7) {
            av_log(logctx, AV_LOG_ERROR, "avcC %d too short\n", size);
            return AVERROR_INVALIDDATA;
        }
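
        /* avcC layout: configurationVersion (1 byte), profile, profile
         * compatibility and level (3 bytes), lengthSizeMinusOne in the low
         * 2 bits of byte 4, the SPS count in the low 5 bits of byte 5, then
         * length-prefixed SPS NAL units followed by a PPS count and
         * length-prefixed PPS NAL units. */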
        // Decode sps from avcC
        cnt = *(p + 5) & 0x1f; // Number of sps
        p  += 6;
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (p - data + nalsize > size)
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding sps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }

        // Decode pps from avcC
        cnt = *(p++); // Number of pps
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (p - data + nalsize > size)
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding pps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }

        // Store right nal length size that will be used to parse all other nals
        *nal_length_size = (data[4] & 0x03) + 1;
    } else {
        *is_avc = 0;
        ret = decode_extradata_ps(data, size, ps, 0, logctx);
        if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * Compute profile from profile_idc and constraint_set?_flags.
 *
 * @param sps SPS
 *
 * @return profile as defined by FF_PROFILE_H264_*
 */
int ff_h264_get_profile(const SPS *sps)
{
    int profile = sps->profile_idc;

    switch (sps->profile_idc) {
    case FF_PROFILE_H264_BASELINE:
        // constraint_set1_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
        break;
    case FF_PROFILE_H264_HIGH_10:
    case FF_PROFILE_H264_HIGH_422:
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
        // constraint_set3_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
        break;
    }

    return profile;
}