/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "bytestream.h"
#include "get_bits.h"
#include "golomb.h"
#include "h264.h"
#include "h264dec.h"
#include "h264_parse.h"
#include "h264_ps.h"
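
/*
 * Rough outline (informative, not normative): the function below follows the
 * pred_weight_table() slice-header syntax of H.264 (ITU-T H.264, 7.3.3.2):
 * a luma and a chroma log2 weight denominator, then for every active
 * reference in each list an optional (weight, offset) pair for luma and for
 * both chroma components. References without explicit values get the default
 * weight 1 << log2_weight_denom and offset 0.
 */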

int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps,
                              const int *ref_count, int slice_type_nos,
                              H264PredWeightTable *pwt,
                              int picture_structure, void *logctx)
{
    int list, i, j;
    int luma_def, chroma_def;

    pwt->use_weight             = 0;
    pwt->use_weight_chroma      = 0;

    pwt->luma_log2_weight_denom = get_ue_golomb(gb);
    if (sps->chroma_format_idc)
        pwt->chroma_log2_weight_denom = get_ue_golomb(gb);

    if (pwt->luma_log2_weight_denom > 7U) {
        av_log(logctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", pwt->luma_log2_weight_denom);
        pwt->luma_log2_weight_denom = 0;
    }
    if (pwt->chroma_log2_weight_denom > 7U) {
        av_log(logctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", pwt->chroma_log2_weight_denom);
        pwt->chroma_log2_weight_denom = 0;
    }

    luma_def   = 1 << pwt->luma_log2_weight_denom;
    chroma_def = 1 << pwt->chroma_log2_weight_denom;

    for (list = 0; list < 2; list++) {
        pwt->luma_weight_flag[list]   = 0;
        pwt->chroma_weight_flag[list] = 0;
        for (i = 0; i < ref_count[list]; i++) {
            int luma_weight_flag, chroma_weight_flag;

            luma_weight_flag = get_bits1(gb);
            if (luma_weight_flag) {
                pwt->luma_weight[i][list][0] = get_se_golomb(gb);
                pwt->luma_weight[i][list][1] = get_se_golomb(gb);
                if ((int8_t)pwt->luma_weight[i][list][0] != pwt->luma_weight[i][list][0] ||
                    (int8_t)pwt->luma_weight[i][list][1] != pwt->luma_weight[i][list][1])
                    goto out_range_weight;
                if (pwt->luma_weight[i][list][0] != luma_def ||
                    pwt->luma_weight[i][list][1] != 0) {
                    pwt->use_weight             = 1;
                    pwt->luma_weight_flag[list] = 1;
                }
            } else {
                pwt->luma_weight[i][list][0] = luma_def;
                pwt->luma_weight[i][list][1] = 0;
            }

            if (sps->chroma_format_idc) {
                chroma_weight_flag = get_bits1(gb);
                if (chroma_weight_flag) {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = get_se_golomb(gb);
                        pwt->chroma_weight[i][list][j][1] = get_se_golomb(gb);
                        if ((int8_t)pwt->chroma_weight[i][list][j][0] != pwt->chroma_weight[i][list][j][0] ||
                            (int8_t)pwt->chroma_weight[i][list][j][1] != pwt->chroma_weight[i][list][j][1])
                            goto out_range_weight;
                        if (pwt->chroma_weight[i][list][j][0] != chroma_def ||
                            pwt->chroma_weight[i][list][j][1] != 0) {
                            pwt->use_weight_chroma        = 1;
                            pwt->chroma_weight_flag[list] = 1;
                        }
                    }
                } else {
                    int j;
                    for (j = 0; j < 2; j++) {
                        pwt->chroma_weight[i][list][j][0] = chroma_def;
                        pwt->chroma_weight[i][list][j][1] = 0;
                    }
                }
            }

            // for MBAFF
            if (picture_structure == PICT_FRAME) {
                pwt->luma_weight[16 + 2 * i][list][0] = pwt->luma_weight[16 + 2 * i + 1][list][0] = pwt->luma_weight[i][list][0];
                pwt->luma_weight[16 + 2 * i][list][1] = pwt->luma_weight[16 + 2 * i + 1][list][1] = pwt->luma_weight[i][list][1];
                for (j = 0; j < 2; j++) {
                    pwt->chroma_weight[16 + 2 * i][list][j][0] = pwt->chroma_weight[16 + 2 * i + 1][list][j][0] = pwt->chroma_weight[i][list][j][0];
                    pwt->chroma_weight[16 + 2 * i][list][j][1] = pwt->chroma_weight[16 + 2 * i + 1][list][j][1] = pwt->chroma_weight[i][list][j][1];
                }
            }
        }
        if (slice_type_nos != AV_PICTURE_TYPE_B)
            break;
    }
    pwt->use_weight = pwt->use_weight || pwt->use_weight_chroma;
    return 0;
out_range_weight:
    avpriv_request_sample(logctx, "Out of range weight\n");
    return AVERROR_INVALIDDATA;
}
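
/*
 * Note (informal): top_samples_available and left_samples_available are
 * bitmasks with one flag per 4x4 position; the constants tested below
 * (0x8000, 0x8888 and the per-row masks) select the blocks along the top
 * row and left column of the macroblock. When a required neighbour is
 * missing, the remap tables either substitute an edge-restricted DC mode
 * (LEFT_DC / TOP_DC / DC_128) or reject the requested prediction mode.
 */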

/**
 * Check if the top & left blocks are available if needed and
 * change the dc mode so it only uses the available blocks.
 */
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx,
                                     int top_samples_available, int left_samples_available)
{
    static const int8_t top[12] = {
        -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
    };
    static const int8_t left[12] = {
        0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
    };
    int i;

    if (!(top_samples_available & 0x8000)) {
        for (i = 0; i < 4; i++) {
            int status = top[pred_mode_cache[scan8[0] + i]];
            if (status < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "top block unavailable for requested intra mode %d\n",
                       status);
                return AVERROR_INVALIDDATA;
            } else if (status) {
                pred_mode_cache[scan8[0] + i] = status;
            }
        }
    }

    if ((left_samples_available & 0x8888) != 0x8888) {
        static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
        for (i = 0; i < 4; i++)
            if (!(left_samples_available & mask[i])) {
                int status = left[pred_mode_cache[scan8[0] + 8 * i]];
                if (status < 0) {
                    av_log(logctx, AV_LOG_ERROR,
                           "left block unavailable for requested intra4x4 mode %d\n",
                           status);
                    return AVERROR_INVALIDDATA;
                } else if (status) {
                    pred_mode_cache[scan8[0] + 8 * i] = status;
                }
            }
    }

    return 0;
}

/**
 * Check if the top & left blocks are available if needed and
 * change the dc mode so it only uses the available blocks.
 */
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available,
                                  int left_samples_available,
                                  int mode, int is_chroma)
{
    static const int8_t top[4]  = { LEFT_DC_PRED8x8, 1, -1, -1 };
    static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

    if (mode > 3U) {
        av_log(logctx, AV_LOG_ERROR,
               "out of range intra chroma pred mode\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(top_samples_available & 0x8000)) {
        mode = top[mode];
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "top block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if ((left_samples_available & 0x8080) != 0x8080) {
        mode = left[mode];
        if (mode < 0) {
            av_log(logctx, AV_LOG_ERROR,
                   "left block unavailable for requested intra mode\n");
            return AVERROR_INVALIDDATA;
        }
        if (is_chroma && (left_samples_available & 0x8080)) {
            // mad cow disease mode, aka MBAFF + constrained_intra_pred
            mode = ALZHEIMER_DC_L0T_PRED8x8 +
                   (!(left_samples_available & 0x8000)) +
                   2 * (mode == DC_128_PRED8x8);
        }
    }

    return mode;
}
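
/*
 * Informal summary: the reference counts default to the values carried in
 * the PPS; if num_ref_idx_active_override_flag is set in the slice header,
 * num_ref_idx_l0_active_minus1 (and, for B slices, the l1 counterpart) is
 * read instead. At most 16 references per list are allowed for frame coding
 * and 32 for field coding, hence the 15/31 bounds checked below.
 */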

int ff_h264_parse_ref_count(int *plist_count, int ref_count[2],
                            GetBitContext *gb, const PPS *pps,
                            int slice_type_nos, int picture_structure, void *logctx)
{
    int list_count;
    int num_ref_idx_active_override_flag;

    // set defaults, might be overridden a few lines later
    ref_count[0] = pps->ref_count[0];
    ref_count[1] = pps->ref_count[1];

    if (slice_type_nos != AV_PICTURE_TYPE_I) {
        unsigned max[2];
        max[0] = max[1] = picture_structure == PICT_FRAME ? 15 : 31;

        num_ref_idx_active_override_flag = get_bits1(gb);

        if (num_ref_idx_active_override_flag) {
            ref_count[0] = get_ue_golomb(gb) + 1;
            if (slice_type_nos == AV_PICTURE_TYPE_B) {
                ref_count[1] = get_ue_golomb(gb) + 1;
            } else
                // full range is spec-ok in this case, even for frames
                ref_count[1] = 1;
        }

        if (ref_count[0] - 1 > max[0] || ref_count[1] - 1 > max[1]) {
            av_log(logctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n",
                   ref_count[0] - 1, max[0], ref_count[1] - 1, max[1]);
            ref_count[0] = ref_count[1] = 0;
            *plist_count = 0;
            goto fail;
        }

        if (slice_type_nos == AV_PICTURE_TYPE_B)
            list_count = 2;
        else
            list_count = 1;
    } else {
        list_count   = 0;
        ref_count[0] = ref_count[1] = 0;
    }

    *plist_count = list_count;

    return 0;
fail:
    *plist_count = 0;
    ref_count[0] = 0;
    ref_count[1] = 0;
    return AVERROR_INVALIDDATA;
}
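
/*
 * Picture order count derivation, roughly following H.264 section 8.2.1:
 *   poc_type 0: POC = poc_msb + poc_lsb, with poc_msb tracked across the
 *               wrap-around of pic_order_cnt_lsb;
 *   poc_type 1: POC is derived from frame_num and the offset_for_ref_frame
 *               table signalled in the SPS;
 *   poc_type 2: POC = 2 * (frame_num_offset + frame_num), minus 1 for
 *               non-reference pictures.
 */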

int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
                     const SPS *sps, H264POCContext *pc,
                     int picture_structure, int nal_ref_idc)
{
    const int max_frame_num = 1 << sps->log2_max_frame_num;
    int field_poc[2];

    pc->frame_num_offset = pc->prev_frame_num_offset;
    if (pc->frame_num < pc->prev_frame_num)
        pc->frame_num_offset += max_frame_num;

    if (sps->poc_type == 0) {
        const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;

        if (pc->poc_lsb < pc->prev_poc_lsb &&
            pc->prev_poc_lsb - pc->poc_lsb >= max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb + max_poc_lsb;
        else if (pc->poc_lsb > pc->prev_poc_lsb &&
                 pc->prev_poc_lsb - pc->poc_lsb < -max_poc_lsb / 2)
            pc->poc_msb = pc->prev_poc_msb - max_poc_lsb;
        else
            pc->poc_msb = pc->prev_poc_msb;
        field_poc[0] =
        field_poc[1] = pc->poc_msb + pc->poc_lsb;
        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc_bottom;
    } else if (sps->poc_type == 1) {
        int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
        int i;

        if (sps->poc_cycle_length != 0)
            abs_frame_num = pc->frame_num_offset + pc->frame_num;
        else
            abs_frame_num = 0;

        if (nal_ref_idc == 0 && abs_frame_num > 0)
            abs_frame_num--;

        expected_delta_per_poc_cycle = 0;
        for (i = 0; i < sps->poc_cycle_length; i++)
            // FIXME integrate during sps parse
            expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];

        if (abs_frame_num > 0) {
            int poc_cycle_cnt          = (abs_frame_num - 1) / sps->poc_cycle_length;
            int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;

            expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
            for (i = 0; i <= frame_num_in_poc_cycle; i++)
                expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
        } else
            expectedpoc = 0;

        if (nal_ref_idc == 0)
            expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;

        field_poc[0] = expectedpoc + pc->delta_poc[0];
        field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;

        if (picture_structure == PICT_FRAME)
            field_poc[1] += pc->delta_poc[1];
    } else {
        int poc = 2 * (pc->frame_num_offset + pc->frame_num);

        if (!nal_ref_idc)
            poc--;

        field_poc[0] = poc;
        field_poc[1] = poc;
    }

    if (picture_structure != PICT_BOTTOM_FIELD)
        pic_field_poc[0] = field_poc[0];
    if (picture_structure != PICT_TOP_FIELD)
        pic_field_poc[1] = field_poc[1];
    *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);

    return 0;
}
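
/*
 * Helper overview: split the extradata buffer into NAL units (length-prefixed
 * when is_avc is set, Annex B start codes otherwise) and feed any SPS/PPS
 * units to the parameter-set parsers; all other NAL types are ignored here.
 */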

static int decode_extradata_ps(const uint8_t *data, int size, H264ParamSets *ps,
                               int is_avc, void *logctx)
{
    H2645Packet pkt = { 0 };
    int i, ret = 0;

    ret = ff_h2645_packet_split(&pkt, data, size, logctx, is_avc, 2, AV_CODEC_ID_H264, 1);
    if (ret < 0) {
        ret = 0;
        goto fail;
    }

    for (i = 0; i < pkt.nb_nals; i++) {
        H2645NAL *nal = &pkt.nals[i];
        switch (nal->type) {
        case H264_NAL_SPS:
            ret = ff_h264_decode_seq_parameter_set(&nal->gb, logctx, ps, 0);
            if (ret < 0)
                goto fail;
            break;
        case H264_NAL_PPS:
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, logctx, ps,
                                                       nal->size_bits);
            if (ret < 0)
                goto fail;
            break;
        default:
            av_log(logctx, AV_LOG_VERBOSE, "Ignoring NAL type %d in extradata\n",
                   nal->type);
            break;
        }
    }

fail:
    ff_h2645_packet_uninit(&pkt);
    return ret;
}

/* There are (invalid) samples in the wild with mp4-style extradata, where the
 * parameter sets are stored unescaped (i.e. as RBSP).
 * This function catches the parameter set decoding failure and tries again
 * after escaping it */
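/*
 * Informal note: "escaping" here means re-inserting the emulation prevention
 * byte. Whenever the next three payload bytes form a value of 3 or less
 * (0x000000..0x000003), the loop below emits 0x00 0x00 0x03 and consumes only
 * the two zero bytes, so the original third byte follows the inserted 0x03 and
 * the result parses as a properly escaped NAL unit.
 */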

static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
                                   int err_recognition, void *logctx)
{
    int ret;

    ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
    if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
        GetByteContext gbc;
        PutByteContext pbc;
        uint8_t *escaped_buf;
        int escaped_buf_size;

        av_log(logctx, AV_LOG_WARNING,
               "SPS decoding failure, trying again after escaping the NAL\n");

        if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
            return AVERROR(ERANGE);
        escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
        escaped_buf = av_mallocz(escaped_buf_size);
        if (!escaped_buf)
            return AVERROR(ENOMEM);

        bytestream2_init(&gbc, buf, buf_size);
        bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);

        while (bytestream2_get_bytes_left(&gbc)) {
            if (bytestream2_get_bytes_left(&gbc) >= 3 &&
                bytestream2_peek_be24(&gbc) <= 3) {
                bytestream2_put_be24(&pbc, 3);
                bytestream2_skip(&gbc, 2);
            } else
                bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
        }

        escaped_buf_size = bytestream2_tell_p(&pbc);
        AV_WB16(escaped_buf, escaped_buf_size - 2);

        ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
        av_freep(&escaped_buf);
        if (ret < 0)
            return ret;
    }

    return 0;
}
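
/*
 * Layout of mp4/avcC extradata as parsed below (AVCDecoderConfigurationRecord,
 * informative sketch):
 *   byte 0       configurationVersion (must be 1)
 *   bytes 1..3   profile, profile compatibility, level
 *   byte 4       low 2 bits: lengthSizeMinusOne (NAL length field size - 1)
 *   byte 5       low 5 bits: number of SPS entries
 *   then each SPS as a 16-bit big-endian size followed by the NAL data,
 *   a one-byte PPS count, and the PPS entries in the same size-prefixed form.
 * Extradata not starting with configurationVersion == 1 is treated as
 * Annex B (start-code prefixed) data.
 */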

int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps,
                             int *is_avc, int *nal_length_size,
                             int err_recognition, void *logctx)
{
    int ret;

    if (!data || size <= 0)
        return -1;

    if (data[0] == 1) {
        int i, cnt, nalsize;
        const uint8_t *p = data;

        *is_avc = 1;

        if (size < 7) {
            av_log(logctx, AV_LOG_ERROR, "avcC %d too short\n", size);
            return AVERROR_INVALIDDATA;
        }

        // Decode the SPS entries from the avcC
        cnt = *(p + 5) & 0x1f; // Number of SPS
        p  += 6;
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (nalsize > size - (p - data))
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding sps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }
        // Decode the PPS entries from the avcC
        cnt = *(p++); // Number of PPS
        for (i = 0; i < cnt; i++) {
            nalsize = AV_RB16(p) + 2;
            if (nalsize > size - (p - data))
                return AVERROR_INVALIDDATA;
            ret = decode_extradata_ps_mp4(p, nalsize, ps, err_recognition, logctx);
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR,
                       "Decoding pps %d from avcC failed\n", i);
                return ret;
            }
            p += nalsize;
        }
        // Store the NAL length size that will be used to parse all other NALs
        *nal_length_size = (data[4] & 0x03) + 1;
    } else {
        *is_avc = 0;
        ret = decode_extradata_ps(data, size, ps, 0, logctx);
        if (ret < 0)
            return ret;
    }
    return size;
}
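
/*
 * Informative: constraint_set1_flag on a Baseline stream marks Constrained
 * Baseline, and constraint_set3_flag on the High 10 / High 4:2:2 /
 * High 4:4:4 Predictive profiles marks their Intra-only variants
 * (see H.264 Annex A); the mapping below encodes exactly that.
 */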

/**
 * Compute profile from profile_idc and constraint_set?_flags.
 *
 * @param sps SPS
 *
 * @return profile as defined by FF_PROFILE_H264_*
 */
int ff_h264_get_profile(const SPS *sps)
{
    int profile = sps->profile_idc;

    switch (sps->profile_idc) {
    case FF_PROFILE_H264_BASELINE:
        // constraint_set1_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
        break;
    case FF_PROFILE_H264_HIGH_10:
    case FF_PROFILE_H264_HIGH_422:
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
        // constraint_set3_flag set to 1
        profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
        break;
    }

    return profile;
}