  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... decoder
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 codec.
  24. * @author Michael Niedermayer <michaelni@gmx.at>
  25. */
  26. #define UNCHECKED_BITSTREAM_READER 1
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/display.h"
  29. #include "libavutil/imgutils.h"
  30. #include "libavutil/opt.h"
  31. #include "libavutil/stereo3d.h"
  32. #include "libavutil/timer.h"
  33. #include "internal.h"
  34. #include "cabac.h"
  35. #include "cabac_functions.h"
  36. #include "error_resilience.h"
  37. #include "avcodec.h"
  38. #include "h264.h"
  39. #include "h264data.h"
  40. #include "h264chroma.h"
  41. #include "h264_mvpred.h"
  42. #include "golomb.h"
  43. #include "mathops.h"
  44. #include "me_cmp.h"
  45. #include "mpegutils.h"
  46. #include "rectangle.h"
  47. #include "svq3.h"
  48. #include "thread.h"
  49. #include "vdpau_internal.h"
  50. const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
  51. int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
  52. {
  53. H264Context *h = avctx->priv_data;
  54. return h ? h->sps.num_reorder_frames : 0;
  55. }
  56. static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
  57. int (*mv)[2][4][2],
  58. int mb_x, int mb_y, int mb_intra, int mb_skipped)
  59. {
  60. H264Context *h = opaque;
  61. H264SliceContext *sl = &h->slice_ctx[0];
  62. h->mb_x = mb_x;
  63. h->mb_y = mb_y;
  64. h->mb_xy = mb_x + mb_y * h->mb_stride;
  65. memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
  66. av_assert1(ref >= 0);
  67. /* FIXME: It is possible albeit uncommon that slice references
  68. * differ between slices. We take the easy approach and ignore
  69. * it for now. If this turns out to have any relevance in
  70. * practice then correct remapping should be added. */
  71. if (ref >= sl->ref_count[0])
  72. ref = 0;
  73. if (!sl->ref_list[0][ref].f.data[0]) {
  74. av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
  75. ref = 0;
  76. }
  77. if ((sl->ref_list[0][ref].reference&3) != 3) {
  78. av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
  79. return;
  80. }
  81. fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
  82. 2, 2, 2, ref, 1);
  83. fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
  84. fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
  85. pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
  86. h->mb_mbaff =
  87. h->mb_field_decoding_flag = 0;
  88. ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
  89. }
  90. void ff_h264_draw_horiz_band(H264Context *h, H264SliceContext *sl,
  91. int y, int height)
  92. {
  93. AVCodecContext *avctx = h->avctx;
  94. AVFrame *cur = &h->cur_pic.f;
  95. AVFrame *last = sl->ref_list[0][0].f.data[0] ? &sl->ref_list[0][0].f : NULL;
  96. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
  97. int vshift = desc->log2_chroma_h;
  98. const int field_pic = h->picture_structure != PICT_FRAME;
  99. if (field_pic) {
  100. height <<= 1;
  101. y <<= 1;
  102. }
  103. height = FFMIN(height, avctx->height - y);
  104. if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
  105. return;
  106. if (avctx->draw_horiz_band) {
  107. AVFrame *src;
  108. int offset[AV_NUM_DATA_POINTERS];
  109. int i;
  110. if (cur->pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
  111. (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
  112. src = cur;
  113. else if (last)
  114. src = last;
  115. else
  116. return;
  117. offset[0] = y * src->linesize[0];
  118. offset[1] =
  119. offset[2] = (y >> vshift) * src->linesize[1];
  120. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  121. offset[i] = 0;
  122. emms_c();
  123. avctx->draw_horiz_band(avctx, src, offset,
  124. y, h->picture_structure, height);
  125. }
  126. }
  127. /**
  128. * Check if the top & left blocks are available if needed and
  129. * change the dc mode so it only uses the available blocks.
  130. */
  131. int ff_h264_check_intra4x4_pred_mode(H264Context *h, H264SliceContext *sl)
  132. {
  133. static const int8_t top[12] = {
  134. -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
  135. };
  136. static const int8_t left[12] = {
  137. 0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
  138. };
  139. int i;
  140. if (!(sl->top_samples_available & 0x8000)) {
  141. for (i = 0; i < 4; i++) {
  142. int status = top[sl->intra4x4_pred_mode_cache[scan8[0] + i]];
  143. if (status < 0) {
  144. av_log(h->avctx, AV_LOG_ERROR,
  145. "top block unavailable for requested intra4x4 mode %d at %d %d\n",
  146. status, h->mb_x, h->mb_y);
  147. return AVERROR_INVALIDDATA;
  148. } else if (status) {
  149. sl->intra4x4_pred_mode_cache[scan8[0] + i] = status;
  150. }
  151. }
  152. }
  153. if ((sl->left_samples_available & 0x8888) != 0x8888) {
  154. static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
  155. for (i = 0; i < 4; i++)
  156. if (!(sl->left_samples_available & mask[i])) {
  157. int status = left[sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
  158. if (status < 0) {
  159. av_log(h->avctx, AV_LOG_ERROR,
  160. "left block unavailable for requested intra4x4 mode %d at %d %d\n",
  161. status, h->mb_x, h->mb_y);
  162. return AVERROR_INVALIDDATA;
  163. } else if (status) {
  164. sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
  165. }
  166. }
  167. }
  168. return 0;
  169. } // FIXME cleanup like ff_h264_check_intra_pred_mode
  170. /**
  171. * Check if the top & left blocks are available if needed and
  172. * change the dc mode so it only uses the available blocks.
  173. */
  174. int ff_h264_check_intra_pred_mode(H264Context *h, H264SliceContext *sl,
  175. int mode, int is_chroma)
  176. {
  177. static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
  178. static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
  179. if (mode > 3U) {
  180. av_log(h->avctx, AV_LOG_ERROR,
  181. "out of range intra chroma pred mode at %d %d\n",
  182. h->mb_x, h->mb_y);
  183. return AVERROR_INVALIDDATA;
  184. }
  185. if (!(sl->top_samples_available & 0x8000)) {
  186. mode = top[mode];
  187. if (mode < 0) {
  188. av_log(h->avctx, AV_LOG_ERROR,
  189. "top block unavailable for requested intra mode at %d %d\n",
  190. h->mb_x, h->mb_y);
  191. return AVERROR_INVALIDDATA;
  192. }
  193. }
  194. if ((sl->left_samples_available & 0x8080) != 0x8080) {
  195. mode = left[mode];
  196. if (mode < 0) {
  197. av_log(h->avctx, AV_LOG_ERROR,
  198. "left block unavailable for requested intra mode at %d %d\n",
  199. h->mb_x, h->mb_y);
  200. return AVERROR_INVALIDDATA;
  201. }
  202. if (is_chroma && (sl->left_samples_available & 0x8080)) {
  203. // mad cow disease mode, aka MBAFF + constrained_intra_pred
  204. mode = ALZHEIMER_DC_L0T_PRED8x8 +
  205. (!(sl->left_samples_available & 0x8000)) +
  206. 2 * (mode == DC_128_PRED8x8);
  207. }
  208. }
  209. return mode;
  210. }
  211. const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
  212. int *dst_length, int *consumed, int length)
  213. {
  214. int i, si, di;
  215. uint8_t *dst;
  216. int bufidx;
  217. // src[0]&0x80; // forbidden bit
  218. h->nal_ref_idc = src[0] >> 5;
  219. h->nal_unit_type = src[0] & 0x1F;
  220. src++;
  221. length--;
  222. #define STARTCODE_TEST \
  223. if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
  224. if (src[i + 2] != 3 && src[i + 2] != 0) { \
  225. /* startcode, so we must be past the end */ \
  226. length = i; \
  227. } \
  228. break; \
  229. }
  230. #if HAVE_FAST_UNALIGNED
  231. #define FIND_FIRST_ZERO \
  232. if (i > 0 && !src[i]) \
  233. i--; \
  234. while (src[i]) \
  235. i++
  236. #if HAVE_FAST_64BIT
  237. for (i = 0; i + 1 < length; i += 9) {
  238. if (!((~AV_RN64A(src + i) &
  239. (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
  240. 0x8000800080008080ULL))
  241. continue;
  242. FIND_FIRST_ZERO;
  243. STARTCODE_TEST;
  244. i -= 7;
  245. }
  246. #else
  247. for (i = 0; i + 1 < length; i += 5) {
  248. if (!((~AV_RN32A(src + i) &
  249. (AV_RN32A(src + i) - 0x01000101U)) &
  250. 0x80008080U))
  251. continue;
  252. FIND_FIRST_ZERO;
  253. STARTCODE_TEST;
  254. i -= 3;
  255. }
  256. #endif
  257. #else
  258. for (i = 0; i + 1 < length; i += 2) {
  259. if (src[i])
  260. continue;
  261. if (i > 0 && src[i - 1] == 0)
  262. i--;
  263. STARTCODE_TEST;
  264. }
  265. #endif
  266. // use second escape buffer for inter data
  267. bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
  268. av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
  269. dst = h->rbsp_buffer[bufidx];
  270. if (!dst)
  271. return NULL;
  272. if(i>=length-1){ //no escaped 0
  273. *dst_length= length;
  274. *consumed= length+1; //+1 for the header
  275. if(h->avctx->flags2 & CODEC_FLAG2_FAST){
  276. return src;
  277. }else{
  278. memcpy(dst, src, length);
  279. return dst;
  280. }
  281. }
  282. memcpy(dst, src, i);
  283. si = di = i;
  284. while (si + 2 < length) {
  285. // remove escapes (very rare 1:2^22)
  286. if (src[si + 2] > 3) {
  287. dst[di++] = src[si++];
  288. dst[di++] = src[si++];
  289. } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
  290. if (src[si + 2] == 3) { // escape
  291. dst[di++] = 0;
  292. dst[di++] = 0;
  293. si += 3;
  294. continue;
  295. } else // next start code
  296. goto nsc;
  297. }
  298. dst[di++] = src[si++];
  299. }
  300. while (si < length)
  301. dst[di++] = src[si++];
  302. nsc:
  303. memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
  304. *dst_length = di;
  305. *consumed = si + 1; // +1 for the header
  306. /* FIXME store exact number of bits in the getbitcontext
  307. * (it is needed for decoding) */
  308. return dst;
  309. }
  310. /**
  311. * Identify the exact end of the bitstream
  312. * @return the length of the trailing, or 0 if damaged
  313. */
  314. static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
  315. {
  316. int v = *src;
  317. int r;
  318. tprintf(h->avctx, "rbsp trailing %X\n", v);
  319. for (r = 1; r < 9; r++) {
  320. if (v & 1)
  321. return r;
  322. v >>= 1;
  323. }
  324. return 0;
  325. }
  326. void ff_h264_free_tables(H264Context *h, int free_rbsp)
  327. {
  328. int i;
  329. H264Context *hx;
  330. av_freep(&h->intra4x4_pred_mode);
  331. av_freep(&h->chroma_pred_mode_table);
  332. av_freep(&h->cbp_table);
  333. av_freep(&h->mvd_table[0]);
  334. av_freep(&h->mvd_table[1]);
  335. av_freep(&h->direct_table);
  336. av_freep(&h->non_zero_count);
  337. av_freep(&h->slice_table_base);
  338. h->slice_table = NULL;
  339. av_freep(&h->list_counts);
  340. av_freep(&h->mb2b_xy);
  341. av_freep(&h->mb2br_xy);
  342. av_buffer_pool_uninit(&h->qscale_table_pool);
  343. av_buffer_pool_uninit(&h->mb_type_pool);
  344. av_buffer_pool_uninit(&h->motion_val_pool);
  345. av_buffer_pool_uninit(&h->ref_index_pool);
  346. if (free_rbsp && h->DPB) {
  347. for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
  348. ff_h264_unref_picture(h, &h->DPB[i]);
  349. memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
  350. av_freep(&h->DPB);
  351. } else if (h->DPB) {
  352. for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
  353. h->DPB[i].needs_realloc = 1;
  354. }
  355. h->cur_pic_ptr = NULL;
  356. for (i = 0; i < H264_MAX_THREADS; i++) {
  357. hx = h->thread_context[i];
  358. if (!hx)
  359. continue;
  360. av_freep(&hx->top_borders[1]);
  361. av_freep(&hx->top_borders[0]);
  362. av_freep(&hx->bipred_scratchpad);
  363. av_freep(&hx->edge_emu_buffer);
  364. av_freep(&hx->dc_val_base);
  365. av_freep(&hx->er.mb_index2xy);
  366. av_freep(&hx->er.error_status_table);
  367. av_freep(&hx->er.er_temp_buffer);
  368. av_freep(&hx->er.mbintra_table);
  369. av_freep(&hx->er.mbskip_table);
  370. if (free_rbsp) {
  371. av_freep(&hx->rbsp_buffer[1]);
  372. av_freep(&hx->rbsp_buffer[0]);
  373. hx->rbsp_buffer_size[0] = 0;
  374. hx->rbsp_buffer_size[1] = 0;
  375. }
  376. if (i)
  377. av_freep(&h->thread_context[i]);
  378. }
  379. }
  380. int ff_h264_alloc_tables(H264Context *h)
  381. {
  382. const int big_mb_num = h->mb_stride * (h->mb_height + 1);
  383. const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
  384. int x, y, i;
  385. FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
  386. row_mb_num, 8 * sizeof(uint8_t), fail)
  387. h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
  388. FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
  389. big_mb_num * 48 * sizeof(uint8_t), fail)
  390. FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
  391. (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
  392. FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
  393. big_mb_num * sizeof(uint16_t), fail)
  394. FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
  395. big_mb_num * sizeof(uint8_t), fail)
  396. FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
  397. row_mb_num, 16 * sizeof(uint8_t), fail);
  398. FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
  399. row_mb_num, 16 * sizeof(uint8_t), fail);
  400. h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
  401. h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
  402. FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
  403. 4 * big_mb_num * sizeof(uint8_t), fail);
  404. FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
  405. big_mb_num * sizeof(uint8_t), fail)
  406. memset(h->slice_table_base, -1,
  407. (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
  408. h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
  409. FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
  410. big_mb_num * sizeof(uint32_t), fail);
  411. FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
  412. big_mb_num * sizeof(uint32_t), fail);
  413. for (y = 0; y < h->mb_height; y++)
  414. for (x = 0; x < h->mb_width; x++) {
  415. const int mb_xy = x + y * h->mb_stride;
  416. const int b_xy = 4 * x + 4 * y * h->b_stride;
  417. h->mb2b_xy[mb_xy] = b_xy;
  418. h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
  419. }
  420. if (!h->dequant4_coeff[0])
  421. ff_h264_init_dequant_tables(h);
  422. if (!h->DPB) {
  423. h->DPB = av_mallocz_array(H264_MAX_PICTURE_COUNT, sizeof(*h->DPB));
  424. if (!h->DPB)
  425. goto fail;
  426. for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
  427. av_frame_unref(&h->DPB[i].f);
  428. av_frame_unref(&h->cur_pic.f);
  429. }
  430. return 0;
  431. fail:
  432. ff_h264_free_tables(h, 1);
  433. return AVERROR(ENOMEM);
  434. }
  435. /**
  436. * Init context
  437. * Allocate buffers which are not shared amongst multiple threads.
  438. */
  439. int ff_h264_context_init(H264Context *h)
  440. {
  441. ERContext *er = &h->er;
  442. int mb_array_size = h->mb_height * h->mb_stride;
  443. int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
  444. int c_size = h->mb_stride * (h->mb_height + 1);
  445. int yc_size = y_size + 2 * c_size;
  446. int x, y, i;
  447. FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->top_borders[0],
  448. h->mb_width, 16 * 3 * sizeof(uint8_t) * 2, fail)
  449. FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->top_borders[1],
  450. h->mb_width, 16 * 3 * sizeof(uint8_t) * 2, fail)
  451. for (i = 0; i < h->nb_slice_ctx; i++) {
  452. h->slice_ctx[i].ref_cache[0][scan8[5] + 1] =
  453. h->slice_ctx[i].ref_cache[0][scan8[7] + 1] =
  454. h->slice_ctx[i].ref_cache[0][scan8[13] + 1] =
  455. h->slice_ctx[i].ref_cache[1][scan8[5] + 1] =
  456. h->slice_ctx[i].ref_cache[1][scan8[7] + 1] =
  457. h->slice_ctx[i].ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
  458. }
  459. if (CONFIG_ERROR_RESILIENCE) {
  460. /* init ER */
  461. er->avctx = h->avctx;
  462. er->decode_mb = h264_er_decode_mb;
  463. er->opaque = h;
  464. er->quarter_sample = 1;
  465. er->mb_num = h->mb_num;
  466. er->mb_width = h->mb_width;
  467. er->mb_height = h->mb_height;
  468. er->mb_stride = h->mb_stride;
  469. er->b8_stride = h->mb_width * 2 + 1;
  470. // error resilience code looks cleaner with this
  471. FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
  472. (h->mb_num + 1) * sizeof(int), fail);
  473. for (y = 0; y < h->mb_height; y++)
  474. for (x = 0; x < h->mb_width; x++)
  475. er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
  476. er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
  477. h->mb_stride + h->mb_width;
  478. FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
  479. mb_array_size * sizeof(uint8_t), fail);
  480. FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
  481. memset(er->mbintra_table, 1, mb_array_size);
  482. FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
  483. FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
  484. h->mb_height * h->mb_stride, fail);
  485. FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base,
  486. yc_size * sizeof(int16_t), fail);
  487. er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
  488. er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
  489. er->dc_val[2] = er->dc_val[1] + c_size;
  490. for (i = 0; i < yc_size; i++)
  491. h->dc_val_base[i] = 1024;
  492. }
  493. return 0;
  494. fail:
  495. return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
  496. }
  497. static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
  498. int parse_extradata);
  499. int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
  500. {
  501. AVCodecContext *avctx = h->avctx;
  502. int ret;
  503. if (!buf || size <= 0)
  504. return -1;
  505. if (buf[0] == 1) {
  506. int i, cnt, nalsize;
  507. const unsigned char *p = buf;
  508. h->is_avc = 1;
  509. if (size < 7) {
  510. av_log(avctx, AV_LOG_ERROR,
  511. "avcC %d too short\n", size);
  512. return AVERROR_INVALIDDATA;
  513. }
  514. /* sps and pps in the avcC always have length coded with 2 bytes,
  515. * so put a fake nal_length_size = 2 while parsing them */
  516. h->nal_length_size = 2;
  517. // Decode sps from avcC
  518. cnt = *(p + 5) & 0x1f; // Number of sps
  519. p += 6;
  520. for (i = 0; i < cnt; i++) {
  521. nalsize = AV_RB16(p) + 2;
  522. if(nalsize > size - (p-buf))
  523. return AVERROR_INVALIDDATA;
  524. ret = decode_nal_units(h, p, nalsize, 1);
  525. if (ret < 0) {
  526. av_log(avctx, AV_LOG_ERROR,
  527. "Decoding sps %d from avcC failed\n", i);
  528. return ret;
  529. }
  530. p += nalsize;
  531. }
  532. // Decode pps from avcC
  533. cnt = *(p++); // Number of pps
  534. for (i = 0; i < cnt; i++) {
  535. nalsize = AV_RB16(p) + 2;
  536. if(nalsize > size - (p-buf))
  537. return AVERROR_INVALIDDATA;
  538. ret = decode_nal_units(h, p, nalsize, 1);
  539. if (ret < 0) {
  540. av_log(avctx, AV_LOG_ERROR,
  541. "Decoding pps %d from avcC failed\n", i);
  542. return ret;
  543. }
  544. p += nalsize;
  545. }
  546. // Store right nal length size that will be used to parse all other nals
  547. h->nal_length_size = (buf[4] & 0x03) + 1;
  548. } else {
  549. h->is_avc = 0;
  550. ret = decode_nal_units(h, buf, size, 1);
  551. if (ret < 0)
  552. return ret;
  553. }
  554. return size;
  555. }
  556. av_cold int ff_h264_decode_init(AVCodecContext *avctx)
  557. {
  558. H264Context *h = avctx->priv_data;
  559. int i;
  560. int ret;
  561. h->avctx = avctx;
  562. h->bit_depth_luma = 8;
  563. h->chroma_format_idc = 1;
  564. h->avctx->bits_per_raw_sample = 8;
  565. h->cur_chroma_format_idc = 1;
  566. ff_h264dsp_init(&h->h264dsp, 8, 1);
  567. av_assert0(h->sps.bit_depth_chroma == 0);
  568. ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
  569. ff_h264qpel_init(&h->h264qpel, 8);
  570. ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
  571. h->dequant_coeff_pps = -1;
  572. h->current_sps_id = -1;
  573. /* needed so that IDCT permutation is known early */
  574. ff_videodsp_init(&h->vdsp, 8);
  575. memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
  576. memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
  577. h->picture_structure = PICT_FRAME;
  578. h->slice_context_count = 1;
  579. h->workaround_bugs = avctx->workaround_bugs;
  580. h->flags = avctx->flags;
  581. /* set defaults */
  582. // s->decode_mb = ff_h263_decode_mb;
  583. if (!avctx->has_b_frames)
  584. h->low_delay = 1;
  585. avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
  586. ff_h264_decode_init_vlc();
  587. ff_init_cabac_states();
  588. h->pixel_shift = 0;
  589. h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
  590. h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? H264_MAX_THREADS : 1;
  591. h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
  592. if (!h->slice_ctx) {
  593. h->nb_slice_ctx = 0;
  594. return AVERROR(ENOMEM);
  595. }
  596. h->thread_context[0] = h;
  597. for (i = 0; i < h->nb_slice_ctx; i++)
  598. h->slice_ctx[i].h264 = h->thread_context[0];
  599. h->outputed_poc = h->next_outputed_poc = INT_MIN;
  600. for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
  601. h->last_pocs[i] = INT_MIN;
  602. h->prev_poc_msb = 1 << 16;
  603. h->prev_frame_num = -1;
  604. h->x264_build = -1;
  605. h->sei_fpa.frame_packing_arrangement_cancel_flag = -1;
  606. ff_h264_reset_sei(h);
  607. if (avctx->codec_id == AV_CODEC_ID_H264) {
  608. if (avctx->ticks_per_frame == 1) {
  609. if(h->avctx->time_base.den < INT_MAX/2) {
  610. h->avctx->time_base.den *= 2;
  611. } else
  612. h->avctx->time_base.num /= 2;
  613. }
  614. avctx->ticks_per_frame = 2;
  615. }
  616. if (avctx->extradata_size > 0 && avctx->extradata) {
  617. ret = ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
  618. if (ret < 0) {
  619. ff_h264_free_context(h);
  620. return ret;
  621. }
  622. }
  623. if (h->sps.bitstream_restriction_flag &&
  624. h->avctx->has_b_frames < h->sps.num_reorder_frames) {
  625. h->avctx->has_b_frames = h->sps.num_reorder_frames;
  626. h->low_delay = 0;
  627. }
  628. avctx->internal->allocate_progress = 1;
  629. ff_h264_flush_change(h);
  630. return 0;
  631. }
  632. static int decode_init_thread_copy(AVCodecContext *avctx)
  633. {
  634. H264Context *h = avctx->priv_data;
  635. int i;
  636. if (!avctx->internal->is_copy)
  637. return 0;
  638. memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
  639. memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
  640. h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? H264_MAX_THREADS : 1;
  641. h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
  642. if (!h->slice_ctx) {
  643. h->nb_slice_ctx = 0;
  644. return AVERROR(ENOMEM);
  645. }
  646. for (i = 0; i < h->nb_slice_ctx; i++)
  647. h->slice_ctx[i].h264 = h;
  648. h->avctx = avctx;
  649. h->rbsp_buffer[0] = NULL;
  650. h->rbsp_buffer[1] = NULL;
  651. h->rbsp_buffer_size[0] = 0;
  652. h->rbsp_buffer_size[1] = 0;
  653. h->context_initialized = 0;
  654. return 0;
  655. }
  656. /**
  657. * Run setup operations that must be run after slice header decoding.
  658. * This includes finding the next displayed frame.
  659. *
  660. * @param h h264 master context
  661. * @param setup_finished enough NALs have been read that we can call
  662. * ff_thread_finish_setup()
  663. */
  664. static void decode_postinit(H264Context *h, int setup_finished)
  665. {
  666. H264Picture *out = h->cur_pic_ptr;
  667. H264Picture *cur = h->cur_pic_ptr;
  668. int i, pics, out_of_order, out_idx;
  669. h->cur_pic_ptr->f.pict_type = h->pict_type;
  670. if (h->next_output_pic)
  671. return;
  672. if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
  673. /* FIXME: if we have two PAFF fields in one packet, we can't start
  674. * the next thread here. If we have one field per packet, we can.
  675. * The check in decode_nal_units() is not good enough to find this
  676. * yet, so we assume the worst for now. */
  677. // if (setup_finished)
  678. // ff_thread_finish_setup(h->avctx);
  679. if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
  680. return;
  681. if (h->avctx->hwaccel || h->missing_fields <=1)
  682. return;
  683. }
  684. cur->f.interlaced_frame = 0;
  685. cur->f.repeat_pict = 0;
  686. /* Signal interlacing information externally. */
  687. /* Prioritize picture timing SEI information over used
  688. * decoding process if it exists. */
  689. if (h->sps.pic_struct_present_flag) {
  690. switch (h->sei_pic_struct) {
  691. case SEI_PIC_STRUCT_FRAME:
  692. break;
  693. case SEI_PIC_STRUCT_TOP_FIELD:
  694. case SEI_PIC_STRUCT_BOTTOM_FIELD:
  695. cur->f.interlaced_frame = 1;
  696. break;
  697. case SEI_PIC_STRUCT_TOP_BOTTOM:
  698. case SEI_PIC_STRUCT_BOTTOM_TOP:
  699. if (FIELD_OR_MBAFF_PICTURE(h))
  700. cur->f.interlaced_frame = 1;
  701. else
  702. // try to flag soft telecine progressive
  703. cur->f.interlaced_frame = h->prev_interlaced_frame;
  704. break;
  705. case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
  706. case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
  707. /* Signal the possibility of telecined film externally
  708. * (pic_struct 5,6). From these hints, let the applications
  709. * decide if they apply deinterlacing. */
  710. cur->f.repeat_pict = 1;
  711. break;
  712. case SEI_PIC_STRUCT_FRAME_DOUBLING:
  713. cur->f.repeat_pict = 2;
  714. break;
  715. case SEI_PIC_STRUCT_FRAME_TRIPLING:
  716. cur->f.repeat_pict = 4;
  717. break;
  718. }
  719. if ((h->sei_ct_type & 3) &&
  720. h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
  721. cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
  722. } else {
  723. /* Derive interlacing flag from used decoding process. */
  724. cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
  725. }
  726. h->prev_interlaced_frame = cur->f.interlaced_frame;
  727. if (cur->field_poc[0] != cur->field_poc[1]) {
  728. /* Derive top_field_first from field pocs. */
  729. cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
  730. } else {
  731. if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
  732. /* Use picture timing SEI information. Even if it is a
  733. * information of a past frame, better than nothing. */
  734. if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
  735. h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
  736. cur->f.top_field_first = 1;
  737. else
  738. cur->f.top_field_first = 0;
  739. } else {
  740. /* Most likely progressive */
  741. cur->f.top_field_first = 0;
  742. }
  743. }
  744. if (h->sei_frame_packing_present &&
  745. h->frame_packing_arrangement_type >= 0 &&
  746. h->frame_packing_arrangement_type <= 6 &&
  747. h->content_interpretation_type > 0 &&
  748. h->content_interpretation_type < 3) {
  749. AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
  750. if (stereo) {
  751. switch (h->frame_packing_arrangement_type) {
  752. case 0:
  753. stereo->type = AV_STEREO3D_CHECKERBOARD;
  754. break;
  755. case 1:
  756. stereo->type = AV_STEREO3D_COLUMNS;
  757. break;
  758. case 2:
  759. stereo->type = AV_STEREO3D_LINES;
  760. break;
  761. case 3:
  762. if (h->quincunx_subsampling)
  763. stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
  764. else
  765. stereo->type = AV_STEREO3D_SIDEBYSIDE;
  766. break;
  767. case 4:
  768. stereo->type = AV_STEREO3D_TOPBOTTOM;
  769. break;
  770. case 5:
  771. stereo->type = AV_STEREO3D_FRAMESEQUENCE;
  772. break;
  773. case 6:
  774. stereo->type = AV_STEREO3D_2D;
  775. break;
  776. }
  777. if (h->content_interpretation_type == 2)
  778. stereo->flags = AV_STEREO3D_FLAG_INVERT;
  779. }
  780. }
  781. if (h->sei_display_orientation_present &&
  782. (h->sei_anticlockwise_rotation || h->sei_hflip || h->sei_vflip)) {
  783. double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
  784. AVFrameSideData *rotation = av_frame_new_side_data(&cur->f,
  785. AV_FRAME_DATA_DISPLAYMATRIX,
  786. sizeof(int32_t) * 9);
  787. if (rotation) {
  788. av_display_rotation_set((int32_t *)rotation->data, angle);
  789. av_display_matrix_flip((int32_t *)rotation->data,
  790. h->sei_hflip, h->sei_vflip);
  791. }
  792. }
  793. cur->mmco_reset = h->mmco_reset;
  794. h->mmco_reset = 0;
  795. // FIXME do something with unavailable reference frames
  796. /* Sort B-frames into display order */
  797. if (h->sps.bitstream_restriction_flag &&
  798. h->avctx->has_b_frames < h->sps.num_reorder_frames) {
  799. h->avctx->has_b_frames = h->sps.num_reorder_frames;
  800. h->low_delay = 0;
  801. }
  802. if (h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT &&
  803. !h->sps.bitstream_restriction_flag) {
  804. h->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1;
  805. h->low_delay = 0;
  806. }
  807. for (i = 0; 1; i++) {
  808. if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
  809. if(i)
  810. h->last_pocs[i-1] = cur->poc;
  811. break;
  812. } else if(i) {
  813. h->last_pocs[i-1]= h->last_pocs[i];
  814. }
  815. }
  816. out_of_order = MAX_DELAYED_PIC_COUNT - i;
  817. if( cur->f.pict_type == AV_PICTURE_TYPE_B
  818. || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
  819. out_of_order = FFMAX(out_of_order, 1);
  820. if (out_of_order == MAX_DELAYED_PIC_COUNT) {
  821. av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
  822. for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
  823. h->last_pocs[i] = INT_MIN;
  824. h->last_pocs[0] = cur->poc;
  825. cur->mmco_reset = 1;
  826. } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
  827. av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
  828. h->avctx->has_b_frames = out_of_order;
  829. h->low_delay = 0;
  830. }
  831. pics = 0;
  832. while (h->delayed_pic[pics])
  833. pics++;
  834. av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
  835. h->delayed_pic[pics++] = cur;
  836. if (cur->reference == 0)
  837. cur->reference = DELAYED_PIC_REF;
  838. out = h->delayed_pic[0];
  839. out_idx = 0;
  840. for (i = 1; h->delayed_pic[i] &&
  841. !h->delayed_pic[i]->f.key_frame &&
  842. !h->delayed_pic[i]->mmco_reset;
  843. i++)
  844. if (h->delayed_pic[i]->poc < out->poc) {
  845. out = h->delayed_pic[i];
  846. out_idx = i;
  847. }
  848. if (h->avctx->has_b_frames == 0 &&
  849. (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
  850. h->next_outputed_poc = INT_MIN;
  851. out_of_order = out->poc < h->next_outputed_poc;
  852. if (out_of_order || pics > h->avctx->has_b_frames) {
  853. out->reference &= ~DELAYED_PIC_REF;
  854. // for frame threading, the owner must be the second field's thread or
  855. // else the first thread can release the picture and reuse it unsafely
  856. for (i = out_idx; h->delayed_pic[i]; i++)
  857. h->delayed_pic[i] = h->delayed_pic[i + 1];
  858. }
  859. if (!out_of_order && pics > h->avctx->has_b_frames) {
  860. h->next_output_pic = out;
  861. if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
  862. h->next_outputed_poc = INT_MIN;
  863. } else
  864. h->next_outputed_poc = out->poc;
  865. } else {
  866. av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
  867. }
  868. if (h->next_output_pic) {
  869. if (h->next_output_pic->recovered) {
  870. // We have reached an recovery point and all frames after it in
  871. // display order are "recovered".
  872. h->frame_recovered |= FRAME_RECOVERED_SEI;
  873. }
  874. h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
  875. }
  876. if (setup_finished && !h->avctx->hwaccel)
  877. ff_thread_finish_setup(h->avctx);
  878. }
  879. int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
  880. {
  881. int list, i;
  882. int luma_def, chroma_def;
  883. sl->use_weight = 0;
  884. sl->use_weight_chroma = 0;
  885. sl->luma_log2_weight_denom = get_ue_golomb(&h->gb);
  886. if (h->sps.chroma_format_idc)
  887. sl->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
  888. if (sl->luma_log2_weight_denom > 7U) {
  889. av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", sl->luma_log2_weight_denom);
  890. sl->luma_log2_weight_denom = 0;
  891. }
  892. if (sl->chroma_log2_weight_denom > 7U) {
  893. av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", sl->chroma_log2_weight_denom);
  894. sl->chroma_log2_weight_denom = 0;
  895. }
  896. luma_def = 1 << sl->luma_log2_weight_denom;
  897. chroma_def = 1 << sl->chroma_log2_weight_denom;
  898. for (list = 0; list < 2; list++) {
  899. sl->luma_weight_flag[list] = 0;
  900. sl->chroma_weight_flag[list] = 0;
  901. for (i = 0; i < sl->ref_count[list]; i++) {
  902. int luma_weight_flag, chroma_weight_flag;
  903. luma_weight_flag = get_bits1(&h->gb);
  904. if (luma_weight_flag) {
  905. sl->luma_weight[i][list][0] = get_se_golomb(&h->gb);
  906. sl->luma_weight[i][list][1] = get_se_golomb(&h->gb);
  907. if (sl->luma_weight[i][list][0] != luma_def ||
  908. sl->luma_weight[i][list][1] != 0) {
  909. sl->use_weight = 1;
  910. sl->luma_weight_flag[list] = 1;
  911. }
  912. } else {
  913. sl->luma_weight[i][list][0] = luma_def;
  914. sl->luma_weight[i][list][1] = 0;
  915. }
  916. if (h->sps.chroma_format_idc) {
  917. chroma_weight_flag = get_bits1(&h->gb);
  918. if (chroma_weight_flag) {
  919. int j;
  920. for (j = 0; j < 2; j++) {
  921. sl->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
  922. sl->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
  923. if (sl->chroma_weight[i][list][j][0] != chroma_def ||
  924. sl->chroma_weight[i][list][j][1] != 0) {
  925. sl->use_weight_chroma = 1;
  926. sl->chroma_weight_flag[list] = 1;
  927. }
  928. }
  929. } else {
  930. int j;
  931. for (j = 0; j < 2; j++) {
  932. sl->chroma_weight[i][list][j][0] = chroma_def;
  933. sl->chroma_weight[i][list][j][1] = 0;
  934. }
  935. }
  936. }
  937. }
  938. if (sl->slice_type_nos != AV_PICTURE_TYPE_B)
  939. break;
  940. }
  941. sl->use_weight = sl->use_weight || sl->use_weight_chroma;
  942. return 0;
  943. }
  944. /**
  945. * instantaneous decoder refresh.
  946. */
  947. static void idr(H264Context *h)
  948. {
  949. int i;
  950. ff_h264_remove_all_refs(h);
  951. h->prev_frame_num =
  952. h->prev_frame_num_offset = 0;
  953. h->prev_poc_msb = 1<<16;
  954. h->prev_poc_lsb = 0;
  955. for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
  956. h->last_pocs[i] = INT_MIN;
  957. }
  958. /* forget old pics after a seek */
  959. void ff_h264_flush_change(H264Context *h)
  960. {
  961. int i, j;
  962. h->outputed_poc = h->next_outputed_poc = INT_MIN;
  963. h->prev_interlaced_frame = 1;
  964. idr(h);
  965. h->prev_frame_num = -1;
  966. if (h->cur_pic_ptr) {
  967. h->cur_pic_ptr->reference = 0;
  968. for (j=i=0; h->delayed_pic[i]; i++)
  969. if (h->delayed_pic[i] != h->cur_pic_ptr)
  970. h->delayed_pic[j++] = h->delayed_pic[i];
  971. h->delayed_pic[j] = NULL;
  972. }
  973. ff_h264_unref_picture(h, &h->last_pic_for_ec);
  974. h->first_field = 0;
  975. ff_h264_reset_sei(h);
  976. h->recovery_frame = -1;
  977. h->frame_recovered = 0;
  978. h->current_slice = 0;
  979. h->mmco_reset = 1;
  980. for (i = 0; i < h->nb_slice_ctx; i++)
  981. h->slice_ctx[i].list_count = 0;
  982. }
  983. /* forget old pics after a seek */
  984. static void flush_dpb(AVCodecContext *avctx)
  985. {
  986. H264Context *h = avctx->priv_data;
  987. int i;
  988. memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
  989. ff_h264_flush_change(h);
  990. if (h->DPB)
  991. for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
  992. ff_h264_unref_picture(h, &h->DPB[i]);
  993. h->cur_pic_ptr = NULL;
  994. ff_h264_unref_picture(h, &h->cur_pic);
  995. h->mb_x = h->mb_y = 0;
  996. ff_h264_free_tables(h, 1);
  997. h->context_initialized = 0;
  998. }
  999. int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
  1000. {
  1001. const int max_frame_num = 1 << h->sps.log2_max_frame_num;
  1002. int field_poc[2];
  1003. h->frame_num_offset = h->prev_frame_num_offset;
  1004. if (h->frame_num < h->prev_frame_num)
  1005. h->frame_num_offset += max_frame_num;
  1006. if (h->sps.poc_type == 0) {
  1007. const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
  1008. if (h->poc_lsb < h->prev_poc_lsb &&
  1009. h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
  1010. h->poc_msb = h->prev_poc_msb + max_poc_lsb;
  1011. else if (h->poc_lsb > h->prev_poc_lsb &&
  1012. h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
  1013. h->poc_msb = h->prev_poc_msb - max_poc_lsb;
  1014. else
  1015. h->poc_msb = h->prev_poc_msb;
  1016. field_poc[0] =
  1017. field_poc[1] = h->poc_msb + h->poc_lsb;
  1018. if (h->picture_structure == PICT_FRAME)
  1019. field_poc[1] += h->delta_poc_bottom;
  1020. } else if (h->sps.poc_type == 1) {
  1021. int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
  1022. int i;
  1023. if (h->sps.poc_cycle_length != 0)
  1024. abs_frame_num = h->frame_num_offset + h->frame_num;
  1025. else
  1026. abs_frame_num = 0;
  1027. if (h->nal_ref_idc == 0 && abs_frame_num > 0)
  1028. abs_frame_num--;
  1029. expected_delta_per_poc_cycle = 0;
  1030. for (i = 0; i < h->sps.poc_cycle_length; i++)
  1031. // FIXME integrate during sps parse
  1032. expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
  1033. if (abs_frame_num > 0) {
  1034. int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
  1035. int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
  1036. expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
  1037. for (i = 0; i <= frame_num_in_poc_cycle; i++)
  1038. expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
  1039. } else
  1040. expectedpoc = 0;
  1041. if (h->nal_ref_idc == 0)
  1042. expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
  1043. field_poc[0] = expectedpoc + h->delta_poc[0];
  1044. field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
  1045. if (h->picture_structure == PICT_FRAME)
  1046. field_poc[1] += h->delta_poc[1];
  1047. } else {
  1048. int poc = 2 * (h->frame_num_offset + h->frame_num);
  1049. if (!h->nal_ref_idc)
  1050. poc--;
  1051. field_poc[0] = poc;
  1052. field_poc[1] = poc;
  1053. }
  1054. if (h->picture_structure != PICT_BOTTOM_FIELD)
  1055. pic_field_poc[0] = field_poc[0];
  1056. if (h->picture_structure != PICT_TOP_FIELD)
  1057. pic_field_poc[1] = field_poc[1];
  1058. *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
  1059. return 0;
  1060. }
  1061. /**
  1062. * Compute profile from profile_idc and constraint_set?_flags.
  1063. *
  1064. * @param sps SPS
  1065. *
  1066. * @return profile as defined by FF_PROFILE_H264_*
  1067. */
  1068. int ff_h264_get_profile(SPS *sps)
  1069. {
  1070. int profile = sps->profile_idc;
  1071. switch (sps->profile_idc) {
  1072. case FF_PROFILE_H264_BASELINE:
  1073. // constraint_set1_flag set to 1
  1074. profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
  1075. break;
  1076. case FF_PROFILE_H264_HIGH_10:
  1077. case FF_PROFILE_H264_HIGH_422:
  1078. case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
  1079. // constraint_set3_flag set to 1
  1080. profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
  1081. break;
  1082. }
  1083. return profile;
  1084. }
  1085. int ff_h264_set_parameter_from_sps(H264Context *h)
  1086. {
  1087. if (h->flags & CODEC_FLAG_LOW_DELAY ||
  1088. (h->sps.bitstream_restriction_flag &&
  1089. !h->sps.num_reorder_frames)) {
  1090. if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
  1091. av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
  1092. "Reenabling low delay requires a codec flush.\n");
  1093. else
  1094. h->low_delay = 1;
  1095. }
  1096. if (h->avctx->has_b_frames < 2)
  1097. h->avctx->has_b_frames = !h->low_delay;
  1098. if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
  1099. h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
  1100. if (h->avctx->codec &&
  1101. h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU &&
  1102. (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
  1103. av_log(h->avctx, AV_LOG_ERROR,
  1104. "VDPAU decoding does not support video colorspace.\n");
  1105. return AVERROR_INVALIDDATA;
  1106. }
  1107. if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 &&
  1108. h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13) {
  1109. h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
  1110. h->cur_chroma_format_idc = h->sps.chroma_format_idc;
  1111. h->pixel_shift = h->sps.bit_depth_luma > 8;
  1112. ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
  1113. h->sps.chroma_format_idc);
  1114. ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
  1115. ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
  1116. ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma,
  1117. h->sps.chroma_format_idc);
  1118. ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma);
  1119. } else {
  1120. av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
  1121. h->sps.bit_depth_luma);
  1122. return AVERROR_INVALIDDATA;
  1123. }
  1124. }
  1125. return 0;
  1126. }
  1127. int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
  1128. {
  1129. int ref_count[2], list_count;
  1130. int num_ref_idx_active_override_flag;
  1131. // set defaults, might be overridden a few lines later
  1132. ref_count[0] = h->pps.ref_count[0];
  1133. ref_count[1] = h->pps.ref_count[1];
  1134. if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
  1135. unsigned max[2];
  1136. max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
  1137. if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
  1138. sl->direct_spatial_mv_pred = get_bits1(&h->gb);
  1139. num_ref_idx_active_override_flag = get_bits1(&h->gb);
  1140. if (num_ref_idx_active_override_flag) {
  1141. ref_count[0] = get_ue_golomb(&h->gb) + 1;
  1142. if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
  1143. ref_count[1] = get_ue_golomb(&h->gb) + 1;
  1144. } else
  1145. // full range is spec-ok in this case, even for frames
  1146. ref_count[1] = 1;
  1147. }
  1148. if (ref_count[0]-1 > max[0] || ref_count[1]-1 > max[1]){
  1149. av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", ref_count[0]-1, max[0], ref_count[1]-1, max[1]);
  1150. sl->ref_count[0] = sl->ref_count[1] = 0;
  1151. sl->list_count = 0;
  1152. return AVERROR_INVALIDDATA;
  1153. }
  1154. if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
  1155. list_count = 2;
  1156. else
  1157. list_count = 1;
  1158. } else {
  1159. list_count = 0;
  1160. ref_count[0] = ref_count[1] = 0;
  1161. }
  1162. if (list_count != sl->list_count ||
  1163. ref_count[0] != sl->ref_count[0] ||
  1164. ref_count[1] != sl->ref_count[1]) {
  1165. sl->ref_count[0] = ref_count[0];
  1166. sl->ref_count[1] = ref_count[1];
  1167. sl->list_count = list_count;
  1168. return 1;
  1169. }
  1170. return 0;
  1171. }
  1172. static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
  1173. static int get_bit_length(H264Context *h, const uint8_t *buf,
  1174. const uint8_t *ptr, int dst_length,
  1175. int i, int next_avc)
  1176. {
  1177. if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
  1178. buf[i] == 0x00 && buf[i + 1] == 0x00 &&
  1179. buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
  1180. h->workaround_bugs |= FF_BUG_TRUNCATED;
  1181. if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
  1182. while (dst_length > 0 && ptr[dst_length - 1] == 0)
  1183. dst_length--;
  1184. if (!dst_length)
  1185. return 0;
  1186. return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
  1187. }
  1188. static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
  1189. {
  1190. int next_avc = h->is_avc ? 0 : buf_size;
  1191. int nal_index = 0;
  1192. int buf_index = 0;
  1193. int nals_needed = 0;
  1194. int first_slice = 0;
  1195. while(1) {
  1196. int nalsize = 0;
  1197. int dst_length, bit_length, consumed;
  1198. const uint8_t *ptr;
  1199. if (buf_index >= next_avc) {
  1200. nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
  1201. if (nalsize < 0)
  1202. break;
  1203. next_avc = buf_index + nalsize;
  1204. } else {
  1205. buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
  1206. if (buf_index >= buf_size)
  1207. break;
  1208. if (buf_index >= next_avc)
  1209. continue;
  1210. }
  1211. ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed,
  1212. next_avc - buf_index);
  1213. if (!ptr || dst_length < 0)
  1214. return AVERROR_INVALIDDATA;
  1215. buf_index += consumed;
  1216. bit_length = get_bit_length(h, buf, ptr, dst_length,
  1217. buf_index, next_avc);
  1218. nal_index++;
  1219. /* packets can sometimes contain multiple PPS/SPS,
  1220. * e.g. two PAFF field pictures in one packet, or a demuxer
  1221. * which splits NALs strangely if so, when frame threading we
  1222. * can't start the next thread until we've read all of them */
  1223. switch (h->nal_unit_type) {
  1224. case NAL_SPS:
  1225. case NAL_PPS:
  1226. nals_needed = nal_index;
  1227. break;
  1228. case NAL_DPA:
  1229. case NAL_IDR_SLICE:
  1230. case NAL_SLICE:
  1231. init_get_bits(&h->gb, ptr, bit_length);
  1232. if (!get_ue_golomb(&h->gb) ||
  1233. !first_slice ||
  1234. first_slice != h->nal_unit_type)
  1235. nals_needed = nal_index;
  1236. if (!first_slice)
  1237. first_slice = h->nal_unit_type;
  1238. }
  1239. }
  1240. return nals_needed;
  1241. }
  1242. static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
  1243. int parse_extradata)
  1244. {
  1245. AVCodecContext *const avctx = h->avctx;
  1246. H264Context *hx; ///< thread context
  1247. H264SliceContext *sl;
  1248. int buf_index;
  1249. unsigned context_count;
  1250. int next_avc;
  1251. int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
  1252. int nal_index;
  1253. int idr_cleared=0;
  1254. int ret = 0;
  1255. h->nal_unit_type= 0;
  1256. if(!h->slice_context_count)
  1257. h->slice_context_count= 1;
  1258. h->max_contexts = h->slice_context_count;
  1259. if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
  1260. h->current_slice = 0;
  1261. if (!h->first_field)
  1262. h->cur_pic_ptr = NULL;
  1263. ff_h264_reset_sei(h);
  1264. }
  1265. if (h->nal_length_size == 4) {
  1266. if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
  1267. h->is_avc = 0;
  1268. }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
  1269. h->is_avc = 1;
  1270. }
  1271. if (avctx->active_thread_type & FF_THREAD_FRAME)
  1272. nals_needed = get_last_needed_nal(h, buf, buf_size);
  1273. {
  1274. buf_index = 0;
  1275. context_count = 0;
  1276. next_avc = h->is_avc ? 0 : buf_size;
  1277. nal_index = 0;
  1278. for (;;) {
  1279. int consumed;
  1280. int dst_length;
  1281. int bit_length;
  1282. const uint8_t *ptr;
  1283. int nalsize = 0;
  1284. int err;
  1285. if (buf_index >= next_avc) {
  1286. nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
  1287. if (nalsize < 0)
  1288. break;
  1289. next_avc = buf_index + nalsize;
  1290. } else {
  1291. buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
  1292. if (buf_index >= buf_size)
  1293. break;
  1294. if (buf_index >= next_avc)
  1295. continue;
  1296. }
  1297. hx = h->thread_context[context_count];
  1298. sl = &h->slice_ctx[context_count];
  1299. ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
  1300. &consumed, next_avc - buf_index);
  1301. if (!ptr || dst_length < 0) {
  1302. ret = -1;
  1303. goto end;
  1304. }
  1305. bit_length = get_bit_length(h, buf, ptr, dst_length,
  1306. buf_index + consumed, next_avc);
  1307. if (h->avctx->debug & FF_DEBUG_STARTCODE)
  1308. av_log(h->avctx, AV_LOG_DEBUG,
  1309. "NAL %d/%d at %d/%d length %d\n",
  1310. hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length);
  1311. if (h->is_avc && (nalsize != consumed) && nalsize)
  1312. av_log(h->avctx, AV_LOG_DEBUG,
  1313. "AVC: Consumed only %d bytes instead of %d\n",
  1314. consumed, nalsize);
  1315. buf_index += consumed;
  1316. nal_index++;
  1317. if (avctx->skip_frame >= AVDISCARD_NONREF &&
  1318. h->nal_ref_idc == 0 &&
  1319. h->nal_unit_type != NAL_SEI)
  1320. continue;
  1321. again:
  1322. if ( (!(avctx->active_thread_type & FF_THREAD_FRAME) || nals_needed >= nal_index)
  1323. && !h->current_slice)
  1324. h->au_pps_id = -1;
  1325. /* Ignore per frame NAL unit type during extradata
  1326. * parsing. Decoding slices is not possible in codec init
  1327. * with frame-mt */
  1328. if (parse_extradata) {
  1329. switch (hx->nal_unit_type) {
  1330. case NAL_IDR_SLICE:
  1331. case NAL_SLICE:
  1332. case NAL_DPA:
  1333. case NAL_DPB:
  1334. case NAL_DPC:
  1335. av_log(h->avctx, AV_LOG_WARNING,
  1336. "Ignoring NAL %d in global header/extradata\n",
  1337. hx->nal_unit_type);
  1338. // fall through to next case
  1339. case NAL_AUXILIARY_SLICE:
  1340. hx->nal_unit_type = NAL_FF_IGNORE;
  1341. }
  1342. }
  1343. err = 0;
  1344. switch (hx->nal_unit_type) {
  1345. case NAL_IDR_SLICE:
  1346. if ((ptr[0] & 0xFC) == 0x98) {
  1347. av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
  1348. h->next_outputed_poc = INT_MIN;
  1349. ret = -1;
  1350. goto end;
  1351. }
  1352. if (h->nal_unit_type != NAL_IDR_SLICE) {
  1353. av_log(h->avctx, AV_LOG_ERROR,
  1354. "Invalid mix of idr and non-idr slices\n");
  1355. ret = -1;
  1356. goto end;
  1357. }
  1358. if(!idr_cleared)
  1359. idr(h); // FIXME ensure we don't lose some frames if there is reordering
  1360. idr_cleared = 1;
  1361. h->has_recovery_point = 1;
  1362. case NAL_SLICE:
  1363. init_get_bits(&hx->gb, ptr, bit_length);
  1364. hx->intra_gb_ptr =
  1365. hx->inter_gb_ptr = &hx->gb;
  1366. if ((err = ff_h264_decode_slice_header(hx, sl, h)))
  1367. break;
  1368. if (h->sei_recovery_frame_cnt >= 0) {
  1369. if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
  1370. h->valid_recovery_point = 1;
  1371. if ( h->recovery_frame < 0
  1372. || ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt) {
  1373. h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
  1374. ((1 << h->sps.log2_max_frame_num) - 1);
  1375. if (!h->valid_recovery_point)
  1376. h->recovery_frame = h->frame_num;
  1377. }
  1378. }
  1379. h->cur_pic_ptr->f.key_frame |=
  1380. (hx->nal_unit_type == NAL_IDR_SLICE);
  1381. if (hx->nal_unit_type == NAL_IDR_SLICE ||
  1382. h->recovery_frame == h->frame_num) {
  1383. h->recovery_frame = -1;
  1384. h->cur_pic_ptr->recovered = 1;
  1385. }
  1386. // If we have an IDR, all frames after it in decoded order are
  1387. // "recovered".
  1388. if (hx->nal_unit_type == NAL_IDR_SLICE)
  1389. h->frame_recovered |= FRAME_RECOVERED_IDR;
  1390. h->frame_recovered |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
  1391. h->frame_recovered |= 3*!!(avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT);
  1392. #if 1
  1393. h->cur_pic_ptr->recovered |= h->frame_recovered;
  1394. #else
  1395. h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
  1396. #endif
  1397. if (h->current_slice == 1) {
  1398. if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
  1399. decode_postinit(h, nal_index >= nals_needed);
  1400. if (h->avctx->hwaccel &&
  1401. (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
  1402. return ret;
  1403. if (CONFIG_H264_VDPAU_DECODER &&
  1404. h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
  1405. ff_vdpau_h264_picture_start(h);
  1406. }
  1407. if (sl->redundant_pic_count == 0) {
  1408. if (avctx->hwaccel) {
  1409. ret = avctx->hwaccel->decode_slice(avctx,
  1410. &buf[buf_index - consumed],
  1411. consumed);
  1412. if (ret < 0)
  1413. return ret;
  1414. } else if (CONFIG_H264_VDPAU_DECODER &&
  1415. h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
  1416. ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
  1417. start_code,
  1418. sizeof(start_code));
  1419. ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
  1420. &buf[buf_index - consumed],
  1421. consumed);
  1422. } else
  1423. context_count++;
  1424. }
  1425. break;
  1426. case NAL_DPA:
  1427. case NAL_DPB:
  1428. case NAL_DPC:
  1429. avpriv_request_sample(avctx, "data partitioning");
  1430. ret = AVERROR(ENOSYS);
  1431. goto end;
  1432. break;
  1433. case NAL_SEI:
  1434. init_get_bits(&h->gb, ptr, bit_length);
  1435. ret = ff_h264_decode_sei(h);
  1436. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1437. goto end;
  1438. break;
  1439. case NAL_SPS:
  1440. init_get_bits(&h->gb, ptr, bit_length);
  1441. if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? nalsize : 1)) {
  1442. av_log(h->avctx, AV_LOG_DEBUG,
  1443. "SPS decoding failure, trying again with the complete NAL\n");
  1444. if (h->is_avc)
  1445. av_assert0(next_avc - buf_index + consumed == nalsize);
  1446. if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
  1447. break;
  1448. init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
  1449. 8*(next_avc - buf_index + consumed - 1));
  1450. ff_h264_decode_seq_parameter_set(h);
  1451. }
  1452. break;
  1453. case NAL_PPS:
  1454. init_get_bits(&h->gb, ptr, bit_length);
  1455. ret = ff_h264_decode_picture_parameter_set(h, bit_length);
  1456. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1457. goto end;
  1458. break;
  1459. case NAL_AUD:
  1460. case NAL_END_SEQUENCE:
  1461. case NAL_END_STREAM:
  1462. case NAL_FILLER_DATA:
  1463. case NAL_SPS_EXT:
  1464. case NAL_AUXILIARY_SLICE:
  1465. break;
  1466. case NAL_FF_IGNORE:
  1467. break;
  1468. default:
  1469. av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
  1470. hx->nal_unit_type, bit_length);
  1471. }
  1472. if (context_count == h->max_contexts) {
  1473. ret = ff_h264_execute_decode_slices(h, context_count);
  1474. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1475. goto end;
  1476. context_count = 0;
  1477. }
  1478. if (err < 0 || err == SLICE_SKIPED) {
  1479. if (err < 0)
  1480. av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
  1481. sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
  1482. } else if (err == SLICE_SINGLETHREAD) {
  1483. /* Slice could not be decoded in parallel mode, copy down
  1484. * NAL unit stuff to context 0 and restart. Note that
  1485. * rbsp_buffer is not transferred, but since we no longer
  1486. * run in parallel mode this should not be an issue. */
  1487. h->nal_unit_type = hx->nal_unit_type;
  1488. h->nal_ref_idc = hx->nal_ref_idc;
  1489. hx = h;
  1490. sl = &h->slice_ctx[0];
  1491. goto again;
  1492. }
  1493. }
  1494. }
  1495. if (context_count) {
  1496. ret = ff_h264_execute_decode_slices(h, context_count);
  1497. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1498. goto end;
  1499. }
  1500. ret = 0;
  1501. end:
  1502. /* clean up */
  1503. if (h->cur_pic_ptr && !h->droppable) {
  1504. ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
  1505. h->picture_structure == PICT_BOTTOM_FIELD);
  1506. }
  1507. return (ret < 0) ? ret : buf_index;
  1508. }
  1509. /**
  1510. * Return the number of bytes consumed for building the current frame.
  1511. */
  1512. static int get_consumed_bytes(int pos, int buf_size)
  1513. {
1514. if (pos == 0)
1515. pos = 1; // report at least one byte consumed so the caller cannot loop forever
1516. if (pos + 10 > buf_size)
1517. pos = buf_size; // within a few bytes of the end: treat the whole buffer as consumed
  1518. return pos;
  1519. }
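/* Build the caller-visible frame for srcp: take a new reference to the
 * decoded frame, attach stereo-mode metadata, mark it as a key frame when
 * its SEI recovery frame count is 0, and apply container cropping by
 * offsetting each plane's data pointer by crop_left samples (scaled by
 * pixel_shift and the chroma shift) plus crop_top rows. */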
  1520. static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
  1521. {
  1522. AVFrame *src = &srcp->f;
  1523. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
  1524. int i;
  1525. int ret = av_frame_ref(dst, src);
  1526. if (ret < 0)
  1527. return ret;
  1528. av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(h), 0);
  1529. if (srcp->sei_recovery_frame_cnt == 0)
  1530. dst->key_frame = 1;
  1531. if (!srcp->crop)
  1532. return 0;
  1533. for (i = 0; i < desc->nb_components; i++) {
  1534. int hshift = (i > 0) ? desc->log2_chroma_w : 0;
  1535. int vshift = (i > 0) ? desc->log2_chroma_h : 0;
  1536. int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
  1537. (srcp->crop_top >> vshift) * dst->linesize[i];
  1538. dst->data[i] += off;
  1539. }
  1540. return 0;
  1541. }
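/* Heuristic check for avcC-style extradata passed as packet data: the low
 * 5 bits of byte 5 give the SPS count, each parameter set is a 16-bit
 * big-endian length followed by the NAL itself (first byte 0x67 for SPS,
 * 0x68 for PPS), and a PPS count byte follows the SPS entries. Returns 1
 * only if every entry fits within buf_size; note that the function reads
 * the avcC header bytes unconditionally, so callers are expected to pass
 * at least that much data. */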
  1542. static int is_extra(const uint8_t *buf, int buf_size)
  1543. {
  1544. int cnt= buf[5]&0x1f;
  1545. const uint8_t *p= buf+6;
  1546. while(cnt--){
  1547. int nalsize= AV_RB16(p) + 2;
  1548. if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
  1549. return 0;
  1550. p += nalsize;
  1551. }
  1552. cnt = *(p++);
  1553. if(!cnt)
  1554. return 0;
  1555. while(cnt--){
  1556. int nalsize= AV_RB16(p) + 2;
  1557. if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
  1558. return 0;
  1559. p += nalsize;
  1560. }
  1561. return 1;
  1562. }
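/* Decoder entry point for one AVPacket: handles the end-of-stream drain,
 * new extradata delivered as side data or in-band, decodes the contained
 * NAL units and, once a field/frame is complete, outputs the next picture
 * in display order. */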
  1563. static int h264_decode_frame(AVCodecContext *avctx, void *data,
  1564. int *got_frame, AVPacket *avpkt)
  1565. {
  1566. const uint8_t *buf = avpkt->data;
  1567. int buf_size = avpkt->size;
  1568. H264Context *h = avctx->priv_data;
  1569. AVFrame *pict = data;
  1570. int buf_index = 0;
  1571. H264Picture *out;
  1572. int i, out_idx;
  1573. int ret;
  1574. h->flags = avctx->flags;
  1575. ff_h264_unref_picture(h, &h->last_pic_for_ec);
  1576. /* end of stream, output what is still in the buffers */
  1577. if (buf_size == 0) {
  1578. out:
  1579. h->cur_pic_ptr = NULL;
  1580. h->first_field = 0;
  1581. // FIXME factorize this with the output code below
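/* Drain the delayed-picture queue: pick the picture with the smallest POC,
 * stopping the scan at the first key frame or MMCO reset, then compact the
 * queue and output the chosen picture. */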
  1582. out = h->delayed_pic[0];
  1583. out_idx = 0;
  1584. for (i = 1;
  1585. h->delayed_pic[i] &&
  1586. !h->delayed_pic[i]->f.key_frame &&
  1587. !h->delayed_pic[i]->mmco_reset;
  1588. i++)
  1589. if (h->delayed_pic[i]->poc < out->poc) {
  1590. out = h->delayed_pic[i];
  1591. out_idx = i;
  1592. }
  1593. for (i = out_idx; h->delayed_pic[i]; i++)
  1594. h->delayed_pic[i] = h->delayed_pic[i + 1];
  1595. if (out) {
  1596. out->reference &= ~DELAYED_PIC_REF;
  1597. ret = output_frame(h, pict, out);
  1598. if (ret < 0)
  1599. return ret;
  1600. *got_frame = 1;
  1601. }
  1602. return buf_index;
  1603. }
  1604. if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
  1605. int side_size;
  1606. uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
  1607. if (is_extra(side, side_size))
  1608. ff_h264_decode_extradata(h, side, side_size);
  1609. }
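/* Some streams carry avcC extradata in-band as an ordinary packet. The byte
 * pattern below (configurationVersion 1, the reserved bits of byte 4 all
 * set, a nonzero SPS count and a first SPS byte of 0x67) is a heuristic for
 * that case; such packets are handed to ff_h264_decode_extradata() instead
 * of the NAL parser. */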
  1610. if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
  1611. if (is_extra(buf, buf_size))
  1612. return ff_h264_decode_extradata(h, buf, buf_size);
  1613. }
  1614. buf_index = decode_nal_units(h, buf, buf_size, 0);
  1615. if (buf_index < 0)
  1616. return AVERROR_INVALIDDATA;
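/* An end-of-sequence NAL with no current picture triggers the same drain
 * path as an empty packet: flush whatever is left in the delayed queue. */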
  1617. if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
  1618. av_assert0(buf_index <= buf_size);
  1619. goto out;
  1620. }
  1621. if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
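/* No picture was produced. This is acceptable when non-reference frames are
 * being skipped, or when the packet is only the "Q264" padding tag that some
 * encoders apparently emit; anything else is treated as an error. */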
  1622. if (avctx->skip_frame >= AVDISCARD_NONREF ||
  1623. buf_size >= 4 && !memcmp("Q264", buf, 4))
  1624. return buf_size;
  1625. av_log(avctx, AV_LOG_ERROR, "no frame!\n");
  1626. return AVERROR_INVALIDDATA;
  1627. }
  1628. if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
  1629. (h->mb_y >= h->mb_height && h->mb_height)) {
  1630. if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
  1631. decode_postinit(h, 1);
  1632. ff_h264_field_end(h, &h->slice_ctx[0], 0);
  1633. /* Wait for second field. */
  1634. *got_frame = 0;
  1635. if (h->next_output_pic && (
  1636. h->next_output_pic->recovered)) {
  1637. if (!h->next_output_pic->recovered)
  1638. h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;
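/* If one field's POC is still INT_MAX, only a single field was decoded.
 * For software decoding, duplicate the available field into the missing one
 * (copying every other line, hence the doubled linesize) so a complete frame
 * can be output. */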
  1639. if (!h->avctx->hwaccel &&
  1640. (h->next_output_pic->field_poc[0] == INT_MAX ||
  1641. h->next_output_pic->field_poc[1] == INT_MAX)
  1642. ) {
  1643. int p;
  1644. AVFrame *f = &h->next_output_pic->f;
  1645. int field = h->next_output_pic->field_poc[0] == INT_MAX;
  1646. uint8_t *dst_data[4];
  1647. int linesizes[4];
  1648. const uint8_t *src_data[4];
  1649. av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
  1650. for (p = 0; p<4; p++) {
  1651. dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
  1652. src_data[p] = f->data[p] + field *f->linesize[p];
  1653. linesizes[p] = 2*f->linesize[p];
  1654. }
  1655. av_image_copy(dst_data, linesizes, src_data, linesizes,
  1656. f->format, f->width, f->height>>1);
  1657. }
  1658. ret = output_frame(h, pict, h->next_output_pic);
  1659. if (ret < 0)
  1660. return ret;
  1661. *got_frame = 1;
  1662. if (CONFIG_MPEGVIDEO) {
  1663. ff_print_debug_info2(h->avctx, pict, h->er.mbskip_table,
  1664. h->next_output_pic->mb_type,
  1665. h->next_output_pic->qscale_table,
  1666. h->next_output_pic->motion_val,
  1667. &h->low_delay,
  1668. h->mb_width, h->mb_height, h->mb_stride, 1);
  1669. }
  1670. }
  1671. }
  1672. av_assert0(pict->buf[0] || !*got_frame);
  1673. ff_h264_unref_picture(h, &h->last_pic_for_ec);
  1674. return get_consumed_bytes(buf_index, buf_size);
  1675. }
  1676. av_cold void ff_h264_free_context(H264Context *h)
  1677. {
  1678. int i;
  1679. ff_h264_free_tables(h, 1); // FIXME cleanup init stuff perhaps
  1680. av_freep(&h->slice_ctx);
  1681. h->nb_slice_ctx = 0;
  1682. for (i = 0; i < MAX_SPS_COUNT; i++)
  1683. av_freep(h->sps_buffers + i);
  1684. for (i = 0; i < MAX_PPS_COUNT; i++)
  1685. av_freep(h->pps_buffers + i);
  1686. }
  1687. static av_cold int h264_decode_end(AVCodecContext *avctx)
  1688. {
  1689. H264Context *h = avctx->priv_data;
  1690. ff_h264_remove_all_refs(h);
  1691. ff_h264_free_context(h);
  1692. ff_h264_unref_picture(h, &h->cur_pic);
  1693. ff_h264_unref_picture(h, &h->last_pic_for_ec);
  1694. return 0;
  1695. }
  1696. static const AVProfile profiles[] = {
  1697. { FF_PROFILE_H264_BASELINE, "Baseline" },
  1698. { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
  1699. { FF_PROFILE_H264_MAIN, "Main" },
  1700. { FF_PROFILE_H264_EXTENDED, "Extended" },
  1701. { FF_PROFILE_H264_HIGH, "High" },
  1702. { FF_PROFILE_H264_HIGH_10, "High 10" },
  1703. { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
  1704. { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
  1705. { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
  1706. { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
  1707. { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
  1708. { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
  1709. { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
  1710. { FF_PROFILE_UNKNOWN },
  1711. };
  1712. static const AVOption h264_options[] = {
  1713. {"is_avc", "is avc", offsetof(H264Context, is_avc), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
  1714. {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
  1715. {NULL}
  1716. };
  1717. static const AVClass h264_class = {
  1718. .class_name = "H264 Decoder",
  1719. .item_name = av_default_item_name,
  1720. .option = h264_options,
  1721. .version = LIBAVUTIL_VERSION_INT,
  1722. };
  1723. AVCodec ff_h264_decoder = {
  1724. .name = "h264",
  1725. .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
  1726. .type = AVMEDIA_TYPE_VIDEO,
  1727. .id = AV_CODEC_ID_H264,
  1728. .priv_data_size = sizeof(H264Context),
  1729. .init = ff_h264_decode_init,
  1730. .close = h264_decode_end,
  1731. .decode = h264_decode_frame,
  1732. .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
  1733. CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
  1734. CODEC_CAP_FRAME_THREADS,
  1735. .flush = flush_dpb,
  1736. .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
  1737. .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
  1738. .profiles = NULL_IF_CONFIG_SMALL(profiles),
  1739. .priv_class = &h264_class,
  1740. };
  1741. #if CONFIG_H264_VDPAU_DECODER
  1742. static const AVClass h264_vdpau_class = {
  1743. .class_name = "H264 VDPAU Decoder",
  1744. .item_name = av_default_item_name,
  1745. .option = h264_options,
  1746. .version = LIBAVUTIL_VERSION_INT,
  1747. };
  1748. AVCodec ff_h264_vdpau_decoder = {
  1749. .name = "h264_vdpau",
  1750. .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
  1751. .type = AVMEDIA_TYPE_VIDEO,
  1752. .id = AV_CODEC_ID_H264,
  1753. .priv_data_size = sizeof(H264Context),
  1754. .init = ff_h264_decode_init,
  1755. .close = h264_decode_end,
  1756. .decode = h264_decode_frame,
  1757. .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
  1758. .flush = flush_dpb,
  1759. .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
  1760. AV_PIX_FMT_NONE},
  1761. .profiles = NULL_IF_CONFIG_SMALL(profiles),
  1762. .priv_class = &h264_vdpau_class,
  1763. };
  1764. #endif