/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "golomb.h"
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
// zigzag_scan8x8_cavlc[(i/4) + 16*(i%4)] = zigzag_scan8x8[i],
// i.e. zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[4*(i%16) + i/16]
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
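
/* For illustration: reading the table above against the plain 8x8 zigzag
 * scan, entry i picks zigzag coefficient 4*(i%16) + i/16, so entries 0..15
 * take every 4th position of the zigzag (0, 4, 8, ..., 60), entries 16..31
 * take positions 1, 5, 9, ... and so on. CAVLC codes an 8x8 block as four
 * interleaved 4x4 scans; this table realizes that interleave in one lookup. */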
static void release_unused_pictures(H264Context *h, int remove_current)
{
    int i;

    /* release non reference frames */
    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
            (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
            ff_h264_unref_picture(h, &h->DPB[i]);
        }
    }
}

static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
{
    const H264Context *h = sl->h264;
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated,
                   16 * 6 * alloc_size);
    // edge emu needs blocksize + filter length - 1
    // (= 21x21 for H.264)
    av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated,
                   alloc_size * 2 * 21);

    av_fast_mallocz(&sl->top_borders[0], &sl->top_borders_allocated[0],
                    h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
    av_fast_mallocz(&sl->top_borders[1], &sl->top_borders_allocated[1],
                    h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);

    if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
        !sl->top_borders[0]    || !sl->top_borders[1]) {
        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
        return AVERROR(ENOMEM);
    }

    return 0;
}
static int init_table_pools(H264Context *h)
{
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
                                               av_buffer_allocz);
    h->mb_type_pool      = av_buffer_pool_init((big_mb_num + h->mb_stride) *
                                               sizeof(uint32_t), av_buffer_allocz);
    h->motion_val_pool   = av_buffer_pool_init(2 * (b4_array_size + 4) *
                                               sizeof(int16_t), av_buffer_allocz);
    h->ref_index_pool    = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);

    if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
        !h->ref_index_pool) {
        av_buffer_pool_uninit(&h->qscale_table_pool);
        av_buffer_pool_uninit(&h->mb_type_pool);
        av_buffer_pool_uninit(&h->motion_val_pool);
        av_buffer_pool_uninit(&h->ref_index_pool);
        return AVERROR(ENOMEM);
    }

    return 0;
}
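
/* For illustration, with a 1920x1088 coded frame (made-up numbers, not from
 * any particular stream): mb_width = 120 and mb_height = 68, so
 * mb_stride = 121, big_mb_num = 121 * 69 + 1 = 8350, and the qscale pool
 * hands out buffers of 8350 + 121 = 8471 bytes. b4_stride = 481 and
 * b4_array_size = 481 * 272 = 130832, so each motion_val buffer holds an
 * (x, y) int16_t pair per 4x4 block plus a little padding; alloc_picture()
 * below fetches one such buffer per reference list. */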
static int alloc_picture(H264Context *h, H264Picture *pic)
{
    int i, ret = 0;

    av_assert0(!pic->f->data[0]);

    pic->tf.f = pic->f;
    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
                                                   AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    pic->crop      = h->ps.sps->crop;
    pic->crop_top  = h->ps.sps->crop_top;
    pic->crop_left = h->ps.sps->crop_left;

    if (h->avctx->hwaccel) {
        const AVHWAccel *hwaccel = h->avctx->hwaccel;
        av_assert0(!pic->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf)
                return AVERROR(ENOMEM);
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }
    if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(pic->f->format,
                                         &h_chroma_shift, &v_chroma_shift);

        for (i = 0; i < AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
            memset(pic->f->data[1] + pic->f->linesize[1] * i,
                   0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
            memset(pic->f->data[2] + pic->f->linesize[2] * i,
                   0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
        }
    }

    if (!h->qscale_table_pool) {
        ret = init_table_pools(h);
        if (ret < 0)
            goto fail;
    }

    pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
    pic->mb_type_buf      = av_buffer_pool_get(h->mb_type_pool);
    if (!pic->qscale_table_buf || !pic->mb_type_buf)
        goto fail;

    pic->mb_type      = (uint32_t *)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;

    for (i = 0; i < 2; i++) {
        pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
        pic->ref_index_buf[i]  = av_buffer_pool_get(h->ref_index_pool);
        if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
            goto fail;

        pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
        pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    }

    return 0;
fail:
    ff_h264_unref_picture(h, pic);
    return (ret < 0) ? ret : AVERROR(ENOMEM);
}
static inline int pic_is_unused(H264Context *h, H264Picture *pic)
{
    if (!pic->f->buf[0])
        return 1;
    return 0;
}

static int find_unused_picture(H264Context *h)
{
    int i;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (pic_is_unused(h, &h->DPB[i]))
            break;
    }
    if (i == H264_MAX_PICTURE_COUNT)
        return AVERROR_INVALIDDATA;

    return i;
}
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
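
/* Frame-threaded decoding keeps one H264Context per thread, each with its own
 * DPB array at a different address, so a raw H264Picture pointer copied from
 * the source context would dangle. REBASE_PICTURE translates by index
 * instead: old_ctx->DPB + 3 becomes new_ctx->DPB + 3, and any pointer that
 * does not point into the old DPB maps to NULL. */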
static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
                               H264Context *new_base,
                               H264Context *old_base)
{
    int i;

    for (i = 0; i < count; i++) {
        av_assert1(!from[i] ||
                   IN_RANGE(from[i], old_base, 1) ||
                   IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
        to[i] = REBASE_PICTURE(from[i], new_base, old_base);
    }
}
static int h264_slice_header_init(H264Context *h);

int ff_h264_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    H264Context *h = dst->priv_data, *h1 = src->priv_data;
    int inited = h->context_initialized, err = 0;
    int need_reinit = 0;
    int i, ret;

    if (dst == src)
        return 0;
    // We can't fail if the SPS isn't set, as that breaks the current skip_frame code
    //if (!h1->ps.sps)
    //    return AVERROR_INVALIDDATA;
    if (inited &&
        (h->width     != h1->width     ||
         h->height    != h1->height    ||
         h->mb_width  != h1->mb_width  ||
         h->mb_height != h1->mb_height ||
         !h->ps.sps                    ||
         h->ps.sps->bit_depth_luma    != h1->ps.sps->bit_depth_luma    ||
         h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
         h->ps.sps->colorspace        != h1->ps.sps->colorspace)) {
        need_reinit = 1;
    }

    /* copy block_offset since frame_start may not be called */
    memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));

    // SPS/PPS
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
        av_buffer_unref(&h->ps.sps_list[i]);
        if (h1->ps.sps_list[i]) {
            h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
            if (!h->ps.sps_list[i])
                return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
        av_buffer_unref(&h->ps.pps_list[i]);
        if (h1->ps.pps_list[i]) {
            h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
            if (!h->ps.pps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    av_buffer_unref(&h->ps.pps_ref);
    av_buffer_unref(&h->ps.sps_ref);
    h->ps.pps = NULL;
    h->ps.sps = NULL;
    if (h1->ps.pps_ref) {
        h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
        if (!h->ps.pps_ref)
            return AVERROR(ENOMEM);
        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
    }
    if (h1->ps.sps_ref) {
        h->ps.sps_ref = av_buffer_ref(h1->ps.sps_ref);
        if (!h->ps.sps_ref)
            return AVERROR(ENOMEM);
        h->ps.sps = (const SPS*)h->ps.sps_ref->data;
    }

    if (need_reinit || !inited) {
        h->width     = h1->width;
        h->height    = h1->height;
        h->mb_height = h1->mb_height;
        h->mb_width  = h1->mb_width;
        h->mb_num    = h1->mb_num;
        h->mb_stride = h1->mb_stride;
        h->b_stride  = h1->b_stride;

        if (h->context_initialized || h1->context_initialized) {
            if ((err = h264_slice_header_init(h)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n");
                return err;
            }
        }

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
    }

    h->avctx->coded_height = h1->avctx->coded_height;
    h->avctx->coded_width  = h1->avctx->coded_width;
    h->avctx->width        = h1->avctx->width;
    h->avctx->height       = h1->avctx->height;

    h->coded_picture_number = h1->coded_picture_number;
    h->first_field          = h1->first_field;
    h->picture_structure    = h1->picture_structure;
    h->mb_aff_frame         = h1->mb_aff_frame;
    h->droppable            = h1->droppable;
    h->backup_width         = h1->backup_width;
    h->backup_height        = h1->backup_height;
    h->backup_pix_fmt       = h1->backup_pix_fmt;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ff_h264_unref_picture(h, &h->DPB[i]);
        if (h1->DPB[i].f->buf[0] &&
            (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
            return ret;
    }

    h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
    ff_h264_unref_picture(h, &h->cur_pic);
    if (h1->cur_pic.f->buf[0]) {
        ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
        if (ret < 0)
            return ret;
    }

    h->enable_er       = h1->enable_er;
    h->workaround_bugs = h1->workaround_bugs;
    h->droppable       = h1->droppable;

    // extradata/NAL handling
    h->is_avc          = h1->is_avc;
    h->nal_length_size = h1->nal_length_size;
    h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;

    memcpy(&h->poc, &h1->poc, sizeof(h->poc));

    memcpy(h->default_ref, h1->default_ref, sizeof(h->default_ref));
    memcpy(h->short_ref,   h1->short_ref,   sizeof(h->short_ref));
    memcpy(h->long_ref,    h1->long_ref,    sizeof(h->long_ref));
    memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
    memcpy(h->last_pocs,   h1->last_pocs,   sizeof(h->last_pocs));

    h->next_output_pic   = h1->next_output_pic;
    h->next_outputed_poc = h1->next_outputed_poc;

    memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
    h->nb_mmco              = h1->nb_mmco;
    h->mmco_reset           = h1->mmco_reset;
    h->explicit_ref_marking = h1->explicit_ref_marking;
    h->long_ref_count       = h1->long_ref_count;
    h->short_ref_count      = h1->short_ref_count;

    copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
    copy_picture_range(h->long_ref,  h1->long_ref,  32, h, h1);
    copy_picture_range(h->delayed_pic, h1->delayed_pic,
                       MAX_DELAYED_PIC_COUNT + 2, h, h1);

    h->frame_recovered = h1->frame_recovered;

    if (!h->cur_pic_ptr)
        return 0;

    if (!h->droppable) {
        err = ff_h264_execute_ref_pic_marking(h);
        h->poc.prev_poc_msb = h->poc.poc_msb;
        h->poc.prev_poc_lsb = h->poc.poc_lsb;
    }
    h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
    h->poc.prev_frame_num        = h->poc.frame_num;

    h->recovery_frame = h1->recovery_frame;

    return err;
}
static int h264_frame_start(H264Context *h)
{
    H264Picture *pic;
    int i, ret;
    const int pixel_shift = h->pixel_shift;
    int c[4] = {
        1 << (h->ps.sps->bit_depth_luma   - 1),
        1 << (h->ps.sps->bit_depth_chroma - 1),
        1 << (h->ps.sps->bit_depth_chroma - 1),
        -1
    };

    if (!ff_thread_can_start_frame(h->avctx)) {
        av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    release_unused_pictures(h, 1);
    h->cur_pic_ptr = NULL;

    i = find_unused_picture(h);
    if (i < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        return i;
    }
    pic = &h->DPB[i];

    pic->reference               = h->droppable ? 0 : h->picture_structure;
    pic->f->coded_picture_number = h->coded_picture_number++;
    pic->field_picture           = h->picture_structure != PICT_FRAME;
    pic->frame_num               = h->poc.frame_num;
    /*
     * Zero key_frame here; IDR markings per slice in frame or fields are ORed
     * in later.
     * See decode_nal_units().
     */
    pic->f->key_frame = 0;
    pic->mmco_reset   = 0;
    pic->recovered    = 0;
    pic->invalid_gap  = 0;
    pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;

    pic->f->pict_type = h->slice_ctx[0].slice_type;

    if ((ret = alloc_picture(h, pic)) < 0)
        return ret;
    if (!h->frame_recovered && !h->avctx->hwaccel
#if FF_API_CAP_VDPAU
        && !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
#endif
        )
        ff_color_frame(pic->f, c);

    h->cur_pic_ptr = pic;
    ff_h264_unref_picture(h, &h->cur_pic);
    if (CONFIG_ERROR_RESILIENCE) {
        ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
    }

    if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
        return ret;

    for (i = 0; i < h->nb_slice_ctx; i++) {
        h->slice_ctx[i].linesize   = h->cur_pic_ptr->f->linesize[0];
        h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
    }

    if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
        ff_er_frame_start(&h->slice_ctx[0].er);
        ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
        ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
    }

    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }
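
    /* block_offset[] maps each 4x4 block index to its byte offset from the
     * macroblock's top-left sample: entries 0..15 are luma, 16..47 chroma.
     * The duplicate set from entry 48 on uses a doubled line stride
     * (8 * linesize instead of 4), which is what field macroblocks in an
     * MBAFF pair need, since their rows interleave with the other field's. */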
    /* We mark the current picture as non-reference after allocating it, so
     * that if we break out due to an error it can be released automatically
     * in the next ff_mpv_frame_start().
     */
    h->cur_pic_ptr->reference = 0;

    h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;

    h->next_output_pic = NULL;

    h->postpone_filter = 0;

    h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);

    assert(h->cur_pic_ptr->long_ref == 0);

    return 0;
}
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl,
                                              uint8_t *src_y,
                                              uint8_t *src_cb, uint8_t *src_cr,
                                              int linesize, int uvlinesize,
                                              int simple)
{
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift;
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl)) {
                top_border = sl->top_borders[0][sl->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF(sl)) {
            top_idx = 0;
        } else
            return;
    }

    top_border = sl->top_borders[top_idx][sl->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}
/**
 * Initialize implicit_weight table.
 * @param field  0/1 initialize the weight for interlaced MBAFF
 *               -1 initializes the rest
 */
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
{
    int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;

    for (i = 0; i < 2; i++) {
        sl->pwt.luma_weight_flag[i]   = 0;
        sl->pwt.chroma_weight_flag[i] = 0;
    }

    if (field < 0) {
        if (h->picture_structure == PICT_FRAME) {
            cur_poc = h->cur_pic_ptr->poc;
        } else {
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
        }
        if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
            sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2 * cur_poc) {
            sl->pwt.use_weight        = 0;
            sl->pwt.use_weight_chroma = 0;
            return;
        }
        ref_start  = 0;
        ref_count0 = sl->ref_count[0];
        ref_count1 = sl->ref_count[1];
    } else {
        cur_poc    = h->cur_pic_ptr->field_poc[field];
        ref_start  = 16;
        ref_count0 = 16 + 2 * sl->ref_count[0];
        ref_count1 = 16 + 2 * sl->ref_count[1];
    }

    sl->pwt.use_weight               = 2;
    sl->pwt.use_weight_chroma        = 2;
    sl->pwt.luma_log2_weight_denom   = 5;
    sl->pwt.chroma_log2_weight_denom = 5;

    for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
        int64_t poc0 = sl->ref_list[0][ref0].poc;
        for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
            int w = 32;
            if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
                int poc1 = sl->ref_list[1][ref1].poc;
                int td   = av_clip_int8(poc1 - poc0);
                if (td) {
                    int tb = av_clip_int8(cur_poc - poc0);
                    int tx = (16384 + (FFABS(td) >> 1)) / td;
                    int dist_scale_factor = (tb * tx + 32) >> 8;
                    if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
                        w = 64 - dist_scale_factor;
                }
            }
            if (field < 0) {
                sl->pwt.implicit_weight[ref0][ref1][0] =
                sl->pwt.implicit_weight[ref0][ref1][1] = w;
            } else {
                sl->pwt.implicit_weight[ref0][ref1][field] = w;
            }
        }
    }
}
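
/* A worked example of the scaling above (illustrative numbers): with
 * poc0 = 0, poc1 = 4 and cur_poc = 2, td = 4, tb = 2,
 * tx = (16384 + 2) / 4 = 4096 and dist_scale_factor = (2 * 4096 + 32) >> 8
 * = 32, so w = 64 - 32 = 32: the current picture sits midway between its
 * two references and the pair of implicit weights (w and 64 - w, always
 * summing to 64) splits evenly; the bi-prediction sum is later scaled back
 * by log2_weight_denom + 1 = 6 bits. */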
/**
 * initialize scan tables
 */
static void init_scan_tables(H264Context *h)
{
    int i;

    for (i = 0; i < 16; i++) {
#define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
        h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
        h->field_scan[i]  = TRANSPOSE(field_scan[i]);
#undef TRANSPOSE
    }
    for (i = 0; i < 64; i++) {
#define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
        h->zigzag_scan8x8[i]       = TRANSPOSE(ff_zigzag_direct[i]);
        h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
        h->field_scan8x8[i]        = TRANSPOSE(field_scan8x8[i]);
        h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
    }
    if (h->ps.sps->transform_bypass) { // FIXME same ugly
        memcpy(h->zigzag_scan_q0,          ff_zigzag_scan,       sizeof(h->zigzag_scan_q0));
        memcpy(h->zigzag_scan8x8_q0,       ff_zigzag_direct,     sizeof(h->zigzag_scan8x8_q0));
        memcpy(h->zigzag_scan8x8_cavlc_q0, zigzag_scan8x8_cavlc, sizeof(h->zigzag_scan8x8_cavlc_q0));
        memcpy(h->field_scan_q0,           field_scan,           sizeof(h->field_scan_q0));
        memcpy(h->field_scan8x8_q0,        field_scan8x8,        sizeof(h->field_scan8x8_q0));
        memcpy(h->field_scan8x8_cavlc_q0,  field_scan8x8_cavlc,  sizeof(h->field_scan8x8_cavlc_q0));
    } else {
        memcpy(h->zigzag_scan_q0,          h->zigzag_scan,          sizeof(h->zigzag_scan_q0));
        memcpy(h->zigzag_scan8x8_q0,       h->zigzag_scan8x8,       sizeof(h->zigzag_scan8x8_q0));
        memcpy(h->zigzag_scan8x8_cavlc_q0, h->zigzag_scan8x8_cavlc, sizeof(h->zigzag_scan8x8_cavlc_q0));
        memcpy(h->field_scan_q0,           h->field_scan,           sizeof(h->field_scan_q0));
        memcpy(h->field_scan8x8_q0,        h->field_scan8x8,        sizeof(h->field_scan8x8_q0));
        memcpy(h->field_scan8x8_cavlc_q0,  h->field_scan8x8_cavlc,  sizeof(h->field_scan8x8_cavlc_q0));
    }
}
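
/* The TRANSPOSE macros swap the x/y coordinates of a raster index: in the
 * 4x4 case index x + 4*y becomes y + 4*x (e.g. 1 -> 4), and likewise
 * x + 8*y <-> y + 8*x for 8x8. The runtime scans are therefore the
 * transposes of the tables above, presumably to match the transposed
 * coefficient block layout the DSP routines expect; the _q0 copies keep the
 * untransposed order for lossless transform-bypass blocks. */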
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     CONFIG_H264_D3D11VA_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     (CONFIG_H264_VDA_HWACCEL * 2) + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;
    int i;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else
            *fmt++ = AV_PIX_FMT_YUV420P10;
        break;
    case 12:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_H264_VDA_HWACCEL
            *fmt++ = AV_PIX_FMT_VDA_VLD;
            *fmt++ = AV_PIX_FMT_VDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    for (i = 0; choices[i] != AV_PIX_FMT_NONE; i++)
        if (choices[i] == h->avctx->pix_fmt && !force_callback)
            return choices[i];
    return ff_thread_get_format(h->avctx, choices);
}
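
/* The order of pix_fmts is the order of preference handed to the get_format
 * callback via ff_thread_get_format(): hwaccel formats are pushed ahead of
 * the matching software format, so an application that wants hardware
 * decoding can simply pick the first entry it supports, while the plain
 * YUV formats remain as the software fallback at the end of the list. */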
/* export coded and cropped frame dimensions to AVCodecContext */
static int init_dimensions(H264Context *h)
{
    const SPS *sps = (const SPS*)h->ps.sps;
    int width  = h->width  - (sps->crop_right + sps->crop_left);
    int height = h->height - (sps->crop_top   + sps->crop_bottom);
    av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
    av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);

    /* handle container cropping */
    if (FFALIGN(h->avctx->width,  16) == FFALIGN(width,  16) &&
        FFALIGN(h->avctx->height, 16) == FFALIGN(height, 16) &&
        h->avctx->width  <= width &&
        h->avctx->height <= height) {
        width  = h->avctx->width;
        height = h->avctx->height;
    }

    h->avctx->coded_width  = h->width;
    h->avctx->coded_height = h->height;
    h->avctx->width        = width;
    h->avctx->height       = height;

    return 0;
}
static int h264_slice_header_init(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    int i, ret;

    ff_set_sar(h->avctx, sps->sar);
    av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
                                     &h->chroma_x_shift, &h->chroma_y_shift);

    if (sps->timing_info_present_flag) {
        int64_t den = sps->time_scale;
        if (h->sei.unregistered.x264_build < 44U)
            den *= 2;
        av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
                  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
    }

    ff_h264_free_tables(h);

    h->first_field           = 0;
    h->prev_interlaced_frame = 1;

    init_scan_tables(h);
    ret = ff_h264_alloc_tables(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
        goto fail;
    }

#if FF_API_CAP_VDPAU
    if (h->avctx->codec &&
        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
        (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
        av_log(h->avctx, AV_LOG_ERROR,
               "VDPAU decoding does not support video colorspace.\n");
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }
#endif

    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
        sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) {
        av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
               sps->bit_depth_luma);
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    h->cur_bit_depth_luma         =
    h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
    h->cur_chroma_format_idc      = sps->chroma_format_idc;
    h->pixel_shift                = sps->bit_depth_luma > 8;
    h->chroma_format_idc          = sps->chroma_format_idc;
    h->bit_depth_luma             = sps->bit_depth_luma;

    ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
                    sps->chroma_format_idc);
    ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
    ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
                      sps->chroma_format_idc);
    ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
        ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
            goto fail;
        }
    } else {
        for (i = 0; i < h->nb_slice_ctx; i++) {
            H264SliceContext *sl = &h->slice_ctx[i];

            sl->h264               = h;
            sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[0]       = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[1]       = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;

            if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
                goto fail;
            }
        }
    }

    h->context_initialized = 1;

    return 0;
fail:
    ff_h264_free_tables(h);
    h->context_initialized = 0;
    return ret;
}
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
{
    switch (a) {
    case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
    default:
        return a;
    }
}
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
{
    const SPS *sps;
    int needs_reinit = 0, must_reinit, ret;

    if (first_slice) {
        av_buffer_unref(&h->ps.pps_ref);
        h->ps.pps = NULL;
        h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
        if (!h->ps.pps_ref)
            return AVERROR(ENOMEM);
        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
    }

    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
        av_buffer_unref(&h->ps.sps_ref);
        h->ps.sps = NULL;
        h->ps.sps_ref = av_buffer_ref(h->ps.sps_list[h->ps.pps->sps_id]);
        if (!h->ps.sps_ref)
            return AVERROR(ENOMEM);
        h->ps.sps = (const SPS*)h->ps.sps_ref->data;

        if (h->mb_width  != h->ps.sps->mb_width ||
            h->mb_height != h->ps.sps->mb_height * (2 - h->ps.sps->frame_mbs_only_flag) ||
            h->cur_bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc)
            needs_reinit = 1;

        if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->chroma_format_idc != h->ps.sps->chroma_format_idc)
            needs_reinit = 1;
    }

    sps = h->ps.sps;

    must_reinit = (h->context_initialized &&
                   (   16 * sps->mb_width != h->avctx->coded_width
                    || 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag) != h->avctx->coded_height
                    || h->cur_bit_depth_luma    != sps->bit_depth_luma
                    || h->cur_chroma_format_idc != sps->chroma_format_idc
                    || h->mb_width  != sps->mb_width
                    || h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)
                   ));
    if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
        || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
        must_reinit = 1;

    if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
        must_reinit = 1;

    if (!h->setup_finished) {
        h->avctx->profile = ff_h264_get_profile(sps);
        h->avctx->level   = sps->level_idc;
        h->avctx->refs    = sps->ref_frame_count;

        h->mb_width  = sps->mb_width;
        h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag);
        h->mb_num    = h->mb_width * h->mb_height;
        h->mb_stride = h->mb_width + 1;

        h->b_stride = h->mb_width * 4;

        h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p

        h->width  = 16 * h->mb_width;
        h->height = 16 * h->mb_height;

        ret = init_dimensions(h);
        if (ret < 0)
            return ret;

        if (sps->video_signal_type_present_flag) {
            h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
                                                        : AVCOL_RANGE_MPEG;
            if (sps->colour_description_present_flag) {
                if (h->avctx->colorspace != sps->colorspace)
                    needs_reinit = 1;
                h->avctx->color_primaries = sps->color_primaries;
                h->avctx->color_trc       = sps->color_trc;
                h->avctx->colorspace      = sps->colorspace;
            }
        }
    }

    if (!h->context_initialized || must_reinit || needs_reinit) {
        int flush_changes = h->context_initialized;
        h->context_initialized = 0;
        if (sl != h->slice_ctx) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "changing width %d -> %d / height %d -> %d on "
                   "slice %d\n",
                   h->width, h->avctx->coded_width,
                   h->height, h->avctx->coded_height,
                   h->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }

        av_assert1(first_slice);

        if (flush_changes)
            ff_h264_flush_change(h);

        if ((ret = get_pixel_format(h, 1)) < 0)
            return ret;
        h->avctx->pix_fmt = ret;

        av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
               "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));

        if ((ret = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "h264_slice_header_init() failed\n");
            return ret;
        }
    }

    return 0;
}
static int h264_export_frame_props(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *cur = h->cur_pic_ptr;

    cur->f->interlaced_frame = 0;
    cur->f->repeat_pict      = 0;
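
    /* Note: repeat_pict counts extra display time in half-frame (field)
     * units: the value 1 set below for pic_struct 5/6 means a third field
     * is shown (soft telecine), 2 doubles and 4 triples the frame. */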
    /* Signal interlacing information externally. */
    /* Prioritize picture timing SEI information over the decoding
     * process, if such information exists. */
    if (sps->pic_struct_present_flag) {
        H264SEIPictureTiming *pt = &h->sei.picture_timing;
        switch (pt->pic_struct) {
        case SEI_PIC_STRUCT_FRAME:
            break;
        case SEI_PIC_STRUCT_TOP_FIELD:
        case SEI_PIC_STRUCT_BOTTOM_FIELD:
            cur->f->interlaced_frame = 1;
            break;
        case SEI_PIC_STRUCT_TOP_BOTTOM:
        case SEI_PIC_STRUCT_BOTTOM_TOP:
            if (FIELD_OR_MBAFF_PICTURE(h))
                cur->f->interlaced_frame = 1;
            else
                // try to flag soft telecine progressive
                cur->f->interlaced_frame = h->prev_interlaced_frame;
            break;
        case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
        case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
            /* Signal the possibility of telecined film externally
             * (pic_struct 5,6). From these hints, let the applications
             * decide if they apply deinterlacing. */
            cur->f->repeat_pict = 1;
            break;
        case SEI_PIC_STRUCT_FRAME_DOUBLING:
            cur->f->repeat_pict = 2;
            break;
        case SEI_PIC_STRUCT_FRAME_TRIPLING:
            cur->f->repeat_pict = 4;
            break;
        }

        if ((pt->ct_type & 3) &&
            pt->pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
            cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
    } else {
        /* Derive interlacing flag from used decoding process. */
        cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
    }
    h->prev_interlaced_frame = cur->f->interlaced_frame;

    if (cur->field_poc[0] != cur->field_poc[1]) {
        /* Derive top_field_first from field pocs. */
        cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
    } else {
        if (sps->pic_struct_present_flag) {
            /* Use picture timing SEI information. Even if it is
             * information from a past frame, it is better than nothing. */
            if (h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
                h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
                cur->f->top_field_first = 1;
            else
                cur->f->top_field_first = 0;
        } else if (cur->f->interlaced_frame) {
            /* Default to top field first when pic_struct_present_flag
             * is not set but interlaced frame detected */
            cur->f->top_field_first = 1;
        } else {
            /* Most likely progressive */
            cur->f->top_field_first = 0;
        }
    }

    if (h->sei.frame_packing.present &&
        h->sei.frame_packing.frame_packing_arrangement_type <= 6 &&
        h->sei.frame_packing.content_interpretation_type > 0 &&
        h->sei.frame_packing.content_interpretation_type < 3) {
        H264SEIFramePacking *fp = &h->sei.frame_packing;
        AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
        if (stereo) {
            switch (fp->frame_packing_arrangement_type) {
            case 0:
                stereo->type = AV_STEREO3D_CHECKERBOARD;
                break;
            case 1:
                stereo->type = AV_STEREO3D_COLUMNS;
                break;
            case 2:
                stereo->type = AV_STEREO3D_LINES;
                break;
            case 3:
                if (fp->quincunx_sampling_flag)
                    stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
                else
                    stereo->type = AV_STEREO3D_SIDEBYSIDE;
                break;
            case 4:
                stereo->type = AV_STEREO3D_TOPBOTTOM;
                break;
            case 5:
                stereo->type = AV_STEREO3D_FRAMESEQUENCE;
                break;
            case 6:
                stereo->type = AV_STEREO3D_2D;
                break;
            }

            if (fp->content_interpretation_type == 2)
                stereo->flags = AV_STEREO3D_FLAG_INVERT;
        }
    }

    if (h->sei.display_orientation.present &&
        (h->sei.display_orientation.anticlockwise_rotation ||
         h->sei.display_orientation.hflip ||
         h->sei.display_orientation.vflip)) {
        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
        AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
                                                           AV_FRAME_DATA_DISPLAYMATRIX,
                                                           sizeof(int32_t) * 9);
        if (rotation) {
            av_display_rotation_set((int32_t *)rotation->data, angle);
            av_display_matrix_flip((int32_t *)rotation->data,
                                   o->hflip, o->vflip);
        }
    }

    if (h->sei.afd.present) {
        AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
                                                     sizeof(uint8_t));
        if (sd) {
            *sd->data = h->sei.afd.active_format_description;
            h->sei.afd.present = 0;
        }
    }

    if (h->sei.a53_caption.a53_caption) {
        H264SEIA53Caption *a53 = &h->sei.a53_caption;
        AVFrameSideData *sd = av_frame_new_side_data(cur->f,
                                                     AV_FRAME_DATA_A53_CC,
                                                     a53->a53_caption_size);
        if (sd)
            memcpy(sd->data, a53->a53_caption, a53->a53_caption_size);
        av_freep(&a53->a53_caption);
        a53->a53_caption_size = 0;
        h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
    }

    return 0;
}
/* This function is called right after decoding the slice header for a first
 * slice in a field (or a frame). It decides whether we are decoding a new frame
 * or a second field in a pair and does the necessary setup.
 */
static int h264_field_start(H264Context *h, const H264SliceContext *sl,
                            const H2645NAL *nal, int first_slice)
{
    int i;
    const SPS *sps;

    int last_pic_structure, last_pic_droppable, ret;

    ret = h264_init_ps(h, sl, first_slice);
    if (ret < 0)
        return ret;

    sps = h->ps.sps;

    last_pic_droppable   = h->droppable;
    last_pic_structure   = h->picture_structure;
    h->droppable         = (nal->ref_idc == 0);
    h->picture_structure = sl->picture_structure;

    h->poc.frame_num        = sl->frame_num;
    h->poc.poc_lsb          = sl->poc_lsb;
    h->poc.delta_poc_bottom = sl->delta_poc_bottom;
    h->poc.delta_poc[0]     = sl->delta_poc[0];
    h->poc.delta_poc[1]     = sl->delta_poc[1];

    /* Shorten frame num gaps so we don't have to allocate reference
     * frames just to throw them away */
    if (h->poc.frame_num != h->poc.prev_frame_num) {
        int unwrap_prev_frame_num = h->poc.prev_frame_num;
        int max_frame_num         = 1 << sps->log2_max_frame_num;

        if (unwrap_prev_frame_num > h->poc.frame_num)
            unwrap_prev_frame_num -= max_frame_num;

        if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
            unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
            if (unwrap_prev_frame_num < 0)
                unwrap_prev_frame_num += max_frame_num;

            h->poc.prev_frame_num = unwrap_prev_frame_num;
        }
    }
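
    /* For illustration (made-up numbers): with max_frame_num = 256,
     * ref_frame_count = 4, prev_frame_num = 250 and frame_num = 2, the
     * unwrapped previous frame num is -6 and the gap of 8 exceeds 4, so
     * prev_frame_num is pulled up to (2 - 4) - 1 + 256 = 253. The
     * gap-concealment loop further below then only has to invent frames
     * 254, 255, 0 and 1 instead of all seven in-between frame numbers. */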
    /* See if we have a decoded first field looking for a pair...
     * Here, we're using that to see if we should mark previously
     * decoded frames as "finished".
     * We have to do that before the "dummy" in-between frame allocation,
     * since that can modify h->cur_pic_ptr. */
    if (h->first_field) {
        av_assert0(h->cur_pic_ptr);
        av_assert0(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* Mark old field/frame as completed */
        if (h->cur_pic_ptr->tf.owner == h->avctx) {
            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                      last_pic_structure == PICT_BOTTOM_FIELD);
        }

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            if (last_pic_structure != PICT_FRAME) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          last_pic_structure == PICT_TOP_FIELD);
            }
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and previous field were reference, but had
                 * different frame_nums. Consider this field first in
                 * pair. Throw away previous field except for reference
                 * purposes. */
                if (last_pic_structure != PICT_FRAME) {
                    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                              last_pic_structure == PICT_TOP_FIELD);
                }
            } else {
                /* Second field in complementary pair */
                if (!((last_pic_structure == PICT_TOP_FIELD &&
                       h->picture_structure == PICT_BOTTOM_FIELD) ||
                      (last_pic_structure == PICT_BOTTOM_FIELD &&
                       h->picture_structure == PICT_TOP_FIELD))) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "Invalid field mode combination %d/%d\n",
                           last_pic_structure, h->picture_structure);
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_INVALIDDATA;
                } else if (last_pic_droppable != h->droppable) {
                    avpriv_request_sample(h->avctx,
                                          "Found reference and non-reference fields in the same frame, which");
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_PATCHWELCOME;
                }
            }
        }
    }
    while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
           h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
        H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
        av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
               h->poc.frame_num, h->poc.prev_frame_num);
        if (!sps->gaps_in_frame_num_allowed_flag)
            for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                h->last_pocs[i] = INT_MIN;
        ret = h264_frame_start(h);
        if (ret < 0) {
            h->first_field = 0;
            return ret;
        }

        h->poc.prev_frame_num++;
        h->poc.prev_frame_num      %= 1 << sps->log2_max_frame_num;
        h->cur_pic_ptr->frame_num   = h->poc.prev_frame_num;
        h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
        h->explicit_ref_marking = 0;
        ret = ff_h264_execute_ref_pic_marking(h);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            return ret;
        /* Error concealment: If a ref is missing, copy the previous ref
         * in its place.
         * FIXME: Avoiding a memcpy would be nice, but ref handling makes
         * many assumptions about there being no actual duplicates.
         * FIXME: This does not copy padding for out-of-frame motion
         * vectors. Given we are concealing a lost frame, this probably
         * is not noticeable by comparison, but it should be fixed. */
        if (h->short_ref_count) {
            if (prev &&
                h->short_ref[0]->f->width  == prev->f->width  &&
                h->short_ref[0]->f->height == prev->f->height &&
                h->short_ref[0]->f->format == prev->f->format) {
                av_image_copy(h->short_ref[0]->f->data,
                              h->short_ref[0]->f->linesize,
                              (const uint8_t **)prev->f->data,
                              prev->f->linesize,
                              prev->f->format,
                              prev->f->width,
                              prev->f->height);
                h->short_ref[0]->poc = prev->poc + 2;
            }
            h->short_ref[0]->frame_num = h->poc.prev_frame_num;
        }
    }
    /* See if we have a decoded first field looking for a pair...
     * We're using that to see whether to continue decoding in that
     * frame, or to allocate a new one. */
    if (h->first_field) {
        av_assert0(h->cur_pic_ptr);
        av_assert0(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            h->missing_fields++;
            h->cur_pic_ptr = NULL;
            h->first_field = FIELD_PICTURE(h);
        } else {
            h->missing_fields = 0;
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          h->picture_structure == PICT_BOTTOM_FIELD);
                /* This and the previous field had different frame_nums.
                 * Consider this field first in pair. Throw away previous
                 * one except for reference purposes. */
                h->first_field = 1;
                h->cur_pic_ptr = NULL;
            } else {
                /* Second field in complementary pair */
                h->first_field = 0;
            }
        }
    } else {
        /* Frame or first field in a potentially complementary pair */
        h->first_field = FIELD_PICTURE(h);
    }

    if (!FIELD_PICTURE(h) || h->first_field) {
        if (h264_frame_start(h) < 0) {
            h->first_field = 0;
            return AVERROR_INVALIDDATA;
        }
    } else {
        release_unused_pictures(h, 0);
    }

    /* Some macroblocks can be accessed before they're available in case
     * of lost slices, MBAFF or threading. */
    if (FIELD_PICTURE(h)) {
        for (i = (h->picture_structure == PICT_BOTTOM_FIELD); i < h->mb_height; i++)
            memset(h->slice_table + i * h->mb_stride, -1,
                   (h->mb_stride - (i + 1 == h->mb_height)) * sizeof(*h->slice_table));
    } else {
        memset(h->slice_table, -1,
               (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
    }

    ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
                     h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);

    memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
    h->nb_mmco = sl->nb_mmco;
    h->explicit_ref_marking = sl->explicit_ref_marking;

    /* Set the frame properties/side data. Only done for the second field in
     * field coded frames, since some SEI information is present for each field
     * and is merged by the SEI parsing code. */
    if (!FIELD_PICTURE(h) || !h->first_field) {
        ret = h264_export_frame_props(h);
        if (ret < 0)
            return ret;
    }

    return 0;
}
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
                                   const H2645NAL *nal)
{
    const SPS *sps;
    const PPS *pps;
    int ret;
    unsigned int slice_type, tmp, i;
    int field_pic_flag, bottom_field_flag;
    int first_slice = sl == h->slice_ctx && !h->current_slice;
    int picture_structure;

    if (first_slice)
        av_assert0(!h->setup_finished);

    sl->first_mb_addr = get_ue_golomb_long(&sl->gb);

    slice_type = get_ue_golomb_31(&sl->gb);
    if (slice_type > 9) {
        av_log(h->avctx, AV_LOG_ERROR,
               "slice type %d too large at %d\n",
               slice_type, sl->first_mb_addr);
        return AVERROR_INVALIDDATA;
    }
    if (slice_type > 4) {
        slice_type -= 5;
        sl->slice_type_fixed = 1;
    } else
        sl->slice_type_fixed = 0;

    slice_type         = ff_h264_golomb_to_pict_type[slice_type];
    sl->slice_type     = slice_type;
    sl->slice_type_nos = slice_type & 3;

    if (nal->type == NAL_IDR_SLICE &&
        sl->slice_type_nos != AV_PICTURE_TYPE_I) {
        av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
        return AVERROR_INVALIDDATA;
    }

    sl->pps_id = get_ue_golomb(&sl->gb);
    if (sl->pps_id >= MAX_PPS_COUNT) {
        av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
        return AVERROR_INVALIDDATA;
    }
    if (!h->ps.pps_list[sl->pps_id]) {
        av_log(h->avctx, AV_LOG_ERROR,
               "non-existing PPS %u referenced\n",
               sl->pps_id);
        return AVERROR_INVALIDDATA;
    }
    pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;

    if (!h->ps.sps_list[pps->sps_id]) {
        av_log(h->avctx, AV_LOG_ERROR,
               "non-existing SPS %u referenced\n", pps->sps_id);
        return AVERROR_INVALIDDATA;
    }
    sps = (const SPS*)h->ps.sps_list[pps->sps_id]->data;

    sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
    if (!first_slice) {
        if (h->poc.frame_num != sl->frame_num) {
            av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
                   h->poc.frame_num, sl->frame_num);
            return AVERROR_INVALIDDATA;
        }
    }

    sl->mb_mbaff = 0;

    if (sps->frame_mbs_only_flag) {
        picture_structure = PICT_FRAME;
    } else {
        if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
            av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
            return -1;
        }
        field_pic_flag = get_bits1(&sl->gb);
        if (field_pic_flag) {
            bottom_field_flag = get_bits1(&sl->gb);
            picture_structure = PICT_TOP_FIELD + bottom_field_flag;
        } else {
            picture_structure = PICT_FRAME;
        }
    }
    sl->picture_structure      = picture_structure;
    sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;

    if (picture_structure == PICT_FRAME) {
        sl->curr_pic_num = sl->frame_num;
        sl->max_pic_num  = 1 << sps->log2_max_frame_num;
    } else {
        sl->curr_pic_num = 2 * sl->frame_num + 1;
        sl->max_pic_num  = 1 << (sps->log2_max_frame_num + 1);
    }
  1417. if (nal->type == NAL_IDR_SLICE)
  1418. get_ue_golomb_long(&sl->gb); /* idr_pic_id */
  1419. if (sps->poc_type == 0) {
  1420. sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
  1421. if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
  1422. sl->delta_poc_bottom = get_se_golomb(&sl->gb);
  1423. }
  1424. if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
  1425. sl->delta_poc[0] = get_se_golomb(&sl->gb);
  1426. if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
  1427. sl->delta_poc[1] = get_se_golomb(&sl->gb);
  1428. }
  1429. if (pps->redundant_pic_cnt_present)
  1430. sl->redundant_pic_count = get_ue_golomb(&sl->gb);
  1431. if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
  1432. sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
  1433. ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
  1434. &sl->gb, pps, sl->slice_type_nos,
  1435. picture_structure, h->avctx);
  1436. if (ret < 0)
  1437. return ret;
  1438. if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
  1439. ret = ff_h264_decode_ref_pic_list_reordering(h, sl);
  1440. if (ret < 0) {
  1441. sl->ref_count[1] = sl->ref_count[0] = 0;
  1442. return ret;
  1443. }
  1444. }
  1445. sl->pwt.use_weight = 0;
  1446. for (i = 0; i < 2; i++) {
  1447. sl->pwt.luma_weight_flag[i] = 0;
  1448. sl->pwt.chroma_weight_flag[i] = 0;
  1449. }
  1450. if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
  1451. (pps->weighted_bipred_idc == 1 &&
  1452. sl->slice_type_nos == AV_PICTURE_TYPE_B))
  1453. ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
  1454. sl->slice_type_nos, &sl->pwt, h->avctx);
  1455. sl->explicit_ref_marking = 0;
  1456. if (nal->ref_idc) {
  1457. ret = ff_h264_decode_ref_pic_marking(h, sl, &sl->gb);
  1458. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1459. return AVERROR_INVALIDDATA;
  1460. }
  1461. if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
  1462. tmp = get_ue_golomb_31(&sl->gb);
  1463. if (tmp > 2) {
  1464. av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
  1465. return AVERROR_INVALIDDATA;
  1466. }
  1467. sl->cabac_init_idc = tmp;
  1468. }
  1469. sl->last_qscale_diff = 0;
  1470. tmp = pps->init_qp + get_se_golomb(&sl->gb);
  1471. if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
  1472. av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
  1473. return AVERROR_INVALIDDATA;
  1474. }
  1475. sl->qscale = tmp;
  1476. sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
  1477. sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
  1478. // FIXME qscale / qp ... stuff
  1479. if (sl->slice_type == AV_PICTURE_TYPE_SP)
  1480. get_bits1(&sl->gb); /* sp_for_switch_flag */
  1481. if (sl->slice_type == AV_PICTURE_TYPE_SP ||
  1482. sl->slice_type == AV_PICTURE_TYPE_SI)
  1483. get_se_golomb(&sl->gb); /* slice_qs_delta */
  1484. sl->deblocking_filter = 1;
  1485. sl->slice_alpha_c0_offset = 0;
  1486. sl->slice_beta_offset = 0;
  1487. if (pps->deblocking_filter_parameters_present) {
  1488. tmp = get_ue_golomb_31(&sl->gb);
  1489. if (tmp > 2) {
  1490. av_log(h->avctx, AV_LOG_ERROR,
  1491. "deblocking_filter_idc %u out of range\n", tmp);
  1492. return AVERROR_INVALIDDATA;
  1493. }
  1494. sl->deblocking_filter = tmp;
  1495. if (sl->deblocking_filter < 2)
  1496. sl->deblocking_filter ^= 1; // 1<->0
  1497. if (sl->deblocking_filter) {
  1498. sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2;
  1499. sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2;
  1500. if (sl->slice_alpha_c0_offset > 12 ||
  1501. sl->slice_alpha_c0_offset < -12 ||
  1502. sl->slice_beta_offset > 12 ||
  1503. sl->slice_beta_offset < -12) {
  1504. av_log(h->avctx, AV_LOG_ERROR,
  1505. "deblocking filter parameters %d %d out of range\n",
  1506. sl->slice_alpha_c0_offset, sl->slice_beta_offset);
  1507. return AVERROR_INVALIDDATA;
  1508. }
  1509. }
  1510. }
  1511. return 0;
  1512. }

/**
 * Decode a slice header.
 * This will (re)initialize the decoder and call h264_frame_start() as needed.
 *
 * @param h the H264 master context
 *
 * @return 0 if okay, <0 if an error occurred
 */
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
                                const H2645NAL *nal)
{
    int i, j, ret = 0;
    int first_slice = sl == h->slice_ctx && !h->current_slice;

    ret = h264_slice_header_parse(h, sl, nal);
    if (ret < 0)
        return ret;

    if (sl->first_mb_addr == 0 || !h->current_slice) {
        if (h->setup_finished) {
            av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
        if (h->current_slice) {
            if (h->max_contexts > 1) {
                if (!h->single_decode_warning) {
                    av_log(h->avctx, AV_LOG_WARNING, "Cannot decode multiple access units as slice threads\n");
                    h->single_decode_warning = 1;
                }
                h->max_contexts = 1;
                return SLICE_SINGLETHREAD;
            }

            if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
                ret = ff_h264_field_end(h, h->slice_ctx, 1);
                h->current_slice = 0;
                if (ret < 0)
                    return ret;
            } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == NAL_IDR_SLICE) {
                av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
                ret = ff_h264_field_end(h, h->slice_ctx, 1);
                h->current_slice = 0;
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
                h->cur_pic_ptr = NULL;
                if (ret < 0)
                    return ret;
            } else
                return AVERROR_INVALIDDATA;
        }

        if (!h->first_field) {
            if (h->cur_pic_ptr && !h->droppable) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          h->picture_structure == PICT_BOTTOM_FIELD);
            }
            h->cur_pic_ptr = NULL;
        }
    }

    if (!h->current_slice)
        av_assert0(sl == h->slice_ctx);

    if (h->current_slice == 0 && !h->first_field) {
        if (
            (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
            (h->avctx->skip_frame >= AVDISCARD_BIDIR  && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
            (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
            (h->avctx->skip_frame >= AVDISCARD_NONKEY &&
             h->nal_unit_type != NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
            h->avctx->skip_frame >= AVDISCARD_ALL) {
            return SLICE_SKIPED;
        }
    }

    if (!first_slice) {
        const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;

        if (h->ps.pps->sps_id != pps->sps_id ||
            h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
            (h->setup_finished && h->ps.pps != pps)*/) {
            av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
            return AVERROR_INVALIDDATA;
        }
        if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "SPS changed in the middle of the frame\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (h->current_slice == 0) {
        ret = h264_field_start(h, sl, nal, first_slice);
        if (ret < 0)
            return ret;
    } else {
        if (h->picture_structure != sl->picture_structure ||
            h->droppable         != (nal->ref_idc == 0)) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "Changing field mode (%d -> %d) between slices is not allowed\n",
                   h->picture_structure, sl->picture_structure);
            return AVERROR_INVALIDDATA;
        } else if (!h->cur_pic_ptr) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "unset cur_pic_ptr on slice %d\n",
                   h->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }
    }

    av_assert1(h->mb_num == h->mb_width * h->mb_height);
    if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
        sl->first_mb_addr >= h->mb_num) {
        av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
        return AVERROR_INVALIDDATA;
    }
    sl->resync_mb_x = sl->mb_x =  sl->first_mb_addr % h->mb_width;
    sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
                                 FIELD_OR_MBAFF_PICTURE(h);
    if (h->picture_structure == PICT_BOTTOM_FIELD)
        sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
    av_assert1(sl->mb_y < h->mb_height);

    ret = ff_h264_build_ref_list(h, sl);
    if (ret < 0)
        return ret;

    if (h->ps.pps->weighted_bipred_idc == 2 &&
        sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        implicit_weight_table(h, sl, -1);
        if (FRAME_MBAFF(h)) {
            implicit_weight_table(h, sl, 0);
            implicit_weight_table(h, sl, 1);
        }
    }

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
        ff_h264_direct_dist_scale_factor(h, sl);
    ff_h264_direct_ref_list_init(h, sl);

    if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
         h->nal_unit_type != NAL_IDR_SLICE) ||
        (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
         sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
        (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR  &&
         sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
        (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
         nal->ref_idc == 0))
        sl->deblocking_filter = 0;

    if (sl->deblocking_filter == 1 && h->max_contexts > 1) {
        if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
            /* Cheat slightly for speed:
             * Do not bother to deblock across slices. */
            sl->deblocking_filter = 2;
        } else {
            h->postpone_filter = 1;
        }
    }
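
    /* QP threshold below which deblocking is guaranteed to be a no-op for a
     * macroblock: the alpha/beta filter-strength tables are zero for small
     * indices, so fill_filter_caches() can skip such MBs early. The constant
     * is a conservative estimate; see the check in fill_filter_caches(). */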
    sl->qp_thresh = 15 -
                    FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
                    FFMAX3(0,
                           h->ps.pps->chroma_qp_index_offset[0],
                           h->ps.pps->chroma_qp_index_offset[1]) +
                    6 * (h->ps.sps->bit_depth_luma - 8);

    sl->slice_num = ++h->current_slice;

    if (sl->slice_num)
        h->slice_row[(sl->slice_num - 1) & (MAX_SLICES - 1)] = sl->resync_mb_y;
    if (   h->slice_row[sl->slice_num & (MAX_SLICES - 1)] + 3 >= sl->resync_mb_y
        && h->slice_row[sl->slice_num & (MAX_SLICES - 1)] <= sl->resync_mb_y
        && sl->slice_num >= MAX_SLICES) {
        // in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
        av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), "
               "increase MAX_SLICES and recompile if there are artifacts\n",
               sl->slice_num, MAX_SLICES);
    }
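
    /* Build the ref2frm tables for this slice: map each reference index of
     * each list to a small picture id (4 * position in the short/long-term
     * reference lists, with the top/bottom reference flags in the two low
     * bits). The loop filter uses these ids to compare the references of
     * neighbouring macroblocks that may belong to other slices. Entries 0/1
     * (and 18/19 for the MBAFF half of the table) absorb the "no reference"
     * indices -2/-1. */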
    for (j = 0; j < 2; j++) {
        int id_list[16];
        int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
        for (i = 0; i < 16; i++) {
            id_list[i] = 60;
            if (j < sl->list_count && i < sl->ref_count[j] &&
                sl->ref_list[j][i].parent->f->buf[0]) {
                int k;
                AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
                for (k = 0; k < h->short_ref_count; k++)
                    if (h->short_ref[k]->f->buf[0]->buffer == buf) {
                        id_list[i] = k;
                        break;
                    }
                for (k = 0; k < h->long_ref_count; k++)
                    if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
                        id_list[i] = h->short_ref_count + k;
                        break;
                    }
            }
        }

        ref2frm[0] =
        ref2frm[1] = -1;
        for (i = 0; i < 16; i++)
            ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
        ref2frm[18 + 0] =
        ref2frm[18 + 1] = -1;
        for (i = 16; i < 48; i++)
            ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
                             (sl->ref_list[j][i].reference & 3);
    }

    if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(h->avctx, AV_LOG_DEBUG,
               "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
               sl->slice_num,
               (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
               sl->mb_y * h->mb_width + sl->mb_x,
               av_get_picture_type_char(sl->slice_type),
               sl->slice_type_fixed ? " fix" : "",
               nal->type == NAL_IDR_SLICE ? " IDR" : "",
               h->poc.frame_num,
               h->cur_pic_ptr->field_poc[0],
               h->cur_pic_ptr->field_poc[1],
               sl->ref_count[0], sl->ref_count[1],
               sl->qscale,
               sl->deblocking_filter,
               sl->slice_alpha_c0_offset, sl->slice_beta_offset,
               sl->pwt.use_weight,
               sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
               sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
    }

    return 0;
}
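
/**
 * @return the value of the slice_type syntax element corresponding to
 *         sl->slice_type (0 = P, 1 = B, 2 = I, 3 = SP, 4 = SI), or
 *         AVERROR_INVALIDDATA for picture types with no H.264 equivalent
 */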
int ff_h264_get_slice_type(const H264SliceContext *sl)
{
    switch (sl->slice_type) {
    case AV_PICTURE_TYPE_P:
        return 0;
    case AV_PICTURE_TYPE_B:
        return 1;
    case AV_PICTURE_TYPE_I:
        return 2;
    case AV_PICTURE_TYPE_SP:
        return 3;
    case AV_PICTURE_TYPE_SI:
        return 4;
    default:
        return AVERROR_INVALIDDATA;
    }
}
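
/**
 * Fill the motion vector and reference caches along the top and left edges
 * of the current macroblock for one reference list, so the loop filter can
 * compare MVs and references across macroblock (and slice) boundaries.
 */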
static av_always_inline void fill_filter_caches_inter(const H264Context *h,
                                                      H264SliceContext *sl,
                                                      int mb_type, int top_xy,
                                                      int left_xy[LEFT_MBS],
                                                      int top_type,
                                                      int left_type[LEFT_MBS],
                                                      int mb_xy, int list)
{
    int b_stride = h->b_stride;
    int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
    int8_t *ref_cache   = &sl->ref_cache[list][scan8[0]];
    if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
        if (USES_LIST(top_type, list)) {
            const int b_xy  = h->mb2b_xy[top_xy] + 3 * b_stride;
            const int b8_xy = 4 * top_xy + 2;
            const int *ref2frm = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
            AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
            ref_cache[0 - 1 * 8] =
            ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
            ref_cache[2 - 1 * 8] =
            ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
        } else {
            AV_ZERO128(mv_dst - 1 * 8);
            AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        }

        if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
            if (USES_LIST(left_type[LTOP], list)) {
                const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                const int b8_xy = 4 * left_xy[LTOP] + 1;
                const int *ref2frm = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
                AV_COPY32(mv_dst - 1 +  0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
                AV_COPY32(mv_dst - 1 +  8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
                AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
                AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
                ref_cache[-1 +  0] =
                ref_cache[-1 +  8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
            } else {
                AV_ZERO32(mv_dst - 1 +  0);
                AV_ZERO32(mv_dst - 1 +  8);
                AV_ZERO32(mv_dst - 1 + 16);
                AV_ZERO32(mv_dst - 1 + 24);
                ref_cache[-1 +  0] =
                ref_cache[-1 +  8] =
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = LIST_NOT_USED;
            }
        }
    }

    if (!USES_LIST(mb_type, list)) {
        fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
        AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        return;
    }

    {
        int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
        const int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
        uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
        uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
        AV_WN32A(&ref_cache[0 * 8], ref01);
        AV_WN32A(&ref_cache[1 * 8], ref01);
        AV_WN32A(&ref_cache[2 * 8], ref23);
        AV_WN32A(&ref_cache[3 * 8], ref23);
    }

    {
        int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
        AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
        AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
        AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
        AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
    }
}

/**
 * @return non-zero if the loop filter can be skipped
 */
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int top_xy, left_xy[LEFT_MBS];
    int top_type, left_type[LEFT_MBS];
    uint8_t *nnz;
    uint8_t *nnz_cache;

    top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));

    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LTOP] -= h->mb_stride;
        } else {
            if (curr_mb_field_flag)
                top_xy += h->mb_stride &
                          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LBOT] += h->mb_stride;
        }
    }

    sl->top_mb_xy        = top_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    {
        /* For sufficiently low qp, filtering wouldn't do anything.
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
        int qp        = h->cur_pic.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
             ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
             ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF(h))
                return 1;
            if ((left_xy[LTOP] < 0 ||
                 ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < h->mb_stride ||
                 ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }

    top_type        = h->cur_pic.mb_type[top_xy];
    left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
    if (sl->deblocking_filter == 2) {
        if (h->slice_table[top_xy] != sl->slice_num)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
            left_type[LTOP] = left_type[LBOT] = 0;
    } else {
        if (h->slice_table[top_xy] == 0xFFFF)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
            left_type[LTOP] = left_type[LBOT] = 0;
    }
    sl->top_type        = top_type;
    sl->left_type[LTOP] = left_type[LTOP];
    sl->left_type[LBOT] = left_type[LBOT];

    if (IS_INTRA(mb_type))
        return 0;

    fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                             top_type, left_type, mb_xy, 0);
    if (sl->list_count == 2)
        fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                                 top_type, left_type, mb_xy, 1);

    nnz       = h->non_zero_count[mb_xy];
    nnz_cache = sl->non_zero_count_cache;
    AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
    AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
    AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
    AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
    sl->cbp = h->cbp_table[mb_xy];

    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
    }

    if (left_type[LTOP]) {
        nnz = h->non_zero_count[left_xy[LTOP]];
        nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
        nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
        nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
        nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
    }

    /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
     * from what the loop filter needs */
    if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
        if (IS_8x8DCT(top_type)) {
            nnz_cache[4 + 8 * 0] =
            nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
            nnz_cache[6 + 8 * 0] =
            nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
        }
        if (IS_8x8DCT(left_type[LTOP])) {
            nnz_cache[3 + 8 * 1] =
            nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
        }
        if (IS_8x8DCT(left_type[LBOT])) {
            nnz_cache[3 + 8 * 3] =
            nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
        }

        if (IS_8x8DCT(mb_type)) {
            nnz_cache[scan8[0]] =
            nnz_cache[scan8[1]] =
            nnz_cache[scan8[2]] =
            nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;

            nnz_cache[scan8[0 + 4]] =
            nnz_cache[scan8[1 + 4]] =
            nnz_cache[scan8[2 + 4]] =
            nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;

            nnz_cache[scan8[0 + 8]] =
            nnz_cache[scan8[1 + 8]] =
            nnz_cache[scan8[2 + 8]] =
            nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;

            nnz_cache[scan8[0 + 12]] =
            nnz_cache[scan8[1 + 12]] =
            nnz_cache[scan8[2 + 12]] =
            nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
        }
    }

    return 0;
}
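
/**
 * Apply the deblocking filter to the macroblock columns [start_x, end_x)
 * of the current row (both rows of the pair when MBAFF is in use), then
 * restore the slice-level chroma QP values.
 */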
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
{
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize, mb_x, mb_y;
    const int end_mb_y       = sl->mb_y + FRAME_MBAFF(h);
    const int old_slice_type = sl->slice_type;
    const int pixel_shift    = h->pixel_shift;
    const int block_h        = 16 >> h->chroma_y_shift;

    if (h->postpone_filter)
        return;

    if (sl->deblocking_filter) {
        for (mb_x = start_x; mb_x < end_x; mb_x++)
            for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
                int mb_xy, mb_type;
                mb_xy   = sl->mb_xy = mb_x + mb_y * h->mb_stride;
                mb_type = h->cur_pic.mb_type[mb_xy];

                if (FRAME_MBAFF(h))
                    sl->mb_mbaff               =
                    sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);

                sl->mb_x = mb_x;
                sl->mb_y = mb_y;
                dest_y  = h->cur_pic.f->data[0] +
                          ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
                dest_cb = h->cur_pic.f->data[1] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                dest_cr = h->cur_pic.f->data[2] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                // FIXME simplify above

                if (MB_FIELD(sl)) {
                    linesize   = sl->mb_linesize   = sl->linesize   * 2;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
                    if (mb_y & 1) { // FIXME move out of this function?
                        dest_y  -= sl->linesize   * 15;
                        dest_cb -= sl->uvlinesize * (block_h - 1);
                        dest_cr -= sl->uvlinesize * (block_h - 1);
                    }
                } else {
                    linesize   = sl->mb_linesize   = sl->linesize;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
                }
                backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
                                 uvlinesize, 0);
                if (fill_filter_caches(h, sl, mb_type))
                    continue;
                sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
                sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);

                if (FRAME_MBAFF(h)) {
                    ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
                                      linesize, uvlinesize);
                } else {
                    ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
                                           dest_cr, linesize, uvlinesize);
                }
            }
    }
    sl->slice_type   = old_slice_type;
    sl->mb_x         = end_x;
    sl->mb_y         = end_mb_y - FRAME_MBAFF(h);
    sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
    sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
}
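
/**
 * Predict the field decoding flag of the next macroblock (pair): take it
 * from the left neighbour if that one belongs to this slice, otherwise
 * from the neighbour above; default to frame decoding when neither does.
 */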
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
    int mb_type     = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
                      h->cur_pic.mb_type[mb_xy - 1] :
                      (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
                      h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
    sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}

/**
 * Draw edges and report progress for the last MB row.
 */
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
{
    int top            = 16 * (sl->mb_y >> FIELD_PICTURE(h));
    int pic_height     = 16 *  h->mb_height >> FIELD_PICTURE(h);
    int height         =  16      << FRAME_MBAFF(h);
    int deblock_border = (16 + 4) << FRAME_MBAFF(h);

    if (sl->deblocking_filter) {
        if ((top + height) >= pic_height)
            height += deblock_border;
        top -= deblock_border;
    }

    if (top >= pic_height || (top + height) < 0)
        return;

    height = FFMIN(height, pic_height - top);
    if (top < 0) {
        height = top + height;
        top    = 0;
    }

    ff_h264_draw_horiz_band(h, sl, top, height);

    if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
        return;

    ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
                              h->picture_structure == PICT_BOTTOM_FIELD);
}
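
/* Thin wrapper around ff_er_add_slice(); a no-op when error resilience is
 * disabled. Error-resilience state is always kept on slice context 0. */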
static void er_add_slice(H264SliceContext *sl,
                         int startx, int starty,
                         int endx, int endy, int status)
{
    if (!sl->h264->enable_er)
        return;

    if (CONFIG_ERROR_RESILIENCE) {
        ERContext *er = &sl->h264->slice_ctx[0].er;

        ff_er_add_slice(er, startx, starty, endx, endy, status);
    }
}
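
/**
 * Decode all macroblocks of a single slice; this is the per-slice-context
 * worker that ff_h264_execute_decode_slices() passes to avctx->execute().
 * It covers both the CABAC and the CAVLC entropy coding paths.
 */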
static int decode_slice(struct AVCodecContext *avctx, void *arg)
{
    H264SliceContext *sl = arg;
    const H264Context *h = sl->h264;
    int lf_x_start   = sl->mb_x;
    int orig_deblock = sl->deblocking_filter;
    int ret;

    sl->linesize   = h->cur_pic_ptr->f->linesize[0];
    sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];

    ret = alloc_scratch_buffers(sl, sl->linesize);
    if (ret < 0)
        return ret;

    sl->mb_skip_run = -1;

    av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));

    if (h->postpone_filter)
        sl->deblocking_filter = 0;

    sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
                     avctx->codec_id != AV_CODEC_ID_H264 ||
                     (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));

    if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
        const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
        if (start_i) {
            int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
            prev_status &= ~ VP_START;
            if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
                h->slice_ctx[0].er.error_occurred = 1;
        }
    }

    if (h->ps.pps->cabac) {
        /* realign */
        align_get_bits(&sl->gb);

        /* init cabac */
        ret = ff_init_cabac_decoder(&sl->cabac,
                                    sl->gb.buffer + get_bits_count(&sl->gb) / 8,
                                    (get_bits_left(&sl->gb) + 7) / 8);
        if (ret < 0)
            return ret;

        ff_h264_init_cabac_states(h, sl);

        for (;;) {
            // START_TIMER
            int ret, eos;

            if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
                av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
                       sl->next_slice_idx);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return AVERROR_INVALIDDATA;
            }

            ret = ff_h264_decode_mb_cabac(h, sl);
            // STOP_TIMER("decode_mb_cabac")

            if (ret >= 0)
                ff_h264_hl_decode_mb(h, sl);

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF(h)) {
                sl->mb_y++;

                ret = ff_h264_decode_mb_cabac(h, sl);

                if (ret >= 0)
                    ff_h264_hl_decode_mb(h, sl);
                sl->mb_y--;
            }
            eos = get_cabac_terminate(&sl->cabac);

            if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
                sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
                             sl->mb_y, ER_MB_END);
                if (sl->mb_x >= lf_x_start)
                    loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
                goto finish;
            }
            if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2)
                av_log(h->avctx, AV_LOG_DEBUG,
                       "bytestream overread %"PTRDIFF_SPECIFIER"\n",
                       sl->cabac.bytestream_end - sl->cabac.bytestream);
            if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
                       sl->mb_x, sl->mb_y,
                       sl->cabac.bytestream_end - sl->cabac.bytestream);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return AVERROR_INVALIDDATA;
            }

            if (++sl->mb_x >= h->mb_width) {
                loop_filter(h, sl, lf_x_start, sl->mb_x);
                sl->mb_x = lf_x_start = 0;
                decode_finish_row(h, sl);
                ++sl->mb_y;
                if (FIELD_OR_MBAFF_PICTURE(h)) {
                    ++sl->mb_y;
                    if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
                        predict_field_decoding_flag(h, sl);
                }
            }

            if (eos || sl->mb_y >= h->mb_height) {
                ff_tlog(h->avctx, "slice end %d %d\n",
                        get_bits_count(&sl->gb), sl->gb.size_in_bits);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
                             sl->mb_y, ER_MB_END);
                if (sl->mb_x > lf_x_start)
                    loop_filter(h, sl, lf_x_start, sl->mb_x);
                goto finish;
            }
        }
    } else {
        for (;;) {
            int ret;

            if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
                av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
                       sl->next_slice_idx);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return AVERROR_INVALIDDATA;
            }

            ret = ff_h264_decode_mb_cavlc(h, sl);

            if (ret >= 0)
                ff_h264_hl_decode_mb(h, sl);

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF(h)) {
                sl->mb_y++;
                ret = ff_h264_decode_mb_cavlc(h, sl);

                if (ret >= 0)
                    ff_h264_hl_decode_mb(h, sl);
                sl->mb_y--;
            }

            if (ret < 0) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return ret;
            }

            if (++sl->mb_x >= h->mb_width) {
                loop_filter(h, sl, lf_x_start, sl->mb_x);
                sl->mb_x = lf_x_start = 0;
                decode_finish_row(h, sl);
                ++sl->mb_y;
                if (FIELD_OR_MBAFF_PICTURE(h)) {
                    ++sl->mb_y;
                    if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
                        predict_field_decoding_flag(h, sl);
                }
                if (sl->mb_y >= h->mb_height) {
                    ff_tlog(h->avctx, "slice end %d %d\n",
                            get_bits_count(&sl->gb), sl->gb.size_in_bits);

                    if (   get_bits_left(&sl->gb) == 0
                        || (get_bits_left(&sl->gb) > 0 &&
                            !(h->avctx->err_recognition & AV_EF_AGGRESSIVE))) {
                        er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                     sl->mb_x - 1, sl->mb_y, ER_MB_END);

                        goto finish;
                    } else {
                        er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                     sl->mb_x, sl->mb_y, ER_MB_END);

                        return AVERROR_INVALIDDATA;
                    }
                }
            }

            if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
                ff_tlog(h->avctx, "slice end %d %d\n",
                        get_bits_count(&sl->gb), sl->gb.size_in_bits);

                if (get_bits_left(&sl->gb) == 0) {
                    er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                 sl->mb_x - 1, sl->mb_y, ER_MB_END);
                    if (sl->mb_x > lf_x_start)
                        loop_filter(h, sl, lf_x_start, sl->mb_x);

                    goto finish;
                } else {
                    er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                                 sl->mb_y, ER_MB_ERROR);

                    return AVERROR_INVALIDDATA;
                }
            }
        }
    }

finish:
    sl->deblocking_filter = orig_deblock;
    return 0;
}

/**
 * Call decode_slice() for each context.
 *
 * @param h h264 master context
 * @param context_count number of contexts to execute
 */
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
{
    AVCodecContext *const avctx = h->avctx;
    H264SliceContext *sl;
    int i, j;

    av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);

    h->slice_ctx[0].next_slice_idx = INT_MAX;

    if (h->avctx->hwaccel
#if FF_API_CAP_VDPAU
        || h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
#endif
        )
        return 0;
    if (context_count == 1) {
        int ret;

        h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
        h->postpone_filter = 0;

        ret = decode_slice(avctx, &h->slice_ctx[0]);
        h->mb_y = h->slice_ctx[0].mb_y;
        return ret;
    } else {
        av_assert0(context_count > 0);
        for (i = 0; i < context_count; i++) {
            int next_slice_idx = h->mb_width * h->mb_height;
            int slice_idx;

            sl = &h->slice_ctx[i];
            if (CONFIG_ERROR_RESILIENCE) {
                sl->er.error_count = 0;
            }

            /* make sure none of those slices overlap */
            slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
            for (j = 0; j < context_count; j++) {
                H264SliceContext *sl2 = &h->slice_ctx[j];
                int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;

                if (i == j || slice_idx2 < slice_idx)
                    continue;
                next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
            }
            sl->next_slice_idx = next_slice_idx;
        }

        avctx->execute(avctx, decode_slice, h->slice_ctx,
                       NULL, context_count, sizeof(h->slice_ctx[0]));

        /* pull back stuff from slices to master context */
        sl = &h->slice_ctx[context_count - 1];
        h->mb_y = sl->mb_y;
        if (CONFIG_ERROR_RESILIENCE) {
            for (i = 1; i < context_count; i++)
                h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
        }
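
        /* Deblocking was postponed in the slice headers because slices may
         * depend on each other across their boundaries; now that all slice
         * contexts have run, apply the loop filter over each slice's rows. */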
        if (h->postpone_filter) {
            h->postpone_filter = 0;

            for (i = 0; i < context_count; i++) {
                int y_end, x_end;

                sl = &h->slice_ctx[i];
                y_end = FFMIN(sl->mb_y + 1, h->mb_height);
                x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;

                for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
                    sl->mb_y = j;
                    loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
                                j == y_end - 1 ? x_end : h->mb_width);
                }
            }
        }
    }

    return 0;
}