/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "golomb_legacy.h"
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "h264_ps.h"
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"

static const uint8_t field_scan[16] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

static const uint8_t field_scan8x8[64] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

static const uint8_t field_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};

// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
static const uint8_t zigzag_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};

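/*
 * Each scan table entry encodes a coefficient position as x + y * stride
 * (stride 4 for 4x4 blocks, 8 for 8x8 blocks). The *_cavlc variants reorder
 * the 8x8 scans because CAVLC codes an 8x8 block as four interleaved 4x4
 * blocks, each taking every fourth coefficient of the 8x8 scan (see the
 * relation in the comment above zigzag_scan8x8_cavlc).
 */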
static void release_unused_pictures(H264Context *h, int remove_current)
{
    int i;

    /* release non reference frames */
    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
            (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
            ff_h264_unref_picture(h, &h->DPB[i]);
        }
    }
}

static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
{
    const H264Context *h = sl->h264;
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
    // edge emu needs blocksize + filter length - 1
    // (= 21x21 for H.264)
    av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);

    av_fast_malloc(&sl->top_borders[0], &sl->top_borders_allocated[0],
                   h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
    av_fast_malloc(&sl->top_borders[1], &sl->top_borders_allocated[1],
                   h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);

    if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
        !sl->top_borders[0] || !sl->top_borders[1]) {
        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
        return AVERROR(ENOMEM);
    }

    return 0;
}

static int init_table_pools(H264Context *h)
{
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
                                               av_buffer_allocz);
    h->mb_type_pool      = av_buffer_pool_init((big_mb_num + h->mb_stride) *
                                               sizeof(uint32_t), av_buffer_allocz);
    h->motion_val_pool   = av_buffer_pool_init(2 * (b4_array_size + 4) *
                                               sizeof(int16_t), av_buffer_allocz);
    h->ref_index_pool    = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);

    if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
        !h->ref_index_pool) {
        av_buffer_pool_uninit(&h->qscale_table_pool);
        av_buffer_pool_uninit(&h->mb_type_pool);
        av_buffer_pool_uninit(&h->motion_val_pool);
        av_buffer_pool_uninit(&h->ref_index_pool);
        return AVERROR(ENOMEM);
    }

    return 0;
}

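/*
 * The pools above recycle the per-picture metadata buffers (qscale tables,
 * mb_type, motion vectors, reference indices) across frames, so steady-state
 * decoding does not keep reallocating them. The sizes include an extra
 * macroblock row plus stride slack, presumably to keep neighbor accesses
 * from prediction code just outside the picture within the allocation.
 */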
static int alloc_picture(H264Context *h, H264Picture *pic)
{
    int i, ret = 0;

    av_assert0(!pic->f->data[0]);

    pic->tf.f = pic->f;
    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
                               AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    if (h->avctx->hwaccel) {
        const AVHWAccel *hwaccel = h->avctx->hwaccel;
        av_assert0(!pic->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf)
                return AVERROR(ENOMEM);
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if (!h->qscale_table_pool) {
        ret = init_table_pools(h);
        if (ret < 0)
            goto fail;
    }

    pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
    pic->mb_type_buf      = av_buffer_pool_get(h->mb_type_pool);
    if (!pic->qscale_table_buf || !pic->mb_type_buf)
        goto fail;

    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;

    for (i = 0; i < 2; i++) {
        pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
        pic->ref_index_buf[i]  = av_buffer_pool_get(h->ref_index_pool);
        if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
            goto fail;

        pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
        pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    }

    return 0;
fail:
    ff_h264_unref_picture(h, pic);
    return (ret < 0) ? ret : AVERROR(ENOMEM);
}

static inline int pic_is_unused(H264Context *h, H264Picture *pic)
{
    if (!pic->f->buf[0])
        return 1;
    return 0;
}

static int find_unused_picture(H264Context *h)
{
    int i;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (pic_is_unused(h, &h->DPB[i]))
            break;
    }
    if (i == H264_MAX_PICTURE_COUNT)
        return AVERROR_INVALIDDATA;

    return i;
}

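/*
 * Claim a DPB slot for the incoming frame: drop pictures that are no longer
 * referenced, pick the first slot whose frame buffer is unset, allocate its
 * buffers, and make cur_pic a new reference to that slot.
 */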
static int initialize_cur_frame(H264Context *h)
{
    H264Picture *cur;
    int ret;

    release_unused_pictures(h, 1);
    ff_h264_unref_picture(h, &h->cur_pic);
    h->cur_pic_ptr = NULL;

    ret = find_unused_picture(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        return ret;
    }
    cur = &h->DPB[ret];

    ret = alloc_picture(h, cur);
    if (ret < 0)
        return ret;

    ret = ff_h264_ref_picture(h, &h->cur_pic, cur);
    if (ret < 0)
        return ret;
    h->cur_pic_ptr = cur;

    return 0;
}

#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))

#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->DPB &&                       \
      pic < old_ctx->DPB + H264_MAX_PICTURE_COUNT) ?      \
     &new_ctx->DPB[pic - old_ctx->DPB] : NULL)

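/*
 * REBASE_PICTURE translates an H264Picture pointer belonging to the old
 * (source) context's DPB into the pointer for the same slot in the new
 * (destination) context's DPB; pointers outside the DPB map to NULL. This is
 * what lets frame-threaded decoding copy reference lists between contexts.
 */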
static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
                               H264Context *new_base,
                               H264Context *old_base)
{
    int i;

    for (i = 0; i < count; i++) {
        assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
                IN_RANGE(from[i], old_base->DPB,
                         sizeof(H264Picture) * H264_MAX_PICTURE_COUNT) ||
                !from[i]));
        to[i] = REBASE_PICTURE(from[i], new_base, old_base);
    }
}

static int h264_slice_header_init(H264Context *h);

int ff_h264_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    H264Context *h = dst->priv_data, *h1 = src->priv_data;
    int inited = h->context_initialized, err = 0;
    int need_reinit = 0;
    int i, ret;

    if (dst == src || !h1->context_initialized)
        return 0;

    if (!h1->ps.sps)
        return AVERROR_INVALIDDATA;

    if (inited &&
        (h->width     != h1->width     ||
         h->height    != h1->height    ||
         h->mb_width  != h1->mb_width  ||
         h->mb_height != h1->mb_height ||
         !h->ps.sps                    ||
         h->ps.sps->bit_depth_luma    != h1->ps.sps->bit_depth_luma    ||
         h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
         h->ps.sps->colorspace        != h1->ps.sps->colorspace)) {
        need_reinit = 1;
    }

    // SPS/PPS
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
        av_buffer_unref(&h->ps.sps_list[i]);
        if (h1->ps.sps_list[i]) {
            h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
            if (!h->ps.sps_list[i])
                return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
        av_buffer_unref(&h->ps.pps_list[i]);
        if (h1->ps.pps_list[i]) {
            h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
            if (!h->ps.pps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    h->ps.sps = h1->ps.sps;

    if (need_reinit || !inited) {
        h->width     = h1->width;
        h->height    = h1->height;
        h->mb_height = h1->mb_height;
        h->mb_width  = h1->mb_width;
        h->mb_num    = h1->mb_num;
        h->mb_stride = h1->mb_stride;
        h->b_stride  = h1->b_stride;

        if ((err = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
            return err;
        }

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
    }

    h->avctx->coded_height = h1->avctx->coded_height;
    h->avctx->coded_width  = h1->avctx->coded_width;
    h->avctx->width        = h1->avctx->width;
    h->avctx->height       = h1->avctx->height;

    h->width_from_caller  = h1->width_from_caller;
    h->height_from_caller = h1->height_from_caller;

    h->coded_picture_number = h1->coded_picture_number;
    h->first_field          = h1->first_field;
    h->picture_structure    = h1->picture_structure;
    h->mb_aff_frame         = h1->mb_aff_frame;
    h->droppable            = h1->droppable;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ff_h264_unref_picture(h, &h->DPB[i]);
        if (h1->DPB[i].f->buf[0] &&
            (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
            return ret;
    }

    h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
    ff_h264_unref_picture(h, &h->cur_pic);
    if (h1->cur_pic.f->buf[0]) {
        ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
        if (ret < 0)
            return ret;
    }

    h->enable_er       = h1->enable_er;
    h->workaround_bugs = h1->workaround_bugs;
    h->x264_build      = h1->x264_build;
    h->droppable       = h1->droppable;

    // extradata/NAL handling
    h->is_avc          = h1->is_avc;
    h->nal_length_size = h1->nal_length_size;

    memcpy(&h->poc, &h1->poc, sizeof(h->poc));

    memcpy(h->short_ref,   h1->short_ref,   sizeof(h->short_ref));
    memcpy(h->long_ref,    h1->long_ref,    sizeof(h->long_ref));
    memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
    memcpy(h->last_pocs,   h1->last_pocs,   sizeof(h->last_pocs));

    h->next_outputed_poc = h1->next_outputed_poc;

    memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
    h->nb_mmco               = h1->nb_mmco;
    h->mmco_reset            = h1->mmco_reset;
    h->explicit_ref_marking  = h1->explicit_ref_marking;
    h->long_ref_count        = h1->long_ref_count;
    h->short_ref_count       = h1->short_ref_count;

    copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
    copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
    copy_picture_range(h->delayed_pic, h1->delayed_pic,
                       MAX_DELAYED_PIC_COUNT + 2, h, h1);

    if (!h->cur_pic_ptr)
        return 0;

    if (!h->droppable) {
        err = ff_h264_execute_ref_pic_marking(h);
        h->poc.prev_poc_msb = h->poc.poc_msb;
        h->poc.prev_poc_lsb = h->poc.poc_lsb;
    }
    h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
    h->poc.prev_frame_num        = h->poc.frame_num;

    h->recovery_frame  = h1->recovery_frame;
    h->frame_recovered = h1->frame_recovered;

    return err;
}

static int h264_frame_start(H264Context *h)
{
    H264Picture *pic;
    int i, ret;
    const int pixel_shift = h->pixel_shift;

    ret = initialize_cur_frame(h);
    if (ret < 0)
        return ret;

    pic = h->cur_pic_ptr;
    pic->reference               = h->droppable ? 0 : h->picture_structure;
    pic->f->coded_picture_number = h->coded_picture_number++;
    pic->field_picture           = h->picture_structure != PICT_FRAME;
    pic->frame_num               = h->poc.frame_num;
    /*
     * Zero key_frame here; IDR markings per slice in frame or fields are ORed
     * in later.
     * See decode_nal_units().
     */
    pic->f->key_frame = 0;
    pic->mmco_reset   = 0;
    pic->recovered    = 0;

    pic->f->pict_type = h->slice_ctx[0].slice_type;

    pic->f->crop_left   = h->crop_left;
    pic->f->crop_right  = h->crop_right;
    pic->f->crop_top    = h->crop_top;
    pic->f->crop_bottom = h->crop_bottom;

    if (CONFIG_ERROR_RESILIENCE && h->enable_er)
        ff_er_frame_start(&h->slice_ctx[0].er);

    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) +
                                  4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) +
                                  8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) +
                                       4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) +
                                       8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    /* Some macroblocks can be accessed before they're available in case
     * of lost slices, MBAFF or threading. */
    memset(h->slice_table, -1,
           (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));

    /* We mark the current picture as non-reference after allocating it, so
     * that if we break out due to an error it can be released automatically
     * in the next ff_mpv_frame_start().
     */
    h->cur_pic_ptr->reference = 0;

    h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;

    h->postpone_filter = 0;

    h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);

    if (h->sei.unregistered.x264_build >= 0)
        h->x264_build = h->sei.unregistered.x264_build;

    assert(h->cur_pic_ptr->long_ref == 0);

    return 0;
}

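/*
 * Intra prediction uses the unfiltered reconstruction of the neighboring
 * macroblocks, so before the deblocking filter runs we save the line of
 * pixels directly above each macroblock into sl->top_borders; the decode
 * loop swaps these unfiltered samples back in around the prediction code.
 * With MBAFF, two lines are kept per macroblock pair (see the comment
 * further down in this function).
 */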
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl,
                                              uint8_t *src_y,
                                              uint8_t *src_cb, uint8_t *src_cr,
                                              int linesize, int uvlinesize,
                                              int simple)
{
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift;
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl)) {
                top_border = sl->top_borders[0][sl->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF(sl)) {
            top_idx = 0;
        } else
            return;
    }

    top_border = sl->top_borders[top_idx][sl->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);

    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}

/**
 * Initialize implicit_weight table.
 * @param field  0/1 initialize the weight for interlaced MBAFF
 *               -1 initializes the rest
 */
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
{
    int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;

    for (i = 0; i < 2; i++) {
        sl->pwt.luma_weight_flag[i]   = 0;
        sl->pwt.chroma_weight_flag[i] = 0;
    }

    if (field < 0) {
        if (h->picture_structure == PICT_FRAME) {
            cur_poc = h->cur_pic_ptr->poc;
        } else {
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
        }
        if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
            sl->ref_list[0][0].poc + sl->ref_list[1][0].poc == 2 * cur_poc) {
            sl->pwt.use_weight        = 0;
            sl->pwt.use_weight_chroma = 0;
            return;
        }
        ref_start  = 0;
        ref_count0 = sl->ref_count[0];
        ref_count1 = sl->ref_count[1];
    } else {
        cur_poc    = h->cur_pic_ptr->field_poc[field];
        ref_start  = 16;
        ref_count0 = 16 + 2 * sl->ref_count[0];
        ref_count1 = 16 + 2 * sl->ref_count[1];
    }

    sl->pwt.use_weight               = 2;
    sl->pwt.use_weight_chroma        = 2;
    sl->pwt.luma_log2_weight_denom   = 5;
    sl->pwt.chroma_log2_weight_denom = 5;

    for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
        int poc0 = sl->ref_list[0][ref0].poc;
        for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
            int w = 32;
            if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
                int poc1 = sl->ref_list[1][ref1].poc;
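                /* This follows the spec's implicit weighted prediction
                 * (H.264 subclause 8.4.2.3.2): td and tb are POC distances
                 * clipped to [-128, 127], tx is a rounded 16384 / td, and
                 * the spec's DistScaleFactor is (tb * tx + 32) >> 6. The
                 * >> 8 below folds in the spec's final >> 2, so w ends up
                 * as the list-0 weight w0 = 64 - w1; the list-1 weight is
                 * derived as 64 - w where the weights are applied. */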
                int td = av_clip_int8(poc1 - poc0);
                if (td) {
                    int tb = av_clip_int8(cur_poc - poc0);
                    int tx = (16384 + (FFABS(td) >> 1)) / td;
                    int dist_scale_factor = (tb * tx + 32) >> 8;
                    if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
                        w = 64 - dist_scale_factor;
                }
            }
            if (field < 0) {
                sl->pwt.implicit_weight[ref0][ref1][0] =
                sl->pwt.implicit_weight[ref0][ref1][1] = w;
            } else {
                sl->pwt.implicit_weight[ref0][ref1][field] = w;
            }
        }
    }
}

/**
 * initialize scan tables
 */
static void init_scan_tables(H264Context *h)
{
    int i;
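
    /* The static tables above store each position as x + y * stride;
     * TRANSPOSE swaps the row and column fields of an entry (2 bits each
     * for 4x4, 3 bits each for 8x8), so the runtime tables hold the
     * transposed positions, presumably matching the coefficient block
     * layout the rest of the decoder indexes. */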
    for (i = 0; i < 16; i++) {
#define TRANSPOSE(x) (x >> 2) | ((x << 2) & 0xF)
        h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
        h->field_scan[i]  = TRANSPOSE(field_scan[i]);
#undef TRANSPOSE
    }
    for (i = 0; i < 64; i++) {
#define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3)
        h->zigzag_scan8x8[i]       = TRANSPOSE(ff_zigzag_direct[i]);
        h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
        h->field_scan8x8[i]        = TRANSPOSE(field_scan8x8[i]);
        h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
    }
    if (h->ps.sps->transform_bypass) { // FIXME same ugly
        h->zigzag_scan_q0          = ff_zigzag_scan;
        h->zigzag_scan8x8_q0       = ff_zigzag_direct;
        h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
        h->field_scan_q0           = field_scan;
        h->field_scan8x8_q0        = field_scan8x8;
        h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
    } else {
        h->zigzag_scan_q0          = h->zigzag_scan;
        h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
        h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
        h->field_scan_q0           = h->field_scan;
        h->field_scan8x8_q0        = h->field_scan8x8;
        h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
    }
}

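/*
 * Build the list of candidate output formats for this stream, hardware
 * pixel formats first where the matching hwaccel was compiled in, and hand
 * the list to ff_get_format(), which invokes the user's get_format()
 * callback to pick one.
 */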
static enum AVPixelFormat get_pixel_format(H264Context *h)
{
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     (CONFIG_H264_VDA_HWACCEL * 2) + \
                     CONFIG_H264_VDPAU_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else
            *fmt++ = AV_PIX_FMT_YUV420P10;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_H264_VDA_HWACCEL
            *fmt++ = AV_PIX_FMT_VDA_VLD;
            *fmt++ = AV_PIX_FMT_VDA;
#endif
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    return ff_get_format(h->avctx, choices);
}

/* export coded and cropped frame dimensions to AVCodecContext */
static int init_dimensions(H264Context *h)
{
    SPS *sps = h->ps.sps;
    int cr = sps->crop_right;
    int cl = sps->crop_left;
    int ct = sps->crop_top;
    int cb = sps->crop_bottom;
    int width  = h->width  - (cr + cl);
    int height = h->height - (ct + cb);

    /* handle container cropping */
    if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
        !sps->crop_top && !sps->crop_left &&
        FFALIGN(h->width_from_caller, 16)  == FFALIGN(width, 16) &&
        FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16)) {
        width  = h->width_from_caller;
        height = h->height_from_caller;
        cl = 0;
        ct = 0;
        cr = h->width - width;
        cb = h->height - height;
    } else {
        h->width_from_caller  = 0;
        h->height_from_caller = 0;
    }

    h->avctx->coded_width  = h->width;
    h->avctx->coded_height = h->height;
    h->avctx->width        = width;
    h->avctx->height       = height;
    h->crop_right          = cr;
    h->crop_left           = cl;
    h->crop_top            = ct;
    h->crop_bottom         = cb;

    return 0;
}

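/*
 * (Re)initialize all state that depends on the active SPS: SAR, framerate,
 * scan tables, the per-bit-depth DSP function tables and the slice contexts.
 * The x264_build < 44U check below appears to compensate for old x264
 * versions that wrote time_scale without the factor of two (field-based
 * ticks) the timing semantics imply.
 */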
static int h264_slice_header_init(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    int i, ret;

    ff_set_sar(h->avctx, sps->sar);
    av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
                                     &h->chroma_x_shift, &h->chroma_y_shift);

    if (sps->timing_info_present_flag) {
        int64_t den = sps->time_scale;
        if (h->x264_build < 44U)
            den *= 2;
        av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
                  sps->num_units_in_tick, den, 1 << 30);
    }

    ff_h264_free_tables(h);

    h->first_field           = 0;
    h->prev_interlaced_frame = 1;

    init_scan_tables(h);
    ret = ff_h264_alloc_tables(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
        return ret;
    }

    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) {
        av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
               sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
    h->pixel_shift                = sps->bit_depth_luma > 8;
    h->chroma_format_idc          = sps->chroma_format_idc;
    h->bit_depth_luma             = sps->bit_depth_luma;

    ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
                    sps->chroma_format_idc);
    ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
    ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
                      sps->chroma_format_idc);
    ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
        ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
            return ret;
        }
    } else {
        for (i = 0; i < h->nb_slice_ctx; i++) {
            H264SliceContext *sl = &h->slice_ctx[i];

            sl->h264               = h;
            sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[0]       = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[1]       = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;

            if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
                return ret;
            }
        }
    }

    h->context_initialized = 1;

    return 0;
}

static int h264_init_ps(H264Context *h, const H264SliceContext *sl)
{
    const SPS *sps;
    int needs_reinit = 0, ret;

    h->ps.pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
        h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data;

        if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->chroma_format_idc != h->ps.sps->chroma_format_idc)
            needs_reinit = 1;
    }
    sps = h->ps.sps;

    h->avctx->profile = ff_h264_get_profile(sps);
    h->avctx->level   = sps->level_idc;
    h->avctx->refs    = sps->ref_frame_count;

    if (h->mb_width  != sps->mb_width ||
        h->mb_height != sps->mb_height)
        needs_reinit = 1;

    h->mb_width  = sps->mb_width;
    h->mb_height = sps->mb_height;
    h->mb_num    = h->mb_width * h->mb_height;
    h->mb_stride = h->mb_width + 1;

    h->b_stride = h->mb_width * 4;

    h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p

    h->width  = 16 * h->mb_width;
    h->height = 16 * h->mb_height;

    ret = init_dimensions(h);
    if (ret < 0)
        return ret;

    if (sps->video_signal_type_present_flag) {
        h->avctx->color_range = sps->full_range ? AVCOL_RANGE_JPEG
                                                : AVCOL_RANGE_MPEG;
        if (sps->colour_description_present_flag) {
            if (h->avctx->colorspace != sps->colorspace)
                needs_reinit = 1;
            h->avctx->color_primaries = sps->color_primaries;
            h->avctx->color_trc       = sps->color_trc;
            h->avctx->colorspace      = sps->colorspace;
        }
    }

    if (!h->context_initialized || needs_reinit) {
        h->context_initialized = 0;
        if (sl != h->slice_ctx) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "changing width %d -> %d / height %d -> %d on "
                   "slice %d\n",
                   h->width, h->avctx->coded_width,
                   h->height, h->avctx->coded_height,
                   h->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }

        ff_h264_flush_change(h);

        if ((ret = get_pixel_format(h)) < 0)
            return ret;
        h->avctx->pix_fmt = ret;

        av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
               "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);

        if ((ret = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "h264_slice_header_init() failed\n");
            return ret;
        }
    }

    return 0;
}

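/*
 * Translate decoded bitstream state and SEI messages (picture timing, frame
 * packing, display orientation, AFD, A53 captions) into AVFrame properties
 * and side data on the current picture.
 */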
static int h264_export_frame_props(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *cur = h->cur_pic_ptr;

    cur->f->interlaced_frame = 0;
    cur->f->repeat_pict      = 0;

    /* Signal interlacing information externally. */
    /* Prioritize picture timing SEI information over the decoding
     * process, if such SEI is present. */
    if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
        H264SEIPictureTiming *pt = &h->sei.picture_timing;
        switch (pt->pic_struct) {
        case H264_SEI_PIC_STRUCT_FRAME:
            break;
        case H264_SEI_PIC_STRUCT_TOP_FIELD:
        case H264_SEI_PIC_STRUCT_BOTTOM_FIELD:
            cur->f->interlaced_frame = 1;
            break;
        case H264_SEI_PIC_STRUCT_TOP_BOTTOM:
        case H264_SEI_PIC_STRUCT_BOTTOM_TOP:
            if (FIELD_OR_MBAFF_PICTURE(h))
                cur->f->interlaced_frame = 1;
            else
                // try to flag soft telecine progressive
                cur->f->interlaced_frame = h->prev_interlaced_frame;
            break;
        case H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
        case H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
            /* Signal the possibility of telecined film externally
             * (pic_struct 5,6). From these hints, let the applications
             * decide if they apply deinterlacing. */
            cur->f->repeat_pict = 1;
            break;
        case H264_SEI_PIC_STRUCT_FRAME_DOUBLING:
            cur->f->repeat_pict = 2;
            break;
        case H264_SEI_PIC_STRUCT_FRAME_TRIPLING:
            cur->f->repeat_pict = 4;
            break;
        }

        if ((pt->ct_type & 3) &&
            pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
            cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
    } else {
        /* Derive interlacing flag from used decoding process. */
        cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
    }
    h->prev_interlaced_frame = cur->f->interlaced_frame;

    if (cur->field_poc[0] != cur->field_poc[1]) {
        /* Derive top_field_first from field pocs. */
        cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
    } else {
        if (cur->f->interlaced_frame ||
            (sps->pic_struct_present_flag && h->sei.picture_timing.present)) {
            /* Use picture timing SEI information. Even if it belongs to
             * a past frame, it is better than nothing. */
            if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
                h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
                cur->f->top_field_first = 1;
            else
                cur->f->top_field_first = 0;
        } else {
            /* Most likely progressive */
            cur->f->top_field_first = 0;
        }
    }

    if (h->sei.frame_packing.present &&
        h->sei.frame_packing.arrangement_type >= 0 &&
        h->sei.frame_packing.arrangement_type <= 6 &&
        h->sei.frame_packing.content_interpretation_type > 0 &&
        h->sei.frame_packing.content_interpretation_type < 3) {
        H264SEIFramePacking *fp = &h->sei.frame_packing;
        AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
        if (!stereo)
            return AVERROR(ENOMEM);

        switch (fp->arrangement_type) {
        case 0:
            stereo->type = AV_STEREO3D_CHECKERBOARD;
            break;
        case 1:
            stereo->type = AV_STEREO3D_COLUMNS;
            break;
        case 2:
            stereo->type = AV_STEREO3D_LINES;
            break;
        case 3:
            if (fp->quincunx_subsampling)
                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
            else
                stereo->type = AV_STEREO3D_SIDEBYSIDE;
            break;
        case 4:
            stereo->type = AV_STEREO3D_TOPBOTTOM;
            break;
        case 5:
            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
            break;
        case 6:
            stereo->type = AV_STEREO3D_2D;
            break;
        }

        if (fp->content_interpretation_type == 2)
            stereo->flags = AV_STEREO3D_FLAG_INVERT;
    }

    if (h->sei.display_orientation.present &&
        (h->sei.display_orientation.anticlockwise_rotation ||
         h->sei.display_orientation.hflip ||
         h->sei.display_orientation.vflip)) {
        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
        AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
                                                           AV_FRAME_DATA_DISPLAYMATRIX,
                                                           sizeof(int32_t) * 9);
        if (!rotation)
            return AVERROR(ENOMEM);

        av_display_rotation_set((int32_t *)rotation->data, angle);
        av_display_matrix_flip((int32_t *)rotation->data,
                               o->hflip, o->vflip);
    }

    if (h->sei.afd.present) {
        AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
                                                     sizeof(uint8_t));
        if (!sd)
            return AVERROR(ENOMEM);

        *sd->data = h->sei.afd.active_format_description;
        h->sei.afd.present = 0;
    }

    if (h->sei.a53_caption.a53_caption) {
        H264SEIA53Caption *a53 = &h->sei.a53_caption;
        AVFrameSideData *sd = av_frame_new_side_data(cur->f,
                                                     AV_FRAME_DATA_A53_CC,
                                                     a53->a53_caption_size);
        if (!sd)
            return AVERROR(ENOMEM);

        memcpy(sd->data, a53->a53_caption, a53->a53_caption_size);
        av_freep(&a53->a53_caption);
        a53->a53_caption_size = 0;
    }

    return 0;
}

static int h264_select_output_frame(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *out = h->cur_pic_ptr;
    H264Picture *cur = h->cur_pic_ptr;
    int i, pics, out_of_order, out_idx;
    int invalid = 0, cnt = 0;
    int ret;

    if (sps->bitstream_restriction_flag ||
        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
    }

    pics = 0;
    while (h->delayed_pic[pics])
        pics++;

    assert(pics <= MAX_DELAYED_PIC_COUNT);

    h->delayed_pic[pics++] = cur;
    if (cur->reference == 0)
        cur->reference = DELAYED_PIC_REF;

    /* Frame reordering. This code takes pictures from coding order and sorts
     * them by their incremental POC value into display order. It supports POC
     * gaps, MMCO reset codes and random resets.
     * A "display group" can start either with an IDR frame (f.key_frame = 1),
     * and/or can be closed down with an MMCO reset code. In sequences where
     * there is no delay, we can't detect that (since the frame was already
     * output to the user), so we also set h->mmco_reset to detect the MMCO
     * reset code.
     * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
     * we increase the delay between input and output. All frames affected by
     * the lag (e.g. those that should have been output before another frame
     * that we already returned to the user) will be dropped. This is a bug
     * that we will fix later. */
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
        cnt     += out->poc < h->last_pocs[i];
        invalid += out->poc == INT_MIN;
    }
    if (!h->mmco_reset && !cur->f->key_frame &&
        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
        h->mmco_reset = 2;
        if (pics > 1)
            h->delayed_pic[pics - 2]->mmco_reset = 2;
    }
    if (h->mmco_reset || cur->f->key_frame) {
        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
            h->last_pocs[i] = INT_MIN;
        cnt     = 0;
        invalid = MAX_DELAYED_PIC_COUNT;
    }
    out     = h->delayed_pic[0];
    out_idx = 0;
    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
                h->delayed_pic[i] &&
                !h->delayed_pic[i - 1]->mmco_reset &&
                !h->delayed_pic[i]->f->key_frame;
         i++)
        if (h->delayed_pic[i]->poc < out->poc) {
            out     = h->delayed_pic[i];
            out_idx = i;
        }
    if (h->avctx->has_b_frames == 0 &&
        (h->delayed_pic[0]->f->key_frame || h->mmco_reset))
        h->next_outputed_poc = INT_MIN;
    out_of_order = !out->f->key_frame && !h->mmco_reset &&
                   (out->poc < h->next_outputed_poc);

    if (sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames >= sps->num_reorder_frames) {
    } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
               h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
            h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
        }
    } else if (!h->avctx->has_b_frames &&
               ((h->next_outputed_poc != INT_MIN &&
                 out->poc > h->next_outputed_poc + 2) ||
                cur->f->pict_type == AV_PICTURE_TYPE_B)) {
        h->avctx->has_b_frames++;
    }

    if (pics > h->avctx->has_b_frames) {
        out->reference &= ~DELAYED_PIC_REF;
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];
    }
    memmove(h->last_pocs, &h->last_pocs[1],
            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
    if (!out_of_order && pics > h->avctx->has_b_frames) {
        av_frame_unref(h->output_frame);
        ret = av_frame_ref(h->output_frame, out->f);
        if (ret < 0)
            return ret;

        if (out->recovered) {
            // We have reached a recovery point and all frames after it in
            // display order are "recovered".
            h->frame_recovered |= FRAME_RECOVERED_SEI;
        }
        out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);

        if (!out->recovered) {
            if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT))
                av_frame_unref(h->output_frame);
            else
                h->output_frame->flags |= AV_FRAME_FLAG_CORRUPT;
        }

        if (out->mmco_reset) {
            if (out_idx > 0) {
                h->next_outputed_poc                    = out->poc;
                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
            } else {
                h->next_outputed_poc = INT_MIN;
            }
        } else {
            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) {
                h->next_outputed_poc = INT_MIN;
            } else {
                h->next_outputed_poc = out->poc;
            }
        }
        h->mmco_reset = 0;
    } else {
        av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
    }

    return 0;
}

/* This function is called right after decoding the slice header for a first
 * slice in a field (or a frame). It decides whether we are decoding a new frame
 * or a second field in a pair and does the necessary setup.
 */
static int h264_field_start(H264Context *h, const H264SliceContext *sl,
                            const H2645NAL *nal)
{
    const SPS *sps;
    int last_pic_structure, last_pic_droppable, ret;

    ret = h264_init_ps(h, sl);
    if (ret < 0)
        return ret;

    sps = h->ps.sps;

    last_pic_droppable   = h->droppable;
    last_pic_structure   = h->picture_structure;
    h->droppable         = (nal->ref_idc == 0);
    h->picture_structure = sl->picture_structure;

    h->poc.frame_num        = sl->frame_num;
    h->poc.poc_lsb          = sl->poc_lsb;
    h->poc.delta_poc_bottom = sl->delta_poc_bottom;
    h->poc.delta_poc[0]     = sl->delta_poc[0];
    h->poc.delta_poc[1]     = sl->delta_poc[1];

    /* Shorten frame num gaps so we don't have to allocate reference
     * frames just to throw them away */
    if (h->poc.frame_num != h->poc.prev_frame_num) {
        int unwrap_prev_frame_num = h->poc.prev_frame_num;
        int max_frame_num         = 1 << sps->log2_max_frame_num;

        if (unwrap_prev_frame_num > h->poc.frame_num)
            unwrap_prev_frame_num -= max_frame_num;

        if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
            unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
            if (unwrap_prev_frame_num < 0)
                unwrap_prev_frame_num += max_frame_num;

            h->poc.prev_frame_num = unwrap_prev_frame_num;
        }
    }

    /* See if we have a decoded first field looking for a pair...
     * Here, we're using that to see if we should mark previously
     * decoded frames as "finished".
     * We have to do that before the "dummy" in-between frame allocation,
     * since that can modify s->current_picture_ptr. */
    if (h->first_field) {
        assert(h->cur_pic_ptr);
        assert(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          last_pic_structure == PICT_TOP_FIELD);
            }
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and previous field were reference, but had
                 * different frame_nums. Consider this field first in
                 * pair. Throw away previous field except for reference
                 * purposes. */
                if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
                    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                              last_pic_structure == PICT_TOP_FIELD);
                }
            } else {
                /* Second field in complementary pair */
                if (!((last_pic_structure == PICT_TOP_FIELD &&
                       h->picture_structure == PICT_BOTTOM_FIELD) ||
                      (last_pic_structure == PICT_BOTTOM_FIELD &&
                       h->picture_structure == PICT_TOP_FIELD))) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "Invalid field mode combination %d/%d\n",
                           last_pic_structure, h->picture_structure);
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_INVALIDDATA;
                } else if (last_pic_droppable != h->droppable) {
                    avpriv_request_sample(h->avctx,
                                          "Found reference and non-reference fields in the same frame, which");
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_PATCHWELCOME;
                }
            }
        }
    }

    while (h->poc.frame_num != h->poc.prev_frame_num &&
           h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
        H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
        av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
               h->poc.frame_num, h->poc.prev_frame_num);
        ret = initialize_cur_frame(h);
        if (ret < 0) {
            h->first_field = 0;
            return ret;
        }

        h->poc.prev_frame_num++;
        h->poc.prev_frame_num        %= 1 << sps->log2_max_frame_num;
        h->cur_pic_ptr->frame_num     = h->poc.prev_frame_num;
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);

        h->explicit_ref_marking = 0;
        ret = ff_h264_execute_ref_pic_marking(h);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            return ret;

        /* Error concealment: If a ref is missing, copy the previous ref
         * in its place.
         * FIXME: Avoiding a memcpy would be nice, but ref handling makes
         * many assumptions about there being no actual duplicates.
         * FIXME: This does not copy padding for out-of-frame motion
         * vectors. Given we are concealing a lost frame, this probably
         * is not noticeable by comparison, but it should be fixed. */
        if (h->short_ref_count) {
            if (prev &&
                h->short_ref[0]->f->width  == prev->f->width  &&
                h->short_ref[0]->f->height == prev->f->height &&
                h->short_ref[0]->f->format == prev->f->format) {
                ff_thread_await_progress(&prev->tf, INT_MAX, 0);
                if (prev->field_picture)
                    ff_thread_await_progress(&prev->tf, INT_MAX, 1);
                av_image_copy(h->short_ref[0]->f->data,
                              h->short_ref[0]->f->linesize,
                              (const uint8_t **)prev->f->data,
                              prev->f->linesize,
                              prev->f->format,
                              h->mb_width  * 16,
                              h->mb_height * 16);
                h->short_ref[0]->poc = prev->poc + 2;
            }
            h->short_ref[0]->frame_num = h->poc.prev_frame_num;
        }
    }

    /* See if we have a decoded first field looking for a pair...
     * We're using that to see whether to continue decoding in that
     * frame, or to allocate a new one. */
    if (h->first_field) {
        assert(h->cur_pic_ptr);
        assert(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            h->cur_pic_ptr = NULL;
            h->first_field = FIELD_PICTURE(h);
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and the previous field had different frame_nums.
                 * Consider this field first in pair. Throw away previous
                 * one except for reference purposes. */
                h->first_field = 1;
                h->cur_pic_ptr = NULL;
            } else {
                /* Second field in complementary pair */
                h->first_field = 0;
            }
        }
    } else {
        /* Frame or first field in a potentially complementary pair */
        h->first_field = FIELD_PICTURE(h);
    }

    if (!FIELD_PICTURE(h) || h->first_field) {
        if (h264_frame_start(h) < 0) {
            h->first_field = 0;
            return AVERROR_INVALIDDATA;
        }
    } else {
        release_unused_pictures(h, 0);
    }

    ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
                     h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);

    memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
    h->nb_mmco              = sl->nb_mmco;
    h->explicit_ref_marking = sl->explicit_ref_marking;

    h->picture_idr = nal->type == H264_NAL_IDR_SLICE;

    if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
        h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
                            ((1 << h->ps.sps->log2_max_frame_num) - 1);
    }

    h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE) ||
                                    (h->sei.recovery_point.recovery_frame_cnt >= 0);

    if (nal->type == H264_NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
        h->recovery_frame         = -1;
        h->cur_pic_ptr->recovered = 1;
    }
    // If we have an IDR, all frames after it in decoded order are
    // "recovered".
    if (nal->type == H264_NAL_IDR_SLICE)
        h->frame_recovered |= FRAME_RECOVERED_IDR;
    h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);

    /* Set the frame properties/side data. Only done for the second field in
     * field coded frames, since some SEI information is present for each field
     * and is merged by the SEI parsing code. */
    if (!FIELD_PICTURE(h) || !h->first_field) {
        ret = h264_export_frame_props(h);
        if (ret < 0)
            return ret;

        ret = h264_select_output_frame(h);
        if (ret < 0)
            return ret;
    }

    if (h->avctx->hwaccel) {
        ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0);
        if (ret < 0)
            return ret;
    }

    return 0;
}

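/*
 * Parse the slice header bitstream syntax (roughly the slice_header()
 * structure from the spec) into the slice context. Only parsing happens
 * here; acting on the parsed values is left to the callers (see
 * h264_field_start() above).
 */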
  1337. static int h264_slice_header_parse(H264SliceContext *sl, const H2645NAL *nal,
  1338. const H264ParamSets *ps, AVCodecContext *avctx)
  1339. {
  1340. const SPS *sps;
  1341. const PPS *pps;
  1342. int ret;
  1343. unsigned int slice_type, tmp, i;
  1344. int field_pic_flag, bottom_field_flag, picture_structure;
  1345. sl->first_mb_addr = get_ue_golomb(&sl->gb);
  1346. slice_type = get_ue_golomb_31(&sl->gb);
  1347. if (slice_type > 9) {
  1348. av_log(avctx, AV_LOG_ERROR,
  1349. "slice type %d too large at %d\n",
  1350. slice_type, sl->first_mb_addr);
  1351. return AVERROR_INVALIDDATA;
  1352. }
  1353. if (slice_type > 4) {
  1354. slice_type -= 5;
  1355. sl->slice_type_fixed = 1;
  1356. } else
  1357. sl->slice_type_fixed = 0;
  1358. slice_type = ff_h264_golomb_to_pict_type[slice_type];
  1359. sl->slice_type = slice_type;
  1360. sl->slice_type_nos = slice_type & 3;
  1361. if (nal->type == H264_NAL_IDR_SLICE &&
  1362. sl->slice_type_nos != AV_PICTURE_TYPE_I) {
  1363. av_log(avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
  1364. return AVERROR_INVALIDDATA;
  1365. }
  1366. sl->pps_id = get_ue_golomb(&sl->gb);
  1367. if (sl->pps_id >= MAX_PPS_COUNT) {
  1368. av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
  1369. return AVERROR_INVALIDDATA;
  1370. }
  1371. if (!ps->pps_list[sl->pps_id]) {
  1372. av_log(avctx, AV_LOG_ERROR,
  1373. "non-existing PPS %u referenced\n",
  1374. sl->pps_id);
  1375. return AVERROR_INVALIDDATA;
  1376. }
  1377. pps = (const PPS*)ps->pps_list[sl->pps_id]->data;
  1378. if (!ps->sps_list[pps->sps_id]) {
  1379. av_log(avctx, AV_LOG_ERROR,
  1380. "non-existing SPS %u referenced\n", pps->sps_id);
  1381. return AVERROR_INVALIDDATA;
  1382. }
  1383. sps = (const SPS*)ps->sps_list[pps->sps_id]->data;
  1384. sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
  1385. sl->mb_mbaff = 0;
  1386. if (sps->frame_mbs_only_flag) {
  1387. picture_structure = PICT_FRAME;
  1388. } else {
  1389. field_pic_flag = get_bits1(&sl->gb);
  1390. if (field_pic_flag) {
  1391. bottom_field_flag = get_bits1(&sl->gb);
  1392. picture_structure = PICT_TOP_FIELD + bottom_field_flag;
  1393. } else {
  1394. picture_structure = PICT_FRAME;
  1395. }
  1396. }
  1397. sl->picture_structure = picture_structure;
  1398. sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
  1399. if (picture_structure == PICT_FRAME) {
  1400. sl->curr_pic_num = sl->frame_num;
  1401. sl->max_pic_num = 1 << sps->log2_max_frame_num;
  1402. } else {
  1403. sl->curr_pic_num = 2 * sl->frame_num + 1;
  1404. sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
  1405. }
  1406. if (nal->type == H264_NAL_IDR_SLICE)
  1407. get_ue_golomb(&sl->gb); /* idr_pic_id */
  1408. if (sps->poc_type == 0) {
  1409. sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
  1410. if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
  1411. sl->delta_poc_bottom = get_se_golomb(&sl->gb);
  1412. }
  1413. if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
  1414. sl->delta_poc[0] = get_se_golomb(&sl->gb);
  1415. if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
  1416. sl->delta_poc[1] = get_se_golomb(&sl->gb);
  1417. }
  1418. sl->redundant_pic_count = 0;
  1419. if (pps->redundant_pic_cnt_present)
  1420. sl->redundant_pic_count = get_ue_golomb(&sl->gb);
  1421. if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
  1422. sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
  1423. ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
  1424. &sl->gb, pps, sl->slice_type_nos,
  1425. picture_structure);
  1426. if (ret < 0)
  1427. return ret;
  1428. if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
  1429. ret = ff_h264_decode_ref_pic_list_reordering(sl, avctx);
  1430. if (ret < 0) {
  1431. sl->ref_count[1] = sl->ref_count[0] = 0;
  1432. return ret;
  1433. }
  1434. }
  1435. sl->pwt.use_weight = 0;
  1436. for (i = 0; i < 2; i++) {
  1437. sl->pwt.luma_weight_flag[i] = 0;
  1438. sl->pwt.chroma_weight_flag[i] = 0;
  1439. }
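/* Explicit weight tables are coded for P slices when weighted_pred is set
 * and for B slices when weighted_bipred_idc == 1; implicit B-slice weights
 * (weighted_bipred_idc == 2) are derived later in h264_slice_init(). */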
  1440. if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
  1441. (pps->weighted_bipred_idc == 1 &&
  1442. sl->slice_type_nos == AV_PICTURE_TYPE_B))
  1443. ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
  1444. sl->slice_type_nos, &sl->pwt);
  1445. sl->explicit_ref_marking = 0;
  1446. if (nal->ref_idc) {
  1447. ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, avctx);
  1448. if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
  1449. return AVERROR_INVALIDDATA;
  1450. }
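/* cabac_init_idc selects one of three CABAC context initialization
 * tables; it is only coded for CABAC-coded non-intra slices. */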
  1451. if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
  1452. tmp = get_ue_golomb_31(&sl->gb);
  1453. if (tmp > 2) {
  1454. av_log(avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
  1455. return AVERROR_INVALIDDATA;
  1456. }
  1457. sl->cabac_init_idc = tmp;
  1458. }
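/* The initial luma QP of the slice is the PPS init_qp plus
 * slice_qp_delta; the valid range widens by 6 per extra bit of luma
 * bit depth. */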
  1459. sl->last_qscale_diff = 0;
  1460. tmp = pps->init_qp + get_se_golomb(&sl->gb);
  1461. if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
  1462. av_log(avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
  1463. return AVERROR_INVALIDDATA;
  1464. }
  1465. sl->qscale = tmp;
  1466. sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
  1467. sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
  1468. // FIXME qscale / qp ... stuff
  1469. if (sl->slice_type == AV_PICTURE_TYPE_SP)
  1470. get_bits1(&sl->gb); /* sp_for_switch_flag */
  1471. if (sl->slice_type == AV_PICTURE_TYPE_SP ||
  1472. sl->slice_type == AV_PICTURE_TYPE_SI)
  1473. get_se_golomb(&sl->gb); /* slice_qs_delta */
  1474. sl->deblocking_filter = 1;
  1475. sl->slice_alpha_c0_offset = 0;
  1476. sl->slice_beta_offset = 0;
  1477. if (pps->deblocking_filter_parameters_present) {
  1478. tmp = get_ue_golomb_31(&sl->gb);
  1479. if (tmp > 2) {
  1480. av_log(avctx, AV_LOG_ERROR,
  1481. "deblocking_filter_idc %u out of range\n", tmp);
  1482. return AVERROR_INVALIDDATA;
  1483. }
  1484. sl->deblocking_filter = tmp;
  1485. if (sl->deblocking_filter < 2)
  1486. sl->deblocking_filter ^= 1; // 1<->0
  1487. if (sl->deblocking_filter) {
  1488. sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2;
  1489. sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2;
  1490. if (sl->slice_alpha_c0_offset > 12 ||
  1491. sl->slice_alpha_c0_offset < -12 ||
  1492. sl->slice_beta_offset > 12 ||
  1493. sl->slice_beta_offset < -12) {
  1494. av_log(avctx, AV_LOG_ERROR,
  1495. "deblocking filter parameters %d %d out of range\n",
  1496. sl->slice_alpha_c0_offset, sl->slice_beta_offset);
  1497. return AVERROR_INVALIDDATA;
  1498. }
  1499. }
  1500. }
  1501. return 0;
  1502. }
  1503. /* do all the per-slice initialization needed before we can start decoding the
  1504. * actual MBs */
  1505. static int h264_slice_init(H264Context *h, H264SliceContext *sl,
  1506. const H2645NAL *nal)
  1507. {
  1508. int i, j, ret = 0;
  1509. if (h->current_slice > 0) {
  1510. if (h->ps.pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) {
  1511. av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
  1512. return AVERROR_INVALIDDATA;
  1513. }
  1514. if (h->picture_structure != sl->picture_structure ||
  1515. h->droppable != (nal->ref_idc == 0)) {
  1516. av_log(h->avctx, AV_LOG_ERROR,
  1517. "Changing field mode (%d -> %d) between slices is not allowed\n",
  1518. h->picture_structure, sl->picture_structure);
  1519. return AVERROR_INVALIDDATA;
  1520. } else if (!h->cur_pic_ptr) {
  1521. av_log(h->avctx, AV_LOG_ERROR,
  1522. "unset cur_pic_ptr on slice %d\n",
  1523. h->current_slice + 1);
  1524. return AVERROR_INVALIDDATA;
  1525. }
  1526. }
  1527. if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
  1528. av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
  1529. return AVERROR_INVALIDDATA;
  1530. }
  1531. assert(h->mb_num == h->mb_width * h->mb_height);
  1532. if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
  1533. sl->first_mb_addr >= h->mb_num) {
  1534. av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
  1535. return AVERROR_INVALIDDATA;
  1536. }
  1537. sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
  1538. sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
  1539. FIELD_OR_MBAFF_PICTURE(h);
  1540. if (h->picture_structure == PICT_BOTTOM_FIELD)
  1541. sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
  1542. assert(sl->mb_y < h->mb_height);
  1543. ret = ff_h264_build_ref_list(h, sl);
  1544. if (ret < 0)
  1545. return ret;
  1546. if (h->ps.pps->weighted_bipred_idc == 2 &&
  1547. sl->slice_type_nos == AV_PICTURE_TYPE_B) {
  1548. implicit_weight_table(h, sl, -1);
  1549. if (FRAME_MBAFF(h)) {
  1550. implicit_weight_table(h, sl, 0);
  1551. implicit_weight_table(h, sl, 1);
  1552. }
  1553. }
  1554. if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
  1555. ff_h264_direct_dist_scale_factor(h, sl);
  1556. ff_h264_direct_ref_list_init(h, sl);
  1557. if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
  1558. (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
  1559. sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
  1560. (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
  1561. sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
  1562. (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
  1563. nal->ref_idc == 0))
  1564. sl->deblocking_filter = 0;
  1565. if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
  1566. if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
  1567. /* Cheat slightly for speed:
  1568. * Do not bother to deblock across slices. */
  1569. sl->deblocking_filter = 2;
  1570. } else {
  1571. h->postpone_filter = 1;
  1572. }
  1573. }
  1574. sl->qp_thresh = 15 -
  1575. FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
  1576. FFMAX3(0,
  1577. h->ps.pps->chroma_qp_index_offset[0],
  1578. h->ps.pps->chroma_qp_index_offset[1]) +
  1579. 6 * (h->ps.sps->bit_depth_luma - 8);
  1580. sl->slice_num = ++h->current_slice;
  1581. if (sl->slice_num >= MAX_SLICES) {
  1582. av_log(h->avctx, AV_LOG_ERROR,
  1583. "Too many slices, increase MAX_SLICES and recompile\n");
  1584. }
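/* Build the ref2frm table for this slice: it maps reference indices to
 * globally comparable picture ids (4 * id + reference flags), so the loop
 * filter can tell whether neighbouring MBs from other slices use the same
 * reference picture. Entries with no match keep the out-of-range id 60. */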
  1585. for (j = 0; j < 2; j++) {
  1586. int id_list[16];
  1587. int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
  1588. for (i = 0; i < 16; i++) {
  1589. id_list[i] = 60;
  1590. if (j < sl->list_count && i < sl->ref_count[j] &&
  1591. sl->ref_list[j][i].parent->f->buf[0]) {
  1592. int k;
  1593. AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
  1594. for (k = 0; k < h->short_ref_count; k++)
  1595. if (h->short_ref[k]->f->buf[0]->buffer == buf) {
  1596. id_list[i] = k;
  1597. break;
  1598. }
  1599. for (k = 0; k < h->long_ref_count; k++)
  1600. if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
  1601. id_list[i] = h->short_ref_count + k;
  1602. break;
  1603. }
  1604. }
  1605. }
  1606. ref2frm[0] =
  1607. ref2frm[1] = -1;
  1608. for (i = 0; i < 16; i++)
  1609. ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
  1610. ref2frm[18 + 0] =
  1611. ref2frm[18 + 1] = -1;
  1612. for (i = 16; i < 48; i++)
  1613. ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
1614. (sl->ref_list[j][(i - 16) >> 1].reference & 3);
  1615. }
  1616. if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
  1617. av_log(h->avctx, AV_LOG_DEBUG,
  1618. "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
  1619. sl->slice_num,
  1620. (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
  1621. sl->mb_y * h->mb_width + sl->mb_x,
  1622. av_get_picture_type_char(sl->slice_type),
  1623. sl->slice_type_fixed ? " fix" : "",
  1624. nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
  1625. h->poc.frame_num,
  1626. h->cur_pic_ptr->field_poc[0],
  1627. h->cur_pic_ptr->field_poc[1],
  1628. sl->ref_count[0], sl->ref_count[1],
  1629. sl->qscale,
  1630. sl->deblocking_filter,
  1631. sl->slice_alpha_c0_offset, sl->slice_beta_offset,
  1632. sl->pwt.use_weight,
  1633. sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
  1634. sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
  1635. }
  1636. return 0;
  1637. }
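/**
 * Parse the slice header of a NAL unit and queue the slice for decoding.
 * Queued slices are decoded, possibly in parallel, by
 * ff_h264_execute_decode_slices(), either when a new field/frame starts or
 * when the caller flushes the queue explicitly.
 */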
  1638. int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
  1639. {
  1640. H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
  1641. int ret;
  1642. sl->gb = nal->gb;
  1643. ret = h264_slice_header_parse(sl, nal, &h->ps, h->avctx);
  1644. if (ret < 0)
  1645. return ret;
  1646. // discard redundant pictures
  1647. if (sl->redundant_pic_count > 0)
  1648. return 0;
  1649. if (!h->setup_finished) {
  1650. if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
  1651. // this slice starts a new field
  1652. // first decode any pending queued slices
  1653. if (h->nb_slice_ctx_queued) {
  1654. H264SliceContext tmp_ctx;
  1655. ret = ff_h264_execute_decode_slices(h);
  1656. if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
  1657. return ret;
  1658. memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
  1659. memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
  1660. memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
  1661. sl = h->slice_ctx;
  1662. }
  1663. if (h->field_started)
  1664. ff_h264_field_end(h, sl, 1);
  1665. h->current_slice = 0;
  1666. if (!h->first_field) {
  1667. if (h->cur_pic_ptr && !h->droppable) {
  1668. ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
  1669. h->picture_structure == PICT_BOTTOM_FIELD);
  1670. }
  1671. h->cur_pic_ptr = NULL;
  1672. }
  1673. }
  1674. if (h->current_slice == 0) {
  1675. ret = h264_field_start(h, sl, nal);
  1676. if (ret < 0)
  1677. return ret;
  1678. h->field_started = 1;
  1679. }
  1680. }
  1681. ret = h264_slice_init(h, sl, nal);
  1682. if (ret < 0)
  1683. return ret;
  1684. if ((h->avctx->skip_frame < AVDISCARD_NONREF || nal->ref_idc) &&
  1685. (h->avctx->skip_frame < AVDISCARD_BIDIR ||
  1686. sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
  1687. (h->avctx->skip_frame < AVDISCARD_NONKEY ||
  1688. h->cur_pic_ptr->f->key_frame) &&
  1689. h->avctx->skip_frame < AVDISCARD_ALL) {
  1690. h->nb_slice_ctx_queued++;
  1691. }
  1692. return 0;
  1693. }
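/**
 * Return the bitstream slice_type value (0 = P ... 4 = SI) corresponding to
 * the slice context, or AVERROR_INVALIDDATA for an unknown type; used e.g.
 * when filling hardware acceleration slice parameters.
 */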
  1694. int ff_h264_get_slice_type(const H264SliceContext *sl)
  1695. {
  1696. switch (sl->slice_type) {
  1697. case AV_PICTURE_TYPE_P:
  1698. return 0;
  1699. case AV_PICTURE_TYPE_B:
  1700. return 1;
  1701. case AV_PICTURE_TYPE_I:
  1702. return 2;
  1703. case AV_PICTURE_TYPE_SP:
  1704. return 3;
  1705. case AV_PICTURE_TYPE_SI:
  1706. return 4;
  1707. default:
  1708. return AVERROR_INVALIDDATA;
  1709. }
  1710. }
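/**
 * Fill the motion vector and reference caches for one reference list of an
 * inter MB, translating neighbour reference indices through ref2frm so
 * that values originating from different slices are comparable by the
 * loop filter.
 */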
  1711. static av_always_inline void fill_filter_caches_inter(const H264Context *h,
  1712. H264SliceContext *sl,
  1713. int mb_type, int top_xy,
  1714. int left_xy[LEFT_MBS],
  1715. int top_type,
  1716. int left_type[LEFT_MBS],
  1717. int mb_xy, int list)
  1718. {
  1719. int b_stride = h->b_stride;
  1720. int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
  1721. int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
  1722. if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
  1723. if (USES_LIST(top_type, list)) {
  1724. const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
  1725. const int b8_xy = 4 * top_xy + 2;
  1726. const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
  1727. AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
  1728. ref_cache[0 - 1 * 8] =
  1729. ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
  1730. ref_cache[2 - 1 * 8] =
  1731. ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
  1732. } else {
  1733. AV_ZERO128(mv_dst - 1 * 8);
  1734. AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
  1735. }
  1736. if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
  1737. if (USES_LIST(left_type[LTOP], list)) {
  1738. const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
  1739. const int b8_xy = 4 * left_xy[LTOP] + 1;
  1740. const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
  1741. AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
  1742. AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
  1743. AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
  1744. AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
  1745. ref_cache[-1 + 0] =
  1746. ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
  1747. ref_cache[-1 + 16] =
  1748. ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
  1749. } else {
  1750. AV_ZERO32(mv_dst - 1 + 0);
  1751. AV_ZERO32(mv_dst - 1 + 8);
  1752. AV_ZERO32(mv_dst - 1 + 16);
  1753. AV_ZERO32(mv_dst - 1 + 24);
  1754. ref_cache[-1 + 0] =
  1755. ref_cache[-1 + 8] =
  1756. ref_cache[-1 + 16] =
  1757. ref_cache[-1 + 24] = LIST_NOT_USED;
  1758. }
  1759. }
  1760. }
  1761. if (!USES_LIST(mb_type, list)) {
  1762. fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
  1763. AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
  1764. AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
  1765. AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
  1766. AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
  1767. return;
  1768. }
  1769. {
  1770. int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
  1771. const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
  1772. uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
  1773. uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
  1774. AV_WN32A(&ref_cache[0 * 8], ref01);
  1775. AV_WN32A(&ref_cache[1 * 8], ref01);
  1776. AV_WN32A(&ref_cache[2 * 8], ref23);
  1777. AV_WN32A(&ref_cache[3 * 8], ref23);
  1778. }
  1779. {
  1780. int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
  1781. AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
  1782. AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
  1783. AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
  1784. AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
  1785. }
  1786. }
  1787. /**
1788. * @return non-zero if the loop filter can be skipped
  1789. */
  1790. static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
  1791. {
  1792. const int mb_xy = sl->mb_xy;
  1793. int top_xy, left_xy[LEFT_MBS];
  1794. int top_type, left_type[LEFT_MBS];
  1795. uint8_t *nnz;
  1796. uint8_t *nnz_cache;
  1797. top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
  1798. left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
  1799. if (FRAME_MBAFF(h)) {
  1800. const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
  1801. const int curr_mb_field_flag = IS_INTERLACED(mb_type);
  1802. if (sl->mb_y & 1) {
  1803. if (left_mb_field_flag != curr_mb_field_flag)
  1804. left_xy[LTOP] -= h->mb_stride;
  1805. } else {
  1806. if (curr_mb_field_flag)
  1807. top_xy += h->mb_stride &
  1808. (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
  1809. if (left_mb_field_flag != curr_mb_field_flag)
  1810. left_xy[LBOT] += h->mb_stride;
  1811. }
  1812. }
  1813. sl->top_mb_xy = top_xy;
  1814. sl->left_mb_xy[LTOP] = left_xy[LTOP];
  1815. sl->left_mb_xy[LBOT] = left_xy[LBOT];
  1816. {
  1817. /* For sufficiently low qp, filtering wouldn't do anything.
  1818. * This is a conservative estimate: could also check beta_offset
  1819. * and more accurate chroma_qp. */
  1820. int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
  1821. int qp = h->cur_pic.qscale_table[mb_xy];
  1822. if (qp <= qp_thresh &&
  1823. (left_xy[LTOP] < 0 ||
  1824. ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
  1825. (top_xy < 0 ||
  1826. ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
  1827. if (!FRAME_MBAFF(h))
  1828. return 1;
  1829. if ((left_xy[LTOP] < 0 ||
  1830. ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
  1831. (top_xy < h->mb_stride ||
  1832. ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
  1833. return 1;
  1834. }
  1835. }
  1836. top_type = h->cur_pic.mb_type[top_xy];
  1837. left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
  1838. left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
  1839. if (sl->deblocking_filter == 2) {
  1840. if (h->slice_table[top_xy] != sl->slice_num)
  1841. top_type = 0;
  1842. if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
  1843. left_type[LTOP] = left_type[LBOT] = 0;
  1844. } else {
  1845. if (h->slice_table[top_xy] == 0xFFFF)
  1846. top_type = 0;
  1847. if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
  1848. left_type[LTOP] = left_type[LBOT] = 0;
  1849. }
  1850. sl->top_type = top_type;
  1851. sl->left_type[LTOP] = left_type[LTOP];
  1852. sl->left_type[LBOT] = left_type[LBOT];
  1853. if (IS_INTRA(mb_type))
  1854. return 0;
  1855. fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
  1856. top_type, left_type, mb_xy, 0);
  1857. if (sl->list_count == 2)
  1858. fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
  1859. top_type, left_type, mb_xy, 1);
  1860. nnz = h->non_zero_count[mb_xy];
  1861. nnz_cache = sl->non_zero_count_cache;
  1862. AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
  1863. AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
  1864. AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
  1865. AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
  1866. sl->cbp = h->cbp_table[mb_xy];
  1867. if (top_type) {
  1868. nnz = h->non_zero_count[top_xy];
  1869. AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
  1870. }
  1871. if (left_type[LTOP]) {
  1872. nnz = h->non_zero_count[left_xy[LTOP]];
  1873. nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
  1874. nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
  1875. nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
  1876. nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
  1877. }
  1878. /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
  1879. * from what the loop filter needs */
  1880. if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
  1881. if (IS_8x8DCT(top_type)) {
  1882. nnz_cache[4 + 8 * 0] =
  1883. nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
  1884. nnz_cache[6 + 8 * 0] =
  1885. nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
  1886. }
  1887. if (IS_8x8DCT(left_type[LTOP])) {
  1888. nnz_cache[3 + 8 * 1] =
  1889. nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
  1890. }
  1891. if (IS_8x8DCT(left_type[LBOT])) {
  1892. nnz_cache[3 + 8 * 3] =
  1893. nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
  1894. }
  1895. if (IS_8x8DCT(mb_type)) {
  1896. nnz_cache[scan8[0]] =
  1897. nnz_cache[scan8[1]] =
  1898. nnz_cache[scan8[2]] =
  1899. nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
  1900. nnz_cache[scan8[0 + 4]] =
  1901. nnz_cache[scan8[1 + 4]] =
  1902. nnz_cache[scan8[2 + 4]] =
  1903. nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
  1904. nnz_cache[scan8[0 + 8]] =
  1905. nnz_cache[scan8[1 + 8]] =
  1906. nnz_cache[scan8[2 + 8]] =
  1907. nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
  1908. nnz_cache[scan8[0 + 12]] =
  1909. nnz_cache[scan8[1 + 12]] =
  1910. nnz_cache[scan8[2 + 12]] =
  1911. nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
  1912. }
  1913. }
  1914. return 0;
  1915. }
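/**
 * Deblock the MB columns [start_x, end_x) of the current row (both rows of
 * the MB pair for MBAFF frames), restoring the slice state afterwards.
 */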
  1916. static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
  1917. {
  1918. uint8_t *dest_y, *dest_cb, *dest_cr;
  1919. int linesize, uvlinesize, mb_x, mb_y;
  1920. const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
  1921. const int old_slice_type = sl->slice_type;
  1922. const int pixel_shift = h->pixel_shift;
  1923. const int block_h = 16 >> h->chroma_y_shift;
  1924. if (h->postpone_filter)
  1925. return;
  1926. if (sl->deblocking_filter) {
  1927. for (mb_x = start_x; mb_x < end_x; mb_x++)
  1928. for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
  1929. int mb_xy, mb_type;
  1930. mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
  1931. mb_type = h->cur_pic.mb_type[mb_xy];
  1932. if (FRAME_MBAFF(h))
  1933. sl->mb_mbaff =
  1934. sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
  1935. sl->mb_x = mb_x;
  1936. sl->mb_y = mb_y;
  1937. dest_y = h->cur_pic.f->data[0] +
  1938. ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
  1939. dest_cb = h->cur_pic.f->data[1] +
  1940. (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
  1941. mb_y * sl->uvlinesize * block_h;
  1942. dest_cr = h->cur_pic.f->data[2] +
  1943. (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
  1944. mb_y * sl->uvlinesize * block_h;
  1945. // FIXME simplify above
  1946. if (MB_FIELD(sl)) {
  1947. linesize = sl->mb_linesize = sl->linesize * 2;
  1948. uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
  1949. if (mb_y & 1) { // FIXME move out of this function?
  1950. dest_y -= sl->linesize * 15;
  1951. dest_cb -= sl->uvlinesize * (block_h - 1);
  1952. dest_cr -= sl->uvlinesize * (block_h - 1);
  1953. }
  1954. } else {
  1955. linesize = sl->mb_linesize = sl->linesize;
  1956. uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
  1957. }
  1958. backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
  1959. uvlinesize, 0);
  1960. if (fill_filter_caches(h, sl, mb_type))
  1961. continue;
  1962. sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
  1963. sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
  1964. if (FRAME_MBAFF(h)) {
  1965. ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
  1966. linesize, uvlinesize);
  1967. } else {
  1968. ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
  1969. dest_cr, linesize, uvlinesize);
  1970. }
  1971. }
  1972. }
  1973. sl->slice_type = old_slice_type;
  1974. sl->mb_x = end_x;
  1975. sl->mb_y = end_mb_y - FRAME_MBAFF(h);
  1976. sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
  1977. sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
  1978. }
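/**
 * Infer mb_field_decoding_flag for a skipped MB pair from the left or the
 * top neighbour in the same slice, as MBAFF prescribes; defaults to frame
 * decoding when neither neighbour is available.
 */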
  1979. static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
  1980. {
  1981. const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
  1982. int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
  1983. h->cur_pic.mb_type[mb_xy - 1] :
  1984. (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
  1985. h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
  1986. sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
  1987. }
  1988. /**
  1989. * Draw edges and report progress for the last MB row.
  1990. */
  1991. static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
  1992. {
  1993. int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
  1994. int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
  1995. int height = 16 << FRAME_MBAFF(h);
  1996. int deblock_border = (16 + 4) << FRAME_MBAFF(h);
  1997. if (sl->deblocking_filter) {
  1998. if ((top + height) >= pic_height)
  1999. height += deblock_border;
  2000. top -= deblock_border;
  2001. }
  2002. if (top >= pic_height || (top + height) < 0)
  2003. return;
  2004. height = FFMIN(height, pic_height - top);
  2005. if (top < 0) {
  2006. height = top + height;
  2007. top = 0;
  2008. }
  2009. ff_h264_draw_horiz_band(h, sl, top, height);
  2010. if (h->droppable)
  2011. return;
  2012. ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
  2013. h->picture_structure == PICT_BOTTOM_FIELD);
  2014. }
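/**
 * Report a decoded (or errored) slice region to the error-resilience code;
 * this is a no-op when error resilience is compiled out or not enabled for
 * the stream.
 */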
  2015. static void er_add_slice(H264SliceContext *sl,
  2016. int startx, int starty,
  2017. int endx, int endy, int status)
  2018. {
  2019. #if CONFIG_ERROR_RESILIENCE
  2020. ERContext *er = &sl->er;
  2021. if (!sl->h264->enable_er)
  2022. return;
  2023. er->ref_count = sl->ref_count[0];
  2024. ff_er_add_slice(er, startx, starty, endx, endy, status);
  2025. #endif
  2026. }
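/**
 * Decode the MBs of a single slice. Runs as an avctx->execute() worker, so
 * several slice contexts can be decoded in parallel; returns 0 on success
 * or a negative error code on corrupt input.
 */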
  2027. static int decode_slice(struct AVCodecContext *avctx, void *arg)
  2028. {
  2029. H264SliceContext *sl = arg;
  2030. const H264Context *h = sl->h264;
  2031. int lf_x_start = sl->mb_x;
  2032. int orig_deblock = sl->deblocking_filter;
  2033. int ret;
  2034. sl->linesize = h->cur_pic_ptr->f->linesize[0];
  2035. sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
  2036. ret = alloc_scratch_buffers(sl, sl->linesize);
  2037. if (ret < 0)
  2038. return ret;
  2039. sl->mb_skip_run = -1;
  2040. if (h->postpone_filter)
  2041. sl->deblocking_filter = 0;
  2042. sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
  2043. (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
  2044. if (h->ps.pps->cabac) {
  2045. /* realign */
  2046. align_get_bits(&sl->gb);
  2047. /* init cabac */
  2048. ff_init_cabac_decoder(&sl->cabac,
  2049. sl->gb.buffer + get_bits_count(&sl->gb) / 8,
  2050. (get_bits_left(&sl->gb) + 7) / 8);
  2051. ff_h264_init_cabac_states(h, sl);
  2052. for (;;) {
  2053. // START_TIMER
  2054. int ret, eos;
  2055. if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
  2056. av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
  2057. sl->next_slice_idx);
  2058. return AVERROR_INVALIDDATA;
  2059. }
  2060. ret = ff_h264_decode_mb_cabac(h, sl);
  2061. // STOP_TIMER("decode_mb_cabac")
  2062. if (ret >= 0)
  2063. ff_h264_hl_decode_mb(h, sl);
  2064. // FIXME optimal? or let mb_decode decode 16x32 ?
  2065. if (ret >= 0 && FRAME_MBAFF(h)) {
  2066. sl->mb_y++;
  2067. ret = ff_h264_decode_mb_cabac(h, sl);
  2068. if (ret >= 0)
  2069. ff_h264_hl_decode_mb(h, sl);
  2070. sl->mb_y--;
  2071. }
  2072. eos = get_cabac_terminate(&sl->cabac);
  2073. if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
  2074. sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
  2075. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
  2076. sl->mb_y, ER_MB_END);
  2077. if (sl->mb_x >= lf_x_start)
  2078. loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
  2079. goto finish;
  2080. }
  2081. if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
  2082. av_log(h->avctx, AV_LOG_ERROR,
  2083. "error while decoding MB %d %d, bytestream %td\n",
  2084. sl->mb_x, sl->mb_y,
  2085. sl->cabac.bytestream_end - sl->cabac.bytestream);
  2086. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
  2087. sl->mb_y, ER_MB_ERROR);
  2088. return AVERROR_INVALIDDATA;
  2089. }
  2090. if (++sl->mb_x >= h->mb_width) {
  2091. loop_filter(h, sl, lf_x_start, sl->mb_x);
  2092. sl->mb_x = lf_x_start = 0;
  2093. decode_finish_row(h, sl);
  2094. ++sl->mb_y;
  2095. if (FIELD_OR_MBAFF_PICTURE(h)) {
  2096. ++sl->mb_y;
  2097. if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
  2098. predict_field_decoding_flag(h, sl);
  2099. }
  2100. }
  2101. if (eos || sl->mb_y >= h->mb_height) {
  2102. ff_tlog(h->avctx, "slice end %d %d\n",
  2103. get_bits_count(&sl->gb), sl->gb.size_in_bits);
  2104. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
  2105. sl->mb_y, ER_MB_END);
  2106. if (sl->mb_x > lf_x_start)
  2107. loop_filter(h, sl, lf_x_start, sl->mb_x);
  2108. goto finish;
  2109. }
  2110. }
  2111. } else {
  2112. for (;;) {
  2113. int ret;
  2114. if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
  2115. av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
  2116. sl->next_slice_idx);
  2117. return AVERROR_INVALIDDATA;
  2118. }
  2119. ret = ff_h264_decode_mb_cavlc(h, sl);
  2120. if (ret >= 0)
  2121. ff_h264_hl_decode_mb(h, sl);
  2122. // FIXME optimal? or let mb_decode decode 16x32 ?
  2123. if (ret >= 0 && FRAME_MBAFF(h)) {
  2124. sl->mb_y++;
  2125. ret = ff_h264_decode_mb_cavlc(h, sl);
  2126. if (ret >= 0)
  2127. ff_h264_hl_decode_mb(h, sl);
  2128. sl->mb_y--;
  2129. }
  2130. if (ret < 0) {
  2131. av_log(h->avctx, AV_LOG_ERROR,
  2132. "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
  2133. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
  2134. sl->mb_y, ER_MB_ERROR);
  2135. return ret;
  2136. }
  2137. if (++sl->mb_x >= h->mb_width) {
  2138. loop_filter(h, sl, lf_x_start, sl->mb_x);
  2139. sl->mb_x = lf_x_start = 0;
  2140. decode_finish_row(h, sl);
  2141. ++sl->mb_y;
  2142. if (FIELD_OR_MBAFF_PICTURE(h)) {
  2143. ++sl->mb_y;
  2144. if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
  2145. predict_field_decoding_flag(h, sl);
  2146. }
  2147. if (sl->mb_y >= h->mb_height) {
  2148. ff_tlog(h->avctx, "slice end %d %d\n",
  2149. get_bits_count(&sl->gb), sl->gb.size_in_bits);
  2150. if (get_bits_left(&sl->gb) == 0) {
  2151. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
  2152. sl->mb_x - 1, sl->mb_y, ER_MB_END);
  2153. goto finish;
  2154. } else {
  2155. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
  2156. sl->mb_x - 1, sl->mb_y, ER_MB_END);
  2157. return AVERROR_INVALIDDATA;
  2158. }
  2159. }
  2160. }
  2161. if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
  2162. ff_tlog(h->avctx, "slice end %d %d\n",
  2163. get_bits_count(&sl->gb), sl->gb.size_in_bits);
  2164. if (get_bits_left(&sl->gb) == 0) {
  2165. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
  2166. sl->mb_x - 1, sl->mb_y, ER_MB_END);
  2167. if (sl->mb_x > lf_x_start)
  2168. loop_filter(h, sl, lf_x_start, sl->mb_x);
  2169. goto finish;
  2170. } else {
  2171. er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
  2172. sl->mb_y, ER_MB_ERROR);
  2173. return AVERROR_INVALIDDATA;
  2174. }
  2175. }
  2176. }
  2177. }
  2178. finish:
  2179. sl->deblocking_filter = orig_deblock;
  2180. return 0;
  2181. }
  2182. /**
  2183. * Call decode_slice() for each context.
  2184. *
  2185. * @param h h264 master context
  2186. */
  2187. int ff_h264_execute_decode_slices(H264Context *h)
  2188. {
  2189. AVCodecContext *const avctx = h->avctx;
  2190. H264SliceContext *sl;
  2191. int context_count = h->nb_slice_ctx_queued;
  2192. int ret = 0;
  2193. int i, j;
  2194. if (h->avctx->hwaccel || context_count < 1)
  2195. return 0;
  2196. if (context_count == 1) {
  2197. h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
  2198. h->postpone_filter = 0;
  2199. ret = decode_slice(avctx, &h->slice_ctx[0]);
  2200. h->mb_y = h->slice_ctx[0].mb_y;
  2201. if (ret < 0)
  2202. goto finish;
  2203. } else {
  2204. for (i = 0; i < context_count; i++) {
  2205. int next_slice_idx = h->mb_width * h->mb_height;
  2206. int slice_idx;
  2207. sl = &h->slice_ctx[i];
  2208. sl->er.error_count = 0;
  2209. /* make sure none of those slices overlap */
  2210. slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
  2211. for (j = 0; j < context_count; j++) {
  2212. H264SliceContext *sl2 = &h->slice_ctx[j];
  2213. int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
  2214. if (i == j || slice_idx2 < slice_idx)
  2215. continue;
  2216. next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
  2217. }
  2218. sl->next_slice_idx = next_slice_idx;
  2219. }
  2220. avctx->execute(avctx, decode_slice, h->slice_ctx,
  2221. NULL, context_count, sizeof(h->slice_ctx[0]));
2222. /* pull per-slice decoding state back into the master context */
  2223. sl = &h->slice_ctx[context_count - 1];
  2224. h->mb_y = sl->mb_y;
  2225. for (i = 1; i < context_count; i++)
  2226. h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
  2227. if (h->postpone_filter) {
  2228. h->postpone_filter = 0;
  2229. for (i = 0; i < context_count; i++) {
  2230. int y_end, x_end;
  2231. sl = &h->slice_ctx[i];
  2232. y_end = FFMIN(sl->mb_y + 1, h->mb_height);
  2233. x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
  2234. for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
  2235. sl->mb_y = j;
  2236. loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
  2237. j == y_end - 1 ? x_end : h->mb_width);
  2238. }
  2239. }
  2240. }
  2241. }
  2242. finish:
  2243. h->nb_slice_ctx_queued = 0;
  2244. return ret;
  2245. }