/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "golomb.h"
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"
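
/* Scan tables map the n-th coefficient in decoding order to its raster
 * position inside the block, encoded as x + y * stride (stride 4 for 4x4
 * blocks, 8 for 8x8). The field_* variants are the vertically biased scan
 * orders used for interlaced (field) macroblocks. */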
static const uint8_t field_scan[16] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

static const uint8_t field_scan8x8[64] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

static const uint8_t field_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};

// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
static const uint8_t zigzag_scan8x8_cavlc[64] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
static void release_unused_pictures(H264Context *h, int remove_current)
{
    int i;

    /* release non reference frames */
    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
            (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
            ff_h264_unref_picture(h, &h->DPB[i]);
        }
    }
}

static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
{
    const H264Context *h = sl->h264;
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated,
                   16 * 6 * alloc_size);
    // edge emu needs blocksize + filter length - 1
    // (= 21x21 for H.264)
    av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated,
                   alloc_size * 2 * 21);

    av_fast_malloc(&sl->top_borders[0], &sl->top_borders_allocated[0],
                   h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
    av_fast_malloc(&sl->top_borders[1], &sl->top_borders_allocated[1],
                   h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);

    if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
        !sl->top_borders[0] || !sl->top_borders[1]) {
        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
        return AVERROR(ENOMEM);
    }

    return 0;
}
static int init_table_pools(H264Context *h)
{
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
                                               av_buffer_allocz);
    h->mb_type_pool      = av_buffer_pool_init((big_mb_num + h->mb_stride) *
                                               sizeof(uint32_t), av_buffer_allocz);
    h->motion_val_pool   = av_buffer_pool_init(2 * (b4_array_size + 4) *
                                               sizeof(int16_t), av_buffer_allocz);
    h->ref_index_pool    = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);

    if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
        !h->ref_index_pool) {
        av_buffer_pool_uninit(&h->qscale_table_pool);
        av_buffer_pool_uninit(&h->mb_type_pool);
        av_buffer_pool_uninit(&h->motion_val_pool);
        av_buffer_pool_uninit(&h->ref_index_pool);
        return AVERROR(ENOMEM);
    }

    return 0;
}
static int alloc_picture(H264Context *h, H264Picture *pic)
{
    int i, ret = 0;

    av_assert0(!pic->f->data[0]);

    pic->tf.f = pic->f;
    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
                               AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    if (h->avctx->hwaccel) {
        const AVHWAccel *hwaccel = h->avctx->hwaccel;
        av_assert0(!pic->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf)
                return AVERROR(ENOMEM);
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if (!h->qscale_table_pool) {
        ret = init_table_pools(h);
        if (ret < 0)
            goto fail;
    }

    pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
    pic->mb_type_buf      = av_buffer_pool_get(h->mb_type_pool);
    if (!pic->qscale_table_buf || !pic->mb_type_buf)
        goto fail;

    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;

    for (i = 0; i < 2; i++) {
        pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
        pic->ref_index_buf[i]  = av_buffer_pool_get(h->ref_index_pool);
        if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
            goto fail;

        pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
        pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    }

    return 0;
fail:
    ff_h264_unref_picture(h, pic);
    return (ret < 0) ? ret : AVERROR(ENOMEM);
}
static inline int pic_is_unused(H264Context *h, H264Picture *pic)
{
    if (!pic->f->buf[0])
        return 1;
    return 0;
}

static int find_unused_picture(H264Context *h)
{
    int i;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (pic_is_unused(h, &h->DPB[i]))
            break;
    }
    if (i == H264_MAX_PICTURE_COUNT)
        return AVERROR_INVALIDDATA;

    return i;
}

static int initialize_cur_frame(H264Context *h)
{
    H264Picture *cur;
    int ret;

    release_unused_pictures(h, 1);
    ff_h264_unref_picture(h, &h->cur_pic);
    h->cur_pic_ptr = NULL;

    ret = find_unused_picture(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        return ret;
    }
    cur = &h->DPB[ret];

    ret = alloc_picture(h, cur);
    if (ret < 0)
        return ret;

    ret = ff_h264_ref_picture(h, &h->cur_pic, cur);
    if (ret < 0)
        return ret;
    h->cur_pic_ptr = cur;

    return 0;
}
#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
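
/* Translate an H264Picture pointer that points into the old context's DPB
 * into a pointer to the same slot in the new context's DPB; used below when
 * copying state between frame-threading contexts. */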
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->DPB &&                       \
      pic < old_ctx->DPB + H264_MAX_PICTURE_COUNT) ?      \
     &new_ctx->DPB[pic - old_ctx->DPB] : NULL)

static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
                               H264Context *new_base,
                               H264Context *old_base)
{
    int i;

    for (i = 0; i < count; i++) {
        assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
                IN_RANGE(from[i], old_base->DPB,
                         sizeof(H264Picture) * H264_MAX_PICTURE_COUNT) ||
                !from[i]));
        to[i] = REBASE_PICTURE(from[i], new_base, old_base);
    }
}
static int h264_slice_header_init(H264Context *h);

int ff_h264_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    H264Context *h = dst->priv_data, *h1 = src->priv_data;
    int inited = h->context_initialized, err = 0;
    int need_reinit = 0;
    int i, ret;

    if (dst == src || !h1->context_initialized)
        return 0;

    if (!h1->ps.sps)
        return AVERROR_INVALIDDATA;

    if (inited &&
        (h->width      != h1->width      ||
         h->height     != h1->height     ||
         h->mb_width   != h1->mb_width   ||
         h->mb_height  != h1->mb_height  ||
         !h->ps.sps                      ||
         h->ps.sps->bit_depth_luma    != h1->ps.sps->bit_depth_luma    ||
         h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
         h->ps.sps->colorspace        != h1->ps.sps->colorspace)) {
        need_reinit = 1;
    }

    // SPS/PPS
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
        av_buffer_unref(&h->ps.sps_list[i]);
        if (h1->ps.sps_list[i]) {
            h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
            if (!h->ps.sps_list[i])
                return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
        av_buffer_unref(&h->ps.pps_list[i]);
        if (h1->ps.pps_list[i]) {
            h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
            if (!h->ps.pps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    h->ps.sps = h1->ps.sps;

    if (need_reinit || !inited) {
        h->width     = h1->width;
        h->height    = h1->height;
        h->mb_height = h1->mb_height;
        h->mb_width  = h1->mb_width;
        h->mb_num    = h1->mb_num;
        h->mb_stride = h1->mb_stride;
        h->b_stride  = h1->b_stride;

        if ((err = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n");
            return err;
        }

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
    }

    h->avctx->coded_height  = h1->avctx->coded_height;
    h->avctx->coded_width   = h1->avctx->coded_width;
    h->avctx->width         = h1->avctx->width;
    h->avctx->height        = h1->avctx->height;
    h->coded_picture_number = h1->coded_picture_number;
    h->first_field          = h1->first_field;
    h->picture_structure    = h1->picture_structure;
    h->mb_aff_frame         = h1->mb_aff_frame;
    h->droppable            = h1->droppable;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ff_h264_unref_picture(h, &h->DPB[i]);
        if (h1->DPB[i].f->buf[0] &&
            (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
            return ret;
    }

    h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
    ff_h264_unref_picture(h, &h->cur_pic);
    if (h1->cur_pic.f->buf[0]) {
        ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
        if (ret < 0)
            return ret;
    }

    h->enable_er       = h1->enable_er;
    h->workaround_bugs = h1->workaround_bugs;
    h->droppable       = h1->droppable;

    // extradata/NAL handling
    h->is_avc          = h1->is_avc;
    h->nal_length_size = h1->nal_length_size;

    memcpy(&h->poc, &h1->poc, sizeof(h->poc));

    memcpy(h->short_ref,   h1->short_ref,   sizeof(h->short_ref));
    memcpy(h->long_ref,    h1->long_ref,    sizeof(h->long_ref));
    memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
    memcpy(h->last_pocs,   h1->last_pocs,   sizeof(h->last_pocs));

    h->next_output_pic   = h1->next_output_pic;
    h->next_outputed_poc = h1->next_outputed_poc;

    memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
    h->nb_mmco              = h1->nb_mmco;
    h->mmco_reset           = h1->mmco_reset;
    h->explicit_ref_marking = h1->explicit_ref_marking;
    h->long_ref_count       = h1->long_ref_count;
    h->short_ref_count      = h1->short_ref_count;

    copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
    copy_picture_range(h->long_ref,  h1->long_ref,  32, h, h1);
    copy_picture_range(h->delayed_pic, h1->delayed_pic,
                       MAX_DELAYED_PIC_COUNT + 2, h, h1);

    if (!h->cur_pic_ptr)
        return 0;

    if (!h->droppable) {
        err = ff_h264_execute_ref_pic_marking(h);
        h->poc.prev_poc_msb = h->poc.poc_msb;
        h->poc.prev_poc_lsb = h->poc.poc_lsb;
    }
    h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
    h->poc.prev_frame_num        = h->poc.frame_num;

    h->recovery_frame  = h1->recovery_frame;
    h->frame_recovered = h1->frame_recovered;

    return err;
}
static int h264_frame_start(H264Context *h)
{
    H264Picture *pic;
    int i, ret;
    const int pixel_shift = h->pixel_shift;

    ret = initialize_cur_frame(h);
    if (ret < 0)
        return ret;

    pic = h->cur_pic_ptr;
    pic->reference               = h->droppable ? 0 : h->picture_structure;
    pic->f->coded_picture_number = h->coded_picture_number++;
    pic->field_picture           = h->picture_structure != PICT_FRAME;
    pic->frame_num               = h->poc.frame_num;
    /*
     * Zero key_frame here; IDR markings per slice in frame or fields are ORed
     * in later.
     * See decode_nal_units().
     */
    pic->f->key_frame = 0;
    pic->mmco_reset   = 0;
    pic->recovered    = 0;

    pic->f->pict_type = h->slice_ctx[0].slice_type;

    if (CONFIG_ERROR_RESILIENCE && h->enable_er)
        ff_er_frame_start(&h->slice_ctx[0].er);
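
    /* Precompute the byte offset of every 4x4 block inside a macroblock.
     * Entries [0..47] assume frame (progressive) line strides; entries
     * [48..95] use a doubled line stride for field/MBAFF macroblocks. */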
    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    /* Some macroblocks can be accessed before they're available in case
     * of lost slices, MBAFF or threading. */
    memset(h->slice_table, -1,
           (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));

    /* We mark the current picture as non-reference after allocating it, so
     * that if we break out due to an error it can be released automatically
     * in the next ff_mpv_frame_start().
     */
    h->cur_pic_ptr->reference = 0;

    h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;

    h->next_output_pic = NULL;

    h->postpone_filter = 0;

    h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);

    assert(h->cur_pic_ptr->long_ref == 0);

    return 0;
}
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl,
                                              uint8_t *src_y,
                                              uint8_t *src_cb, uint8_t *src_cr,
                                              int linesize, int uvlinesize,
                                              int simple)
{
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift;
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl)) {
                top_border = sl->top_borders[0][sl->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF(sl)) {
            top_idx = 0;
        } else
            return;
    }

    top_border = sl->top_borders[top_idx][sl->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);

    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}
/**
 * Initialize implicit_weight table.
 * @param field  0/1 initialize the weight for interlaced MBAFF
 *               -1 initializes the rest
 */
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
{
    int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;

    for (i = 0; i < 2; i++) {
        sl->pwt.luma_weight_flag[i]   = 0;
        sl->pwt.chroma_weight_flag[i] = 0;
    }

    if (field < 0) {
        if (h->picture_structure == PICT_FRAME) {
            cur_poc = h->cur_pic_ptr->poc;
        } else {
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
        }
        if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
            sl->ref_list[0][0].poc + sl->ref_list[1][0].poc == 2 * cur_poc) {
            sl->pwt.use_weight        = 0;
            sl->pwt.use_weight_chroma = 0;
            return;
        }
        ref_start  = 0;
        ref_count0 = sl->ref_count[0];
        ref_count1 = sl->ref_count[1];
    } else {
        cur_poc    = h->cur_pic_ptr->field_poc[field];
        ref_start  = 16;
        ref_count0 = 16 + 2 * sl->ref_count[0];
        ref_count1 = 16 + 2 * sl->ref_count[1];
    }

    sl->pwt.use_weight               = 2;
    sl->pwt.use_weight_chroma        = 2;
    sl->pwt.luma_log2_weight_denom   = 5;
    sl->pwt.chroma_log2_weight_denom = 5;
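
    /* Derive the implicit weights from POC distances (cf. H.264 spec
     * 8.4.2.3.2): tx approximates 16384 / td, making dist_scale_factor a
     * tb / td ratio in 6.2 fixed point. Long-term references and
     * out-of-range scale factors fall back to equal weighting (32/32). */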
    for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
        int poc0 = sl->ref_list[0][ref0].poc;
        for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
            int w = 32;
            if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
                int poc1 = sl->ref_list[1][ref1].poc;
                int td   = av_clip_int8(poc1 - poc0);
                if (td) {
                    int tb = av_clip_int8(cur_poc - poc0);
                    int tx = (16384 + (FFABS(td) >> 1)) / td;
                    int dist_scale_factor = (tb * tx + 32) >> 8;
                    if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
                        w = 64 - dist_scale_factor;
                }
            }
            if (field < 0) {
                sl->pwt.implicit_weight[ref0][ref1][0] =
                sl->pwt.implicit_weight[ref0][ref1][1] = w;
            } else {
                sl->pwt.implicit_weight[ref0][ref1][field] = w;
            }
        }
    }
}
/**
 * initialize scan tables
 */
static void init_scan_tables(H264Context *h)
{
    int i;
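
    /* TRANSPOSE swaps the x and y fields of each packed coordinate,
     * converting the static tables above from x + y * stride to
     * y + x * stride order, which is the layout the rest of the decoder
     * expects. */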
    for (i = 0; i < 16; i++) {
#define TRANSPOSE(x) (x >> 2) | ((x << 2) & 0xF)
        h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
        h->field_scan[i]  = TRANSPOSE(field_scan[i]);
#undef TRANSPOSE
    }
    for (i = 0; i < 64; i++) {
#define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3)
        h->zigzag_scan8x8[i]       = TRANSPOSE(ff_zigzag_direct[i]);
        h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
        h->field_scan8x8[i]        = TRANSPOSE(field_scan8x8[i]);
        h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
    }
    if (h->ps.sps->transform_bypass) { // FIXME same ugly
        h->zigzag_scan_q0          = ff_zigzag_scan;
        h->zigzag_scan8x8_q0       = ff_zigzag_direct;
        h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
        h->field_scan_q0           = field_scan;
        h->field_scan8x8_q0        = field_scan8x8;
        h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
    } else {
        h->zigzag_scan_q0          = h->zigzag_scan;
        h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
        h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
        h->field_scan_q0           = h->field_scan;
        h->field_scan8x8_q0        = h->field_scan8x8;
        h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
    }
}
static enum AVPixelFormat get_pixel_format(H264Context *h)
{
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     CONFIG_H264_D3D11VA_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     (CONFIG_H264_VDA_HWACCEL * 2) + \
                     CONFIG_H264_VDPAU_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;
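
    /* Build the candidate list in order of preference: hwaccel formats are
     * appended before the matching software format, so ff_get_format() (and
     * the caller's get_format callback) can pick hardware decoding first. */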
    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else
            *fmt++ = AV_PIX_FMT_YUV420P10;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_H264_VDA_HWACCEL
            *fmt++ = AV_PIX_FMT_VDA_VLD;
            *fmt++ = AV_PIX_FMT_VDA;
#endif
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    return ff_get_format(h->avctx, choices);
}
/* export coded and cropped frame dimensions to AVCodecContext */
static int init_dimensions(H264Context *h)
{
    SPS *sps = h->ps.sps;
    int width  = h->width  - (sps->crop_right + sps->crop_left);
    int height = h->height - (sps->crop_top   + sps->crop_bottom);

    /* handle container cropping */
    if (FFALIGN(h->avctx->width,  16) == FFALIGN(width,  16) &&
        FFALIGN(h->avctx->height, 16) == FFALIGN(height, 16)) {
        width  = h->avctx->width;
        height = h->avctx->height;
    }

    if (width <= 0 || height <= 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Invalid cropped dimensions: %dx%d.\n",
               width, height);
        if (h->avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;

        av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
        sps->crop_bottom =
        sps->crop_top    =
        sps->crop_right  =
        sps->crop_left   =
        sps->crop        = 0;

        width  = h->width;
        height = h->height;
    }

    h->avctx->coded_width  = h->width;
    h->avctx->coded_height = h->height;
    h->avctx->width        = width;
    h->avctx->height       = height;

    return 0;
}
static int h264_slice_header_init(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    int i, ret;

    ff_set_sar(h->avctx, sps->sar);
    av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
                                     &h->chroma_x_shift, &h->chroma_y_shift);

    if (sps->timing_info_present_flag) {
        int64_t den = sps->time_scale;
        if (h->sei.unregistered.x264_build < 44U)
            den *= 2;
        av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
                  sps->num_units_in_tick, den, 1 << 30);
    }

    ff_h264_free_tables(h);

    h->first_field           = 0;
    h->prev_interlaced_frame = 1;

    init_scan_tables(h);
    ret = ff_h264_alloc_tables(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
        return ret;
    }

    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) {
        av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
               sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
    h->pixel_shift                = sps->bit_depth_luma > 8;
    h->chroma_format_idc          = sps->chroma_format_idc;
    h->bit_depth_luma             = sps->bit_depth_luma;

    ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
                    sps->chroma_format_idc);
    ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
    ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
                      sps->chroma_format_idc);
    ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
        ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
            return ret;
        }
    } else {
        for (i = 0; i < h->nb_slice_ctx; i++) {
            H264SliceContext *sl = &h->slice_ctx[i];

            sl->h264               = h;
            sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[0]       = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[1]       = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;

            if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
                return ret;
            }
        }
    }

    h->context_initialized = 1;

    return 0;
}
static int h264_init_ps(H264Context *h, const H264SliceContext *sl)
{
    const SPS *sps;
    int needs_reinit = 0, ret;

    h->ps.pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
        h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data;

        if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->chroma_format_idc != h->ps.sps->chroma_format_idc)
            needs_reinit = 1;
    }
    sps = h->ps.sps;

    h->avctx->profile = ff_h264_get_profile(sps);
    h->avctx->level   = sps->level_idc;
    h->avctx->refs    = sps->ref_frame_count;

    if (h->mb_width  != sps->mb_width ||
        h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag))
        needs_reinit = 1;

    h->mb_width  = sps->mb_width;
    h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag);
    h->mb_num    = h->mb_width * h->mb_height;
    h->mb_stride = h->mb_width + 1;

    h->b_stride = h->mb_width * 4;

    h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p

    h->width  = 16 * h->mb_width;
    h->height = 16 * h->mb_height;

    ret = init_dimensions(h);
    if (ret < 0)
        return ret;

    if (sps->video_signal_type_present_flag) {
        h->avctx->color_range = sps->full_range ? AVCOL_RANGE_JPEG
                                                : AVCOL_RANGE_MPEG;
        if (sps->colour_description_present_flag) {
            if (h->avctx->colorspace != sps->colorspace)
                needs_reinit = 1;
            h->avctx->color_primaries = sps->color_primaries;
            h->avctx->color_trc       = sps->color_trc;
            h->avctx->colorspace      = sps->colorspace;
        }
    }

    if (!h->context_initialized || needs_reinit) {
        h->context_initialized = 0;
        if (sl != h->slice_ctx) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "changing width %d -> %d / height %d -> %d on "
                   "slice %d\n",
                   h->width, h->avctx->coded_width,
                   h->height, h->avctx->coded_height,
                   h->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }

        ff_h264_flush_change(h);

        if ((ret = get_pixel_format(h)) < 0)
            return ret;
        h->avctx->pix_fmt = ret;

        av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
               "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);

        if ((ret = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "h264_slice_header_init() failed\n");
            return ret;
        }
    }

    return 0;
}
static int h264_export_frame_props(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *cur = h->cur_pic_ptr;

    cur->f->interlaced_frame = 0;
    cur->f->repeat_pict      = 0;

    /* Signal interlacing information externally. */
    /* Prioritize picture timing SEI information over the decoding
     * process, if such SEI is present. */
    if (sps->pic_struct_present_flag) {
        H264SEIPictureTiming *pt = &h->sei.picture_timing;
        switch (pt->pic_struct) {
        case SEI_PIC_STRUCT_FRAME:
            break;
        case SEI_PIC_STRUCT_TOP_FIELD:
        case SEI_PIC_STRUCT_BOTTOM_FIELD:
            cur->f->interlaced_frame = 1;
            break;
        case SEI_PIC_STRUCT_TOP_BOTTOM:
        case SEI_PIC_STRUCT_BOTTOM_TOP:
            if (FIELD_OR_MBAFF_PICTURE(h))
                cur->f->interlaced_frame = 1;
            else
                // try to flag soft telecine progressive
                cur->f->interlaced_frame = h->prev_interlaced_frame;
            break;
        case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
        case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
            /* Signal the possibility of telecined film externally
             * (pic_struct 5,6). From these hints, let the applications
             * decide if they apply deinterlacing. */
            cur->f->repeat_pict = 1;
            break;
        case SEI_PIC_STRUCT_FRAME_DOUBLING:
            cur->f->repeat_pict = 2;
            break;
        case SEI_PIC_STRUCT_FRAME_TRIPLING:
            cur->f->repeat_pict = 4;
            break;
        }

        if ((pt->ct_type & 3) &&
            pt->pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
            cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
    } else {
        /* Derive interlacing flag from used decoding process. */
        cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
    }
    h->prev_interlaced_frame = cur->f->interlaced_frame;

    if (cur->field_poc[0] != cur->field_poc[1]) {
        /* Derive top_field_first from field pocs. */
        cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
    } else {
        if (cur->f->interlaced_frame || sps->pic_struct_present_flag) {
            /* Use picture timing SEI information. Even if it is
             * information from a past frame, it is better than nothing. */
            if (h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
                h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
                cur->f->top_field_first = 1;
            else
                cur->f->top_field_first = 0;
        } else {
            /* Most likely progressive */
            cur->f->top_field_first = 0;
        }
    }
    if (h->sei.frame_packing.present &&
        h->sei.frame_packing.arrangement_type >= 0 &&
        h->sei.frame_packing.arrangement_type <= 6 &&
        h->sei.frame_packing.content_interpretation_type > 0 &&
        h->sei.frame_packing.content_interpretation_type < 3) {
        H264SEIFramePacking *fp = &h->sei.frame_packing;
        AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
        if (!stereo)
            return AVERROR(ENOMEM);

        switch (fp->arrangement_type) {
        case 0:
            stereo->type = AV_STEREO3D_CHECKERBOARD;
            break;
        case 1:
            stereo->type = AV_STEREO3D_COLUMNS;
            break;
        case 2:
            stereo->type = AV_STEREO3D_LINES;
            break;
        case 3:
            if (fp->quincunx_subsampling)
                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
            else
                stereo->type = AV_STEREO3D_SIDEBYSIDE;
            break;
        case 4:
            stereo->type = AV_STEREO3D_TOPBOTTOM;
            break;
        case 5:
            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
            break;
        case 6:
            stereo->type = AV_STEREO3D_2D;
            break;
        }

        if (fp->content_interpretation_type == 2)
            stereo->flags = AV_STEREO3D_FLAG_INVERT;
    }

    if (h->sei.display_orientation.present &&
        (h->sei.display_orientation.anticlockwise_rotation ||
         h->sei.display_orientation.hflip ||
         h->sei.display_orientation.vflip)) {
        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
        AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
                                                           AV_FRAME_DATA_DISPLAYMATRIX,
                                                           sizeof(int32_t) * 9);
        if (!rotation)
            return AVERROR(ENOMEM);

        av_display_rotation_set((int32_t *)rotation->data, angle);
        av_display_matrix_flip((int32_t *)rotation->data,
                               o->hflip, o->vflip);
    }

    if (h->sei.afd.present) {
        AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
                                                     sizeof(uint8_t));
        if (!sd)
            return AVERROR(ENOMEM);

        *sd->data = h->sei.afd.active_format_description;
        h->sei.afd.present = 0;
    }

    if (h->sei.a53_caption.a53_caption) {
        H264SEIA53Caption *a53 = &h->sei.a53_caption;
        AVFrameSideData *sd = av_frame_new_side_data(cur->f,
                                                     AV_FRAME_DATA_A53_CC,
                                                     a53->a53_caption_size);
        if (!sd)
            return AVERROR(ENOMEM);

        memcpy(sd->data, a53->a53_caption, a53->a53_caption_size);
        av_freep(&a53->a53_caption);
        a53->a53_caption_size = 0;
    }

    return 0;
}
/* This function is called right after decoding the slice header for a first
 * slice in a field (or a frame). It decides whether we are decoding a new frame
 * or a second field in a pair and does the necessary setup.
 */
static int h264_field_start(H264Context *h, const H264SliceContext *sl,
                            const H2645NAL *nal)
{
    const SPS *sps;

    int last_pic_structure, last_pic_droppable, ret;

    ret = h264_init_ps(h, sl);
    if (ret < 0)
        return ret;

    sps = h->ps.sps;

    last_pic_droppable   = h->droppable;
    last_pic_structure   = h->picture_structure;
    h->droppable         = (nal->ref_idc == 0);
    h->picture_structure = sl->picture_structure;

    h->poc.frame_num        = sl->frame_num;
    h->poc.poc_lsb          = sl->poc_lsb;
    h->poc.delta_poc_bottom = sl->delta_poc_bottom;
    h->poc.delta_poc[0]     = sl->delta_poc[0];
    h->poc.delta_poc[1]     = sl->delta_poc[1];

    /* Shorten frame num gaps so we don't have to allocate reference
     * frames just to throw them away */
    if (h->poc.frame_num != h->poc.prev_frame_num) {
        int unwrap_prev_frame_num = h->poc.prev_frame_num;
        int max_frame_num         = 1 << sps->log2_max_frame_num;

        if (unwrap_prev_frame_num > h->poc.frame_num)
            unwrap_prev_frame_num -= max_frame_num;

        if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
            unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
            if (unwrap_prev_frame_num < 0)
                unwrap_prev_frame_num += max_frame_num;

            h->poc.prev_frame_num = unwrap_prev_frame_num;
        }
    }

    /* See if we have a decoded first field looking for a pair...
     * Here, we're using that to see if we should mark previously
     * decoded frames as "finished".
     * We have to do that before the "dummy" in-between frame allocation,
     * since that can modify h->cur_pic_ptr. */
    if (h->first_field) {
        assert(h->cur_pic_ptr);
        assert(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          last_pic_structure == PICT_TOP_FIELD);
            }
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and previous field were reference, but had
                 * different frame_nums. Consider this field first in
                 * pair. Throw away previous field except for reference
                 * purposes. */
                if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
                    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                              last_pic_structure == PICT_TOP_FIELD);
                }
            } else {
                /* Second field in complementary pair */
                if (!((last_pic_structure == PICT_TOP_FIELD &&
                       h->picture_structure == PICT_BOTTOM_FIELD) ||
                      (last_pic_structure == PICT_BOTTOM_FIELD &&
                       h->picture_structure == PICT_TOP_FIELD))) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "Invalid field mode combination %d/%d\n",
                           last_pic_structure, h->picture_structure);
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_INVALIDDATA;
                } else if (last_pic_droppable != h->droppable) {
                    avpriv_request_sample(h->avctx,
                                          "Found reference and non-reference fields in the same frame, which");
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_PATCHWELCOME;
                }
            }
        }
    }
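
    /* Fill any remaining frame_num gap with dummy frames: allocate a frame,
     * give it the missing frame_num, mark it fully decoded and run reference
     * marking on it so reference list construction stays consistent. */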
    while (h->poc.frame_num != h->poc.prev_frame_num &&
           h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
        H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
        av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
               h->poc.frame_num, h->poc.prev_frame_num);
        ret = initialize_cur_frame(h);
        if (ret < 0) {
            h->first_field = 0;
            return ret;
        }

        h->poc.prev_frame_num++;
        h->poc.prev_frame_num    %= 1 << sps->log2_max_frame_num;
        h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);

        h->explicit_ref_marking = 0;
        ret = ff_h264_execute_ref_pic_marking(h);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            return ret;

        /* Error concealment: If a ref is missing, copy the previous ref
         * in its place.
         * FIXME: Avoiding a memcpy would be nice, but ref handling makes
         * many assumptions about there being no actual duplicates.
         * FIXME: This does not copy padding for out-of-frame motion
         * vectors. Given we are concealing a lost frame, this probably
         * is not noticeable by comparison, but it should be fixed. */
        if (h->short_ref_count) {
            if (prev &&
                h->short_ref[0]->f->width  == prev->f->width  &&
                h->short_ref[0]->f->height == prev->f->height &&
                h->short_ref[0]->f->format == prev->f->format) {
                av_image_copy(h->short_ref[0]->f->data,
                              h->short_ref[0]->f->linesize,
                              (const uint8_t **)prev->f->data,
                              prev->f->linesize,
                              prev->f->format,
                              h->mb_width  * 16,
                              h->mb_height * 16);
                h->short_ref[0]->poc = prev->poc + 2;
            }
            h->short_ref[0]->frame_num = h->poc.prev_frame_num;
        }
    }
    /* See if we have a decoded first field looking for a pair...
     * We're using that to see whether to continue decoding in that
     * frame, or to allocate a new one. */
    if (h->first_field) {
        assert(h->cur_pic_ptr);
        assert(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            h->cur_pic_ptr = NULL;
            h->first_field = FIELD_PICTURE(h);
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and the previous field had different frame_nums.
                 * Consider this field first in pair. Throw away previous
                 * one except for reference purposes. */
                h->first_field = 1;
                h->cur_pic_ptr = NULL;
            } else {
                /* Second field in complementary pair */
                h->first_field = 0;
            }
        }
    } else {
        /* Frame or first field in a potentially complementary pair */
        h->first_field = FIELD_PICTURE(h);
    }

    if (!FIELD_PICTURE(h) || h->first_field) {
        if (h264_frame_start(h) < 0) {
            h->first_field = 0;
            return AVERROR_INVALIDDATA;
        }
    } else {
        release_unused_pictures(h, 0);
    }

    ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
                     h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);

    memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
    h->nb_mmco              = sl->nb_mmco;
    h->explicit_ref_marking = sl->explicit_ref_marking;

    /* Set the frame properties/side data. Only done for the second field in
     * field coded frames, since some SEI information is present for each field
     * and is merged by the SEI parsing code. */
    if (!FIELD_PICTURE(h) || !h->first_field) {
        ret = h264_export_frame_props(h);
        if (ret < 0)
            return ret;
    }

    return 0;
}
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
                                   const H2645NAL *nal)
{
    const SPS *sps;
    const PPS *pps;
    int ret;
    unsigned int slice_type, tmp, i;
    int field_pic_flag, bottom_field_flag;
    int droppable, picture_structure;

    sl->first_mb_addr = get_ue_golomb(&sl->gb);

    slice_type = get_ue_golomb_31(&sl->gb);
    if (slice_type > 9) {
        av_log(h->avctx, AV_LOG_ERROR,
               "slice type %d too large at %d\n",
               slice_type, sl->first_mb_addr);
        return AVERROR_INVALIDDATA;
    }
    if (slice_type > 4) {
        slice_type -= 5;
        sl->slice_type_fixed = 1;
    } else
        sl->slice_type_fixed = 0;

    slice_type         = ff_h264_golomb_to_pict_type[slice_type];
    sl->slice_type     = slice_type;
    sl->slice_type_nos = slice_type & 3;

    if (nal->type == NAL_IDR_SLICE &&
        sl->slice_type_nos != AV_PICTURE_TYPE_I) {
        av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
        return AVERROR_INVALIDDATA;
    }

    sl->pps_id = get_ue_golomb(&sl->gb);
    if (sl->pps_id >= MAX_PPS_COUNT) {
        av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
        return AVERROR_INVALIDDATA;
    }
    if (!h->ps.pps_list[sl->pps_id]) {
        av_log(h->avctx, AV_LOG_ERROR,
               "non-existing PPS %u referenced\n",
               sl->pps_id);
        return AVERROR_INVALIDDATA;
    }
    pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;

    if (!h->ps.sps_list[pps->sps_id]) {
        av_log(h->avctx, AV_LOG_ERROR,
               "non-existing SPS %u referenced\n", pps->sps_id);
        return AVERROR_INVALIDDATA;
    }
    sps = (const SPS*)h->ps.sps_list[pps->sps_id]->data;

    sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);

    sl->mb_mbaff = 0;

    droppable = nal->ref_idc == 0;
    if (sps->frame_mbs_only_flag) {
        picture_structure = PICT_FRAME;
    } else {
        field_pic_flag = get_bits1(&sl->gb);
        if (field_pic_flag) {
            bottom_field_flag = get_bits1(&sl->gb);
            picture_structure = PICT_TOP_FIELD + bottom_field_flag;
        } else {
            picture_structure = PICT_FRAME;
        }
    }
    sl->picture_structure      = picture_structure;
    sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;

    if (picture_structure == PICT_FRAME) {
        sl->curr_pic_num = sl->frame_num;
        sl->max_pic_num  = 1 << sps->log2_max_frame_num;
    } else {
        sl->curr_pic_num = 2 * sl->frame_num + 1;
        sl->max_pic_num  = 1 << (sps->log2_max_frame_num + 1);
    }

    if (nal->type == NAL_IDR_SLICE)
        get_ue_golomb(&sl->gb); /* idr_pic_id */

    if (sps->poc_type == 0) {
        sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);

        if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
            sl->delta_poc_bottom = get_se_golomb(&sl->gb);
    }

    if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
        sl->delta_poc[0] = get_se_golomb(&sl->gb);

        if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
            sl->delta_poc[1] = get_se_golomb(&sl->gb);
    }

    if (pps->redundant_pic_cnt_present)
        sl->redundant_pic_count = get_ue_golomb(&sl->gb);

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
        sl->direct_spatial_mv_pred = get_bits1(&sl->gb);

    ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
                                  &sl->gb, pps, sl->slice_type_nos,
                                  picture_structure);
    if (ret < 0)
        return ret;

    if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
        ret = ff_h264_decode_ref_pic_list_reordering(h, sl);
        if (ret < 0) {
            sl->ref_count[1] = sl->ref_count[0] = 0;
            return ret;
        }
    }

    sl->pwt.use_weight = 0;
    for (i = 0; i < 2; i++) {
        sl->pwt.luma_weight_flag[i]   = 0;
        sl->pwt.chroma_weight_flag[i] = 0;
    }
    if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
        (pps->weighted_bipred_idc == 1 &&
         sl->slice_type_nos == AV_PICTURE_TYPE_B))
        ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
                                  sl->slice_type_nos, &sl->pwt);

    sl->explicit_ref_marking = 0;
    if (nal->ref_idc) {
        ret = ff_h264_decode_ref_pic_marking(h, sl, &sl->gb);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            return AVERROR_INVALIDDATA;
    }

    if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
        tmp = get_ue_golomb_31(&sl->gb);
        if (tmp > 2) {
            av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
            return AVERROR_INVALIDDATA;
        }
        sl->cabac_init_idc = tmp;
    }

    sl->last_qscale_diff = 0;
    tmp = pps->init_qp + get_se_golomb(&sl->gb);
    if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
        av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
        return AVERROR_INVALIDDATA;
    }
    sl->qscale       = tmp;
    sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
    sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
    // FIXME qscale / qp ... stuff
    if (sl->slice_type == AV_PICTURE_TYPE_SP)
        get_bits1(&sl->gb); /* sp_for_switch_flag */
    if (sl->slice_type == AV_PICTURE_TYPE_SP ||
        sl->slice_type == AV_PICTURE_TYPE_SI)
        get_se_golomb(&sl->gb); /* slice_qs_delta */

    sl->deblocking_filter     = 1;
    sl->slice_alpha_c0_offset = 0;
    sl->slice_beta_offset     = 0;
    if (pps->deblocking_filter_parameters_present) {
        tmp = get_ue_golomb_31(&sl->gb);
        if (tmp > 2) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "deblocking_filter_idc %u out of range\n", tmp);
            return AVERROR_INVALIDDATA;
        }
        sl->deblocking_filter = tmp;
        if (sl->deblocking_filter < 2)
            sl->deblocking_filter ^= 1; // 1<->0

        if (sl->deblocking_filter) {
            sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2;
            sl->slice_beta_offset     = get_se_golomb(&sl->gb) * 2;
            if (sl->slice_alpha_c0_offset >  12 ||
                sl->slice_alpha_c0_offset < -12 ||
                sl->slice_beta_offset     >  12 ||
                sl->slice_beta_offset     < -12) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "deblocking filter parameters %d %d out of range\n",
                       sl->slice_alpha_c0_offset, sl->slice_beta_offset);
                return AVERROR_INVALIDDATA;
            }
        }
    }

    return 0;
}
/**
 * Decode a slice header.
 * This will (re)initialize the decoder and call h264_frame_start() as needed.
 *
 * @param h the H264Context
 *
 * @return 0 if okay, <0 if an error occurred
 */
  1349. int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
  1350. const H2645NAL *nal)
  1351. {
  1352. int i, j, ret = 0;
  1353. ret = h264_slice_header_parse(h, sl, nal);
  1354. if (ret < 0)
  1355. return ret;
  1356. if (!h->setup_finished) {
  1357. if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
  1358. if (h->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) {
  1359. ff_h264_field_end(h, sl, 1);
  1360. }
  1361. h->current_slice = 0;
  1362. if (!h->first_field) {
  1363. if (h->cur_pic_ptr && !h->droppable) {
  1364. ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
  1365. h->picture_structure == PICT_BOTTOM_FIELD);
  1366. }
  1367. h->cur_pic_ptr = NULL;
  1368. }
  1369. }
  1370. if (h->current_slice == 0) {
  1371. ret = h264_field_start(h, sl, nal);
  1372. if (ret < 0)
  1373. return ret;
  1374. }
  1375. }
  1376. if (h->current_slice > 0) {
  1377. if (h->ps.pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) {
  1378. av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
  1379. return AVERROR_INVALIDDATA;
  1380. }
  1381. if (h->picture_structure != sl->picture_structure ||
  1382. h->droppable != (nal->ref_idc == 0)) {
  1383. av_log(h->avctx, AV_LOG_ERROR,
  1384. "Changing field mode (%d -> %d) between slices is not allowed\n",
  1385. h->picture_structure, sl->picture_structure);
  1386. return AVERROR_INVALIDDATA;
  1387. } else if (!h->cur_pic_ptr) {
  1388. av_log(h->avctx, AV_LOG_ERROR,
  1389. "unset cur_pic_ptr on slice %d\n",
  1390. h->current_slice + 1);
  1391. return AVERROR_INVALIDDATA;
  1392. }
  1393. }
  1394. assert(h->mb_num == h->mb_width * h->mb_height);
  1395. if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
  1396. sl->first_mb_addr >= h->mb_num) {
  1397. av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
  1398. return AVERROR_INVALIDDATA;
  1399. }
    sl->resync_mb_x = sl->mb_x =  sl->first_mb_addr % h->mb_width;
    sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
                                 FIELD_OR_MBAFF_PICTURE(h);
    if (h->picture_structure == PICT_BOTTOM_FIELD)
        sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
    assert(sl->mb_y < h->mb_height);

    ret = ff_h264_build_ref_list(h, sl);
    if (ret < 0)
        return ret;

    if (h->ps.pps->weighted_bipred_idc == 2 &&
        sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        implicit_weight_table(h, sl, -1);
        if (FRAME_MBAFF(h)) {
            implicit_weight_table(h, sl, 0);
            implicit_weight_table(h, sl, 1);
        }
    }

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
        ff_h264_direct_dist_scale_factor(h, sl);
    ff_h264_direct_ref_list_init(h, sl);

    if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
         sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
        (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR  &&
         sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
        (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
         nal->ref_idc == 0))
        sl->deblocking_filter = 0;

    if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
        if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
            /* Cheat slightly for speed:
             * Do not bother to deblock across slices. */
            sl->deblocking_filter = 2;
        } else {
            h->postpone_filter = 1;
        }
    }
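    /* QP threshold below which the loop filter is known to be a no-op for a
     * MB (see the early exit in fill_filter_caches()). As a rough worked
     * example: with both slice offsets at 0 and 8-bit luma this evaluates to
     * 15, and the filter's alpha/beta tables are zero for such low indices,
     * so no pixel would change. */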
    sl->qp_thresh = 15 -
                    FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
                    FFMAX3(0,
                           h->ps.pps->chroma_qp_index_offset[0],
                           h->ps.pps->chroma_qp_index_offset[1]) +
                    6 * (h->ps.sps->bit_depth_luma - 8);

    sl->slice_num = ++h->current_slice;
    if (sl->slice_num >= MAX_SLICES) {
        av_log(h->avctx, AV_LOG_ERROR,
               "Too many slices, increase MAX_SLICES and recompile\n");
    }
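    /* Build the per-slice ref2frm tables used by the loop filter: they map a
     * slice-local reference index to a global frame id so MBs from different
     * slices can be compared at slice boundaries. An interpretation of the
     * indexing below: entries [0..1] and [18..19] act as -1 guards for
     * "no reference", [2..17] cover the 16 frame refs, [20..51] the 32 field
     * refs of an MBAFF pair; each value packs 4 * frame_id + (reference & 3). */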
    for (j = 0; j < 2; j++) {
        int id_list[16];
        int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
        for (i = 0; i < 16; i++) {
            id_list[i] = 60;
            if (j < sl->list_count && i < sl->ref_count[j] &&
                sl->ref_list[j][i].parent->f->buf[0]) {
                int k;
                AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
                for (k = 0; k < h->short_ref_count; k++)
                    if (h->short_ref[k]->f->buf[0]->buffer == buf) {
                        id_list[i] = k;
                        break;
                    }
                for (k = 0; k < h->long_ref_count; k++)
                    if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
                        id_list[i] = h->short_ref_count + k;
                        break;
                    }
            }
        }

        ref2frm[0] =
        ref2frm[1] = -1;
        for (i = 0; i < 16; i++)
            ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
        ref2frm[18 + 0] =
        ref2frm[18 + 1] = -1;
        for (i = 16; i < 48; i++)
            ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
                             (sl->ref_list[j][(i - 16) >> 1].reference & 3);
    }

    if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(h->avctx, AV_LOG_DEBUG,
               "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
               sl->slice_num,
               (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
               sl->mb_y * h->mb_width + sl->mb_x,
               av_get_picture_type_char(sl->slice_type),
               sl->slice_type_fixed ? " fix" : "",
               nal->type == NAL_IDR_SLICE ? " IDR" : "",
               h->poc.frame_num,
               h->cur_pic_ptr->field_poc[0],
               h->cur_pic_ptr->field_poc[1],
               sl->ref_count[0], sl->ref_count[1],
               sl->qscale,
               sl->deblocking_filter,
               sl->slice_alpha_c0_offset, sl->slice_beta_offset,
               sl->pwt.use_weight,
               sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
               sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
    }

    return 0;
}

int ff_h264_get_slice_type(const H264SliceContext *sl)
{
    switch (sl->slice_type) {
    case AV_PICTURE_TYPE_P:
        return 0;
    case AV_PICTURE_TYPE_B:
        return 1;
    case AV_PICTURE_TYPE_I:
        return 2;
    case AV_PICTURE_TYPE_SP:
        return 3;
    case AV_PICTURE_TYPE_SI:
        return 4;
    default:
        return AVERROR_INVALIDDATA;
    }
}

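/**
 * Fill the motion vector and reference caches that the loop filter reads
 * for one reference list of the current MB, translating slice-local
 * reference indices to global ids via ref2frm so that edges against
 * neighbouring MBs from other slices compare consistently.
 */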
static av_always_inline void fill_filter_caches_inter(const H264Context *h,
                                                      H264SliceContext *sl,
                                                      int mb_type, int top_xy,
                                                      int left_xy[LEFT_MBS],
                                                      int top_type,
                                                      int left_type[LEFT_MBS],
                                                      int mb_xy, int list)
{
    int b_stride = h->b_stride;
    int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
    int8_t *ref_cache   = &sl->ref_cache[list][scan8[0]];
    if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
        if (USES_LIST(top_type, list)) {
            const int b_xy  = h->mb2b_xy[top_xy] + 3 * b_stride;
            const int b8_xy = 4 * top_xy + 2;
            int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2);
            AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
            ref_cache[0 - 1 * 8] =
            ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
            ref_cache[2 - 1 * 8] =
            ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
        } else {
            AV_ZERO128(mv_dst - 1 * 8);
            AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        }

        if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
            if (USES_LIST(left_type[LTOP], list)) {
                const int b_xy  = h->mb2b_xy[left_xy[LTOP]] + 3;
                const int b8_xy = 4 * left_xy[LTOP] + 1;
                int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2);
                AV_COPY32(mv_dst - 1 +  0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
                AV_COPY32(mv_dst - 1 +  8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
                AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
                AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
                ref_cache[-1 +  0] =
                ref_cache[-1 +  8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
            } else {
                AV_ZERO32(mv_dst - 1 +  0);
                AV_ZERO32(mv_dst - 1 +  8);
                AV_ZERO32(mv_dst - 1 + 16);
                AV_ZERO32(mv_dst - 1 + 24);
                ref_cache[-1 +  0] =
                ref_cache[-1 +  8] =
                ref_cache[-1 + 16] =
                ref_cache[-1 + 24] = LIST_NOT_USED;
            }
        }
    }

    if (!USES_LIST(mb_type, list)) {
        fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
        AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
        return;
    }

    {
        int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
        int (*ref2frm)[64] = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2);
        uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
        uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
        AV_WN32A(&ref_cache[0 * 8], ref01);
        AV_WN32A(&ref_cache[1 * 8], ref01);
        AV_WN32A(&ref_cache[2 * 8], ref23);
        AV_WN32A(&ref_cache[3 * 8], ref23);
    }

    {
        int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
        AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
        AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
        AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
        AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
    }
}

/**
 * @return non-zero if the loop filter can be skipped for this MB
 */
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
{
    const int mb_xy = sl->mb_xy;
    int top_xy, left_xy[LEFT_MBS];
    int top_type, left_type[LEFT_MBS];
    uint8_t *nnz;
    uint8_t *nnz_cache;

    top_xy        = mb_xy - (h->mb_stride << MB_FIELD(sl));
    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
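    /* In MBAFF frames the relevant neighbours depend on whether the current
     * and adjacent MB pairs are field- or frame-coded, so the top/left
     * indices computed above may have to move by one MB row. */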
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (sl->mb_y & 1) {
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LTOP] -= h->mb_stride;
        } else {
            if (curr_mb_field_flag)
                top_xy += h->mb_stride &
                          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LBOT] += h->mb_stride;
        }
    }

    sl->top_mb_xy        = top_xy;
    sl->left_mb_xy[LTOP] = left_xy[LTOP];
    sl->left_mb_xy[LBOT] = left_xy[LBOT];
    {
        /* For sufficiently low qp, filtering wouldn't do anything.
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
        int qp        = h->cur_pic.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
             ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
             ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF(h))
                return 1;
            if ((left_xy[LTOP] < 0 ||
                 ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < h->mb_stride ||
                 ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }
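    /* With deblocking_filter == 2 the filter must not cross slice
     * boundaries, so neighbours belonging to another slice are treated as
     * unavailable; otherwise only MBs that were never decoded
     * (slice_table == 0xFFFF) are. */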
    top_type        = h->cur_pic.mb_type[top_xy];
    left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
    if (sl->deblocking_filter == 2) {
        if (h->slice_table[top_xy] != sl->slice_num)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
            left_type[LTOP] = left_type[LBOT] = 0;
    } else {
        if (h->slice_table[top_xy] == 0xFFFF)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
            left_type[LTOP] = left_type[LBOT] = 0;
    }
    sl->top_type        = top_type;
    sl->left_type[LTOP] = left_type[LTOP];
    sl->left_type[LBOT] = left_type[LBOT];

    if (IS_INTRA(mb_type))
        return 0;

    fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                             top_type, left_type, mb_xy, 0);
    if (sl->list_count == 2)
        fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
                                 top_type, left_type, mb_xy, 1);

    nnz       = h->non_zero_count[mb_xy];
    nnz_cache = sl->non_zero_count_cache;
    AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
    AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
    AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
    AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
    sl->cbp = h->cbp_table[mb_xy];

    if (top_type) {
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
    }

    if (left_type[LTOP]) {
        nnz = h->non_zero_count[left_xy[LTOP]];
        nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
        nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
        nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
        nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
    }

    /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
     * from what the loop filter needs */
    if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
        if (IS_8x8DCT(top_type)) {
            nnz_cache[4 + 8 * 0] =
            nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
            nnz_cache[6 + 8 * 0] =
            nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
        }
        if (IS_8x8DCT(left_type[LTOP])) {
            nnz_cache[3 + 8 * 1] =
            nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
        }
        if (IS_8x8DCT(left_type[LBOT])) {
            nnz_cache[3 + 8 * 3] =
            nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
        }

        if (IS_8x8DCT(mb_type)) {
            nnz_cache[scan8[0]] =
            nnz_cache[scan8[1]] =
            nnz_cache[scan8[2]] =
            nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;

            nnz_cache[scan8[0 + 4]] =
            nnz_cache[scan8[1 + 4]] =
            nnz_cache[scan8[2 + 4]] =
            nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;

            nnz_cache[scan8[0 + 8]] =
            nnz_cache[scan8[1 + 8]] =
            nnz_cache[scan8[2 + 8]] =
            nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;

            nnz_cache[scan8[0 + 12]] =
            nnz_cache[scan8[1 + 12]] =
            nnz_cache[scan8[2 + 12]] =
            nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
        }
    }

    return 0;
}

static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
{
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize, mb_x, mb_y;
    const int end_mb_y       = sl->mb_y + FRAME_MBAFF(h);
    const int old_slice_type = sl->slice_type;
    const int pixel_shift    = h->pixel_shift;
    const int block_h        = 16 >> h->chroma_y_shift;

    if (h->postpone_filter)
        return;
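    /* Filter the MB range [start_x, end_x) of the current row (two rows at
     * once for MBAFF frames), computing per-MB plane pointers and skipping
     * MBs for which fill_filter_caches() proves the filter is a no-op. */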
    if (sl->deblocking_filter) {
        for (mb_x = start_x; mb_x < end_x; mb_x++)
            for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
                int mb_xy, mb_type;
                mb_xy   = sl->mb_xy = mb_x + mb_y * h->mb_stride;
                mb_type = h->cur_pic.mb_type[mb_xy];

                if (FRAME_MBAFF(h))
                    sl->mb_mbaff               =
                    sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);

                sl->mb_x = mb_x;
                sl->mb_y = mb_y;
                dest_y  = h->cur_pic.f->data[0] +
                          ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
                dest_cb = h->cur_pic.f->data[1] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                dest_cr = h->cur_pic.f->data[2] +
                          (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                          mb_y * sl->uvlinesize * block_h;
                // FIXME simplify above

                if (MB_FIELD(sl)) {
                    linesize   = sl->mb_linesize   = sl->linesize   * 2;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
                    if (mb_y & 1) { // FIXME move out of this function?
                        dest_y  -= sl->linesize   * 15;
                        dest_cb -= sl->uvlinesize * (block_h - 1);
                        dest_cr -= sl->uvlinesize * (block_h - 1);
                    }
                } else {
                    linesize   = sl->mb_linesize   = sl->linesize;
                    uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
                }
                backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
                                 uvlinesize, 0);
                if (fill_filter_caches(h, sl, mb_type))
                    continue;
                sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
                sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);

                if (FRAME_MBAFF(h)) {
                    ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
                                      linesize, uvlinesize);
                } else {
                    ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
                                           dest_cr, linesize, uvlinesize);
                }
            }
    }
    sl->slice_type   = old_slice_type;
    sl->mb_x         = end_x;
    sl->mb_y         = end_mb_y - FRAME_MBAFF(h);
    sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
    sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
}

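/**
 * Infer mb_field_decoding_flag for the current MB pair from an
 * already-decoded neighbour in the same slice (left first, then top),
 * defaulting to frame coding when neither is available.
 */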
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
    int mb_type     = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
                      h->cur_pic.mb_type[mb_xy - 1] :
                      (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
                      h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
    sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}

/**
 * Draw edges and report progress for the last MB row.
 */
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
{
    int top            = 16 * (sl->mb_y >> FIELD_PICTURE(h));
    int pic_height     = 16 *  h->mb_height >> FIELD_PICTURE(h);
    int height         = 16 << FRAME_MBAFF(h);
    int deblock_border = (16 + 4) << FRAME_MBAFF(h);
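    /* With deblocking enabled, pixels of a row can still change while the
     * row below is filtered, so the reported band is shifted up by the
     * border the filter may touch (one MB row plus 4 luma lines, doubled
     * for MBAFF). */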
    if (sl->deblocking_filter) {
        if ((top + height) >= pic_height)
            height += deblock_border;
        top -= deblock_border;
    }

    if (top >= pic_height || (top + height) < 0)
        return;

    height = FFMIN(height, pic_height - top);
    if (top < 0) {
        height = top + height;
        top    = 0;
    }

    ff_h264_draw_horiz_band(h, sl, top, height);

    if (h->droppable)
        return;

    ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
                              h->picture_structure == PICT_BOTTOM_FIELD);
}

static void er_add_slice(H264SliceContext *sl,
                         int startx, int starty,
                         int endx, int endy, int status)
{
#if CONFIG_ERROR_RESILIENCE
    ERContext *er = &sl->er;

    if (!sl->h264->enable_er)
        return;

    er->ref_count = sl->ref_count[0];
    ff_er_add_slice(er, startx, starty, endx, endy, status);
#endif
}

static int decode_slice(struct AVCodecContext *avctx, void *arg)
{
    H264SliceContext *sl = arg;
    const H264Context *h = sl->h264;
    int lf_x_start   = sl->mb_x;
    int orig_deblock = sl->deblocking_filter;
    int ret;

    sl->linesize   = h->cur_pic_ptr->f->linesize[0];
    sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];

    ret = alloc_scratch_buffers(sl, sl->linesize);
    if (ret < 0)
        return ret;

    sl->mb_skip_run = -1;

    if (h->postpone_filter)
        sl->deblocking_filter = 0;

    sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
                     avctx->codec_id != AV_CODEC_ID_H264 ||
                     (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));

    if (h->ps.pps->cabac) {
        /* realign */
        align_get_bits(&sl->gb);

        /* init cabac */
        ff_init_cabac_decoder(&sl->cabac,
                              sl->gb.buffer + get_bits_count(&sl->gb) / 8,
                              (get_bits_left(&sl->gb) + 7) / 8);

        ff_h264_init_cabac_states(h, sl);
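        /* CABAC main loop: decode one MB (a vertical MB pair for MBAFF),
         * run the loop filter on completed rows, and stop when the CABAC
         * terminate symbol signals the end of the slice. */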
        for (;;) {
            // START_TIMER
            int ret, eos;

            if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
                av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
                       sl->next_slice_idx);
                return AVERROR_INVALIDDATA;
            }

            ret = ff_h264_decode_mb_cabac(h, sl);
            // STOP_TIMER("decode_mb_cabac")

            if (ret >= 0)
                ff_h264_hl_decode_mb(h, sl);

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF(h)) {
                sl->mb_y++;

                ret = ff_h264_decode_mb_cabac(h, sl);

                if (ret >= 0)
                    ff_h264_hl_decode_mb(h, sl);
                sl->mb_y--;
            }
            eos = get_cabac_terminate(&sl->cabac);

            if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
                sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
                             sl->mb_y, ER_MB_END);
                if (sl->mb_x >= lf_x_start)
                    loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
                goto finish;
            }
            if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d, bytestream %td\n",
                       sl->mb_x, sl->mb_y,
                       sl->cabac.bytestream_end - sl->cabac.bytestream);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return AVERROR_INVALIDDATA;
            }

            if (++sl->mb_x >= h->mb_width) {
                loop_filter(h, sl, lf_x_start, sl->mb_x);
                sl->mb_x = lf_x_start = 0;
                decode_finish_row(h, sl);
                ++sl->mb_y;
                if (FIELD_OR_MBAFF_PICTURE(h)) {
                    ++sl->mb_y;
                    if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
                        predict_field_decoding_flag(h, sl);
                }
            }

            if (eos || sl->mb_y >= h->mb_height) {
                ff_tlog(h->avctx, "slice end %d %d\n",
                        get_bits_count(&sl->gb), sl->gb.size_in_bits);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
                             sl->mb_y, ER_MB_END);
                if (sl->mb_x > lf_x_start)
                    loop_filter(h, sl, lf_x_start, sl->mb_x);
                goto finish;
            }
        }
    } else {
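        /* CAVLC main loop: same structure as the CABAC path, but the end of
         * the slice is detected by bitstream exhaustion (together with an
         * expired skip run) instead of a terminate symbol. */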
        for (;;) {
            int ret;

            if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
                av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
                       sl->next_slice_idx);
                return AVERROR_INVALIDDATA;
            }

            ret = ff_h264_decode_mb_cavlc(h, sl);

            if (ret >= 0)
                ff_h264_hl_decode_mb(h, sl);

            // FIXME optimal? or let mb_decode decode 16x32 ?
            if (ret >= 0 && FRAME_MBAFF(h)) {
                sl->mb_y++;
                ret = ff_h264_decode_mb_cavlc(h, sl);

                if (ret >= 0)
                    ff_h264_hl_decode_mb(h, sl);
                sl->mb_y--;
            }

            if (ret < 0) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
                er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                             sl->mb_y, ER_MB_ERROR);
                return ret;
            }

            if (++sl->mb_x >= h->mb_width) {
                loop_filter(h, sl, lf_x_start, sl->mb_x);
                sl->mb_x = lf_x_start = 0;
                decode_finish_row(h, sl);
                ++sl->mb_y;
                if (FIELD_OR_MBAFF_PICTURE(h)) {
                    ++sl->mb_y;
                    if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
                        predict_field_decoding_flag(h, sl);
                }
                if (sl->mb_y >= h->mb_height) {
                    ff_tlog(h->avctx, "slice end %d %d\n",
                            get_bits_count(&sl->gb), sl->gb.size_in_bits);

                    if (get_bits_left(&sl->gb) == 0) {
                        er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                     sl->mb_x - 1, sl->mb_y, ER_MB_END);

                        goto finish;
                    } else {
                        er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                     sl->mb_x - 1, sl->mb_y, ER_MB_END);

                        return AVERROR_INVALIDDATA;
                    }
                }
            }

            if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
                ff_tlog(h->avctx, "slice end %d %d\n",
                        get_bits_count(&sl->gb), sl->gb.size_in_bits);

                if (get_bits_left(&sl->gb) == 0) {
                    er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
                                 sl->mb_x - 1, sl->mb_y, ER_MB_END);
                    if (sl->mb_x > lf_x_start)
                        loop_filter(h, sl, lf_x_start, sl->mb_x);

                    goto finish;
                } else {
                    er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
                                 sl->mb_y, ER_MB_ERROR);

                    return AVERROR_INVALIDDATA;
                }
            }
        }
    }

finish:
    sl->deblocking_filter = orig_deblock;
    return 0;
}

/**
 * Call decode_slice() for each context.
 *
 * @param h h264 master context
 * @param context_count number of contexts to execute
 */
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
{
    AVCodecContext *const avctx = h->avctx;
    H264SliceContext *sl;
    int i, j;

    if (h->avctx->hwaccel)
        return 0;
    if (context_count == 1) {
        int ret;

        h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
        h->postpone_filter = 0;

        ret = decode_slice(avctx, &h->slice_ctx[0]);
        h->mb_y = h->slice_ctx[0].mb_y;
        return ret;
    } else {
        for (i = 0; i < context_count; i++) {
            int next_slice_idx = h->mb_width * h->mb_height;
            int slice_idx;

            sl = &h->slice_ctx[i];
            sl->er.error_count = 0;

            /* make sure none of those slices overlap */
            slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
            for (j = 0; j < context_count; j++) {
                H264SliceContext *sl2 = &h->slice_ctx[j];
                int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;

                if (i == j || slice_idx2 < slice_idx)
                    continue;
                next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
            }
            sl->next_slice_idx = next_slice_idx;
        }

        avctx->execute(avctx, decode_slice, h->slice_ctx,
                       NULL, context_count, sizeof(h->slice_ctx[0]));

        /* pull back stuff from slices to master context */
        sl = &h->slice_ctx[context_count - 1];
        h->mb_y = sl->mb_y;
        for (i = 1; i < context_count; i++)
            h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
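        /* Deblocking was postponed so the slices could be decoded in
         * parallel without touching each other's pixels; now that all of
         * them are done, filter each slice's rows sequentially. */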
        if (h->postpone_filter) {
            h->postpone_filter = 0;

            for (i = 0; i < context_count; i++) {
                int y_end, x_end;

                sl = &h->slice_ctx[i];
                y_end = FFMIN(sl->mb_y + 1, h->mb_height);
                x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;

                for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
                    sl->mb_y = j;
                    loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
                                j == y_end - 1 ? x_end : h->mb_width);
                }
            }
        }
    }

    return 0;
}