You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

832 lines
38KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... decoder
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 macroblock decoding
  24. */
  25. #include <stdint.h>
  26. #include "config.h"
  27. #include "libavutil/common.h"
  28. #include "libavutil/intreadwrite.h"
  29. #include "avcodec.h"
  30. #include "h264.h"
  31. #include "qpeldsp.h"
  32. #include "svq3.h"
  33. #include "thread.h"
  34. static inline int get_lowest_part_list_y(H264Context *h, H264SliceContext *sl,
  35. H264Picture *pic, int n,
  36. int height, int y_offset, int list)
  37. {
  38. int raw_my = sl->mv_cache[list][scan8[n]][1];
  39. int filter_height_up = (raw_my & 3) ? 2 : 0;
  40. int filter_height_down = (raw_my & 3) ? 3 : 0;
  41. int full_my = (raw_my >> 2) + y_offset;
  42. int top = full_my - filter_height_up;
  43. int bottom = full_my + filter_height_down + height;
  44. return FFMAX(abs(top), bottom);
  45. }
  46. static inline void get_lowest_part_y(H264Context *h, H264SliceContext *sl,
  47. int refs[2][48], int n,
  48. int height, int y_offset, int list0,
  49. int list1, int *nrefs)
  50. {
  51. int my;
  52. y_offset += 16 * (h->mb_y >> MB_FIELD(h));
  53. if (list0) {
  54. int ref_n = sl->ref_cache[0][scan8[n]];
  55. H264Picture *ref = &h->ref_list[0][ref_n];
  56. // Error resilience puts the current picture in the ref list.
  57. // Don't try to wait on these as it will cause a deadlock.
  58. // Fields can wait on each other, though.
  59. if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
  60. (ref->reference & 3) != h->picture_structure) {
  61. my = get_lowest_part_list_y(h, sl, ref, n, height, y_offset, 0);
  62. if (refs[0][ref_n] < 0)
  63. nrefs[0] += 1;
  64. refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
  65. }
  66. }
  67. if (list1) {
  68. int ref_n = sl->ref_cache[1][scan8[n]];
  69. H264Picture *ref = &h->ref_list[1][ref_n];
  70. if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
  71. (ref->reference & 3) != h->picture_structure) {
  72. my = get_lowest_part_list_y(h, sl, ref, n, height, y_offset, 1);
  73. if (refs[1][ref_n] < 0)
  74. nrefs[1] += 1;
  75. refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
  76. }
  77. }
  78. }
/**
 * Wait until all reference frames are available for MC operations.
 *
 * @param h the H264 context
 */
static void await_references(H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = h->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int refs[2][48];
    int nrefs[2] = { 0 };
    int ref, list;

    memset(refs, -1, sizeof(refs));

    /* First pass: for every partition of this macroblock, record per list
     * and per reference index the lowest luma row MC may read. */
    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        assert(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = sl->sub_mb_type[i];
            const int n           = 4 * i;
            int y_offset          = (i & 2) << 2;

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);
                    get_lowest_part_y(h, sl, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    /* Second pass: block until each needed reference has decoded past the
     * lowest row recorded above.  The row is translated between frame and
     * field coordinates depending on how the reference was coded relative
     * to the current picture. */
    for (list = h->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                H264Picture *ref_pic  = &h->ref_list[list][ref];
                int ref_field         = ref_pic->reference - 1;
                int ref_field_picture = ref_pic->field_picture;
                int pic_height        = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(h);
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}
/**
 * Motion-compensate one prediction (one reference list) of a single
 * partition into dest_y/dest_cb/dest_cr, handling picture-edge emulation
 * and field/chroma addressing.
 */
static av_always_inline void mc_dir_part(H264Context *h, H264SliceContext *sl,
                                         H264Picture *pic,
                                         int n, int square, int height,
                                         int delta, int list,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int src_x_offset, int src_y_offset,
                                         qpel_mc_func *qpix_op,
                                         h264_chroma_mc_func chroma_op,
                                         int pixel_shift, int chroma_idc)
{
    /* Source position in quarter-pel units (luma). */
    const int mx      = sl->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = sl->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2); // subpel phase, selects qpel filter
    ptrdiff_t offset  = ((mx >> 2) << pixel_shift) + (my >> 2) * sl->mb_linesize;
    uint8_t *src_y    = pic->f.data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width   = 0;
    int extra_height  = 0;
    int emu           = 0;  // set when the edge-emulation buffer must be used
    const int full_mx    = mx >> 2;  // integer-pel position
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(h);
    int ysh;

    /* Subpel interpolation reads pixels before and after the block; shrink
     * the legal source area accordingly. */
    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    if (full_mx < 0 - extra_width ||
        full_my < 0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        /* Block (plus filter margin) crosses the picture border: build a
         * padded copy and interpolate from that instead of reading OOB. */
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                 src_y - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                 sl->mb_linesize, sl->mb_linesize,
                                 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
                                 full_my - 2, pic_width, pic_height);
        src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, sl->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize);

    if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
        return; // luma-only decoding requested, skip chroma

    if (chroma_idc == 3 /* yuv444 */) {
        /* 4:4:4: chroma planes have luma geometry, reuse the luma qpel MC. */
        src_cb = pic->f.data[1] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                     src_cb - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, sl->mb_linesize);

        src_cr = pic->f.data[2] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                     src_cr - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, sl->mb_linesize);
        return;
    }

    /* 4:2:0 / 4:2:2: chroma is subsampled, use the bilinear chroma MC. */
    ysh = 3 - (chroma_idc == 2 /* yuv422 */); // vertical shift from my to chroma rows
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((h->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * sl->mb_uvlinesize;
    src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * sl->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = h->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, sl->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = h->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, sl->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
}
  288. static av_always_inline void mc_part_std(H264Context *h, H264SliceContext *sl,
  289. int n, int square,
  290. int height, int delta,
  291. uint8_t *dest_y, uint8_t *dest_cb,
  292. uint8_t *dest_cr,
  293. int x_offset, int y_offset,
  294. qpel_mc_func *qpix_put,
  295. h264_chroma_mc_func chroma_put,
  296. qpel_mc_func *qpix_avg,
  297. h264_chroma_mc_func chroma_avg,
  298. int list0, int list1,
  299. int pixel_shift, int chroma_idc)
  300. {
  301. qpel_mc_func *qpix_op = qpix_put;
  302. h264_chroma_mc_func chroma_op = chroma_put;
  303. dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  304. if (chroma_idc == 3 /* yuv444 */) {
  305. dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  306. dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  307. } else if (chroma_idc == 2 /* yuv422 */) {
  308. dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
  309. dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
  310. } else { /* yuv420 */
  311. dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
  312. dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
  313. }
  314. x_offset += 8 * h->mb_x;
  315. y_offset += 8 * (h->mb_y >> MB_FIELD(h));
  316. if (list0) {
  317. H264Picture *ref = &h->ref_list[0][sl->ref_cache[0][scan8[n]]];
  318. mc_dir_part(h, sl, ref, n, square, height, delta, 0,
  319. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  320. qpix_op, chroma_op, pixel_shift, chroma_idc);
  321. qpix_op = qpix_avg;
  322. chroma_op = chroma_avg;
  323. }
  324. if (list1) {
  325. H264Picture *ref = &h->ref_list[1][sl->ref_cache[1][scan8[n]]];
  326. mc_dir_part(h, sl, ref, n, square, height, delta, 1,
  327. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  328. qpix_op, chroma_op, pixel_shift, chroma_idc);
  329. }
  330. }
/**
 * Weighted-prediction variant of mc_part_std(): motion-compensate one
 * partition and apply explicit or implicit H.264 sample weighting.
 */
static av_always_inline void mc_part_weighted(H264Context *h, H264SliceContext *sl,
                                              int n, int square,
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    /* Advance destination pointers to this partition; chroma geometry
     * depends on the subsampling mode. */
    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg; // chroma has luma geometry here
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }

    /* Convert partition offsets into absolute picture coordinates. */
    x_offset += 8 * h->mb_x;
    y_offset += 8 * (h->mb_y >> MB_FIELD(h));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = h->bipred_scratchpad;
        uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = h->bipred_scratchpad + 16 * sl->mb_uvlinesize;
        int refn0       = sl->ref_cache[0][scan8[n]];
        int refn1       = sl->ref_cache[1][scan8[n]];

        /* Predict list0 into the destination and list1 into scratch, then
         * blend the two with the weight functions below. */
        mc_dir_part(h, sl, &h->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, sl, &h->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (sl->use_weight == 2) {
            /* Implicit weighting: table-driven weights that always sum to
             * 64 (log2 denom 5), no per-plane offsets. */
            int weight0 = sl->implicit_weight[refn0][refn1][h->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
                            height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
        } else {
            /* Explicit bi-prediction weights/offsets from the slice header. */
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
                            sl->luma_log2_weight_denom,
                            sl->luma_weight[refn0][0][0],
                            sl->luma_weight[refn1][1][0],
                            sl->luma_weight[refn0][0][1] +
                            sl->luma_weight[refn1][1][1]);
            chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
                              sl->chroma_log2_weight_denom,
                              sl->chroma_weight[refn0][0][0][0],
                              sl->chroma_weight[refn1][1][0][0],
                              sl->chroma_weight[refn0][0][0][1] +
                              sl->chroma_weight[refn1][1][0][1]);
            chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
                              sl->chroma_log2_weight_denom,
                              sl->chroma_weight[refn0][0][1][0],
                              sl->chroma_weight[refn1][1][1][0],
                              sl->chroma_weight[refn0][0][1][1] +
                              sl->chroma_weight[refn1][1][1][1]);
        }
    } else {
        /* Uni-directional prediction: predict in place, then weight it. */
        int list = list1 ? 1 : 0;
        int refn = sl->ref_cache[list][scan8[n]];
        H264Picture *ref = &h->ref_list[list][refn];
        mc_dir_part(h, sl, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        luma_weight_op(dest_y, sl->mb_linesize, height,
                       sl->luma_log2_weight_denom,
                       sl->luma_weight[refn][list][0],
                       sl->luma_weight[refn][list][1]);
        if (sl->use_weight_chroma) {
            chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
                             sl->chroma_log2_weight_denom,
                             sl->chroma_weight[refn][list][0][0],
                             sl->chroma_weight[refn][list][0][1]);
            chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
                             sl->chroma_log2_weight_denom,
                             sl->chroma_weight[refn][list][1][0],
                             sl->chroma_weight[refn][list][1][1]);
        }
    }
}
  433. static av_always_inline void prefetch_motion(H264Context *h, H264SliceContext *sl,
  434. int list, int pixel_shift,
  435. int chroma_idc)
  436. {
  437. /* fetch pixels for estimated mv 4 macroblocks ahead
  438. * optimized for 64byte cache lines */
  439. const int refn = sl->ref_cache[list][scan8[0]];
  440. if (refn >= 0) {
  441. const int mx = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
  442. const int my = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
  443. uint8_t **src = h->ref_list[list][refn].f.data;
  444. int off = (mx << pixel_shift) +
  445. (my + (h->mb_x & 3) * 4) * sl->mb_linesize +
  446. (64 << pixel_shift);
  447. h->vdsp.prefetch(src[0] + off, h->linesize, 4);
  448. if (chroma_idc == 3 /* yuv444 */) {
  449. h->vdsp.prefetch(src[1] + off, h->linesize, 4);
  450. h->vdsp.prefetch(src[2] + off, h->linesize, 4);
  451. } else {
  452. off = ((mx >> 1) << pixel_shift) +
  453. ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize +
  454. (64 << pixel_shift);
  455. h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
  456. }
  457. }
  458. }
/**
 * Swap (xchg=1) or copy the row of pixels above the current macroblock
 * between the picture and the saved top-border buffers, so that deblocking
 * and intra prediction see the correct unfiltered/filtered samples.
 */
static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl,
                                            uint8_t *src_y,
                                            uint8_t *src_cb, uint8_t *src_cr,
                                            int linesize, int uvlinesize,
                                            int xchg, int chroma444,
                                            int simple, int pixel_shift)
{
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;
    uint8_t *top_border;

    /* MBAFF: bottom macroblock of a pair may be skipped; top uses the
     * alternate border buffer when the pair is field-coded. */
    if (!simple && FRAME_MBAFF(h)) {
        if (h->mb_y & 1) {
            if (!MB_MBAFF(h))
                return;
        } else {
            top_idx = MB_MBAFF(h) ? 0 : 1;
        }
    }

    /* Decide whether the top / top-left neighbours take part in deblocking
     * (filter mode 2 restricts it to the same slice). */
    if (h->deblocking_filter == 2) {
        deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == sl->slice_num;
        deblock_top     = sl->top_type;
    } else {
        deblock_topleft = (h->mb_x > 0);
        deblock_top     = (h->mb_y > !!MB_FIELD(h));
    }

    /* Step back one row (and one pixel left) to address the border area. */
    src_y  -= linesize + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
    top_border    = h->top_borders[top_idx][h->mb_x];

/* Swap or copy 8 pixels (16 bytes for high bit depth) between border
 * buffer and picture, depending on the xchg argument. */
#define XCHG(a, b, xchg)                        \
    if (pixel_shift) {                          \
        if (xchg) {                             \
            AV_SWAP64(b + 0, a + 0);            \
            AV_SWAP64(b + 8, a + 8);            \
        } else {                                \
            AV_COPY128(b, a);                   \
        }                                       \
    } else if (xchg)                            \
        AV_SWAP64(b, a);                        \
    else                                        \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (h->mb_x + 1 < h->mb_width) {
            XCHG(h->top_borders[top_idx][h->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
    }
    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (h->mb_x + 1 < h->mb_width) {
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            }
        } else {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
  543. static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
  544. int index)
  545. {
  546. if (high_bit_depth) {
  547. return AV_RN32A(((int32_t *)mb) + index);
  548. } else
  549. return AV_RN16A(mb + index);
  550. }
  551. static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
  552. int index, int value)
  553. {
  554. if (high_bit_depth) {
  555. AV_WN32A(((int32_t *)mb) + index, value);
  556. } else
  557. AV_WN16A(mb + index, value);
  558. }
/**
 * Perform intra prediction and add the residual for one 16x16 plane of an
 * intra macroblock (luma, or one plane in 4:4:4 mode).
 *
 * @param p plane index: 0 for luma, 1/2 for the extra planes in 4:4:4
 */
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,
                                                       H264SliceContext *sl,
                                                       int mb_type, int is_h264,
                                                       int simple,
                                                       int transform_bypass,
                                                       int pixel_shift,
                                                       int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];

    /* Each plane owns 16 entries of the block offset table. */
    block_offset += 16 * p;

    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            /* 8x8 intra prediction: four 8x8 blocks per plane. */
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels8_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                idct_add    = h->h264dsp.h264_idct8_add;
            }
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    /* Lossless (profile 244, bypass): fused predict+add. */
                    h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
                                         (sl->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        /* DC-only blocks take the cheaper dc_add path. */
                        if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            /* 4x4 intra prediction: sixteen 4x4 blocks per plane. */
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels4_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                idct_add    = h->h264dsp.h264_idct_add;
            }
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    /* Lossless (profile 244, bypass): fused predict+add. */
                    h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
                        assert(h->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            /* Top-right samples unavailable: replicate the
                             * rightmost top sample across all four pixels. */
                            if (pixel_shift) {
                                tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr       = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (is_h264) {
                            if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                            else
                                idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        } else if (CONFIG_SVQ3_DECODER)
                            ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
                    }
                }
            }
        }
    } else {
        /* 16x16 intra prediction over the whole plane, then handle the
         * separate luma DC transform. */
        h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
        if (is_h264) {
            if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
                if (!transform_bypass)
                    h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
                                                         h->mb_luma_dc[p],
                                                         h->dequant4_coeff[p][qscale][0]);
                else {
                    /* Bypass: scatter each DC coefficient to the DC slot of
                     * its 4x4 block without running the transform. */
                    static const uint8_t dc_mapping[16] = {
                         0 * 16,  1 * 16,  4 * 16,  5 * 16,
                         2 * 16,  3 * 16,  6 * 16,  7 * 16,
                         8 * 16,  9 * 16, 12 * 16, 13 * 16,
                        10 * 16, 11 * 16, 14 * 16, 15 * 16
                    };
                    for (i = 0; i < 16; i++)
                        dctcoef_set(h->mb + (p * 256 << pixel_shift),
                                    pixel_shift, dc_mapping[i],
                                    dctcoef_get(h->mb_luma_dc[p],
                                                pixel_shift, i));
                }
            }
        } else if (CONFIG_SVQ3_DECODER)
            ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
                                           h->mb_luma_dc[p], qscale);
    }
}
/**
 * Add the inverse-transformed residual of a non-intra4x4 macroblock
 * (intra16x16 or inter) to the already-predicted plane.
 *
 * @param p plane index: 0 for luma, 1/2 for the extra planes in 4:4:4
 */
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, H264SliceContext *sl,
                                                    int mb_type,
                                                    int is_h264, int simple,
                                                    int transform_bypass,
                                                    int pixel_shift,
                                                    int *block_offset,
                                                    int linesize,
                                                    uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;

    /* Each plane owns 16 entries of the block offset table. */
    block_offset += 16 * p;

    if (!IS_INTRA4x4(mb_type)) {  /* intra4x4 residual was added during prediction */
        if (is_h264) {
            if (IS_INTRA16x16(mb_type)) {
                if (transform_bypass) {
                    if (h->sps.profile_idc == 244 &&
                        (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                         sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                        /* Lossless vertical/horizontal 16x16: fused add. */
                        h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
                                                                       h->mb + (p * 256 << pixel_shift),
                                                                       linesize);
                    } else {
                        for (i = 0; i < 16; i++)
                            if (sl->non_zero_count_cache[scan8[i + p * 16]] ||
                                dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                                  h->mb + (i * 16 + p * 256 << pixel_shift),
                                                                  linesize);
                    }
                } else {
                    h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                    h->mb + (p * 256 << pixel_shift),
                                                    linesize,
                                                    sl->non_zero_count_cache + p * 5 * 8);
                }
            } else if (h->cbp & 15) {  /* inter MB with at least one coded luma 8x8 */
                if (transform_bypass) {
                    const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                    idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
                                                  : h->h264dsp.h264_add_pixels4_clear;
                    for (i = 0; i < 16; i += di)
                        if (sl->non_zero_count_cache[scan8[i + p * 16]])
                            idct_add(dest_y + block_offset[i],
                                     h->mb + (i * 16 + p * 256 << pixel_shift),
                                     linesize);
                } else {
                    if (IS_8x8DCT(mb_type))
                        h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   sl->non_zero_count_cache + p * 5 * 8);
                    else
                        h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   sl->non_zero_count_cache + p * 5 * 8);
                }
            }
        } else if (CONFIG_SVQ3_DECODER) {
            for (i = 0; i < 16; i++)
                if (sl->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
                    // FIXME benchmark weird rule, & below
                    uint8_t *const ptr = dest_y + block_offset[i];
                    ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
                                       sl->qscale, IS_INTRA(mb_type) ? 1 : 0);
                }
        }
    }
}
  743. #define BITS 8
  744. #define SIMPLE 1
  745. #include "h264_mb_template.c"
  746. #undef BITS
  747. #define BITS 16
  748. #include "h264_mb_template.c"
  749. #undef SIMPLE
  750. #define SIMPLE 0
  751. #include "h264_mb_template.c"
  752. void ff_h264_hl_decode_mb(H264Context *h, H264SliceContext *sl)
  753. {
  754. const int mb_xy = h->mb_xy;
  755. const int mb_type = h->cur_pic.mb_type[mb_xy];
  756. int is_complex = CONFIG_SMALL || h->is_complex ||
  757. IS_INTRA_PCM(mb_type) || sl->qscale == 0;
  758. if (CHROMA444(h)) {
  759. if (is_complex || h->pixel_shift)
  760. hl_decode_mb_444_complex(h, sl);
  761. else
  762. hl_decode_mb_444_simple_8(h, sl);
  763. } else if (is_complex) {
  764. hl_decode_mb_complex(h, sl);
  765. } else if (h->pixel_shift) {
  766. hl_decode_mb_simple_16(h, sl);
  767. } else
  768. hl_decode_mb_simple_8(h, sl);
  769. }