You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

811 lines
37KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... decoder
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 macroblock decoding
  24. */
  25. #include <stdint.h>
  26. #include "config.h"
  27. #include "libavutil/common.h"
  28. #include "libavutil/intreadwrite.h"
  29. #include "avcodec.h"
  30. #include "h264.h"
  31. #include "qpeldsp.h"
  32. #include "thread.h"
  33. static inline int get_lowest_part_list_y(H264SliceContext *sl,
  34. int n, int height, int y_offset, int list)
  35. {
  36. int raw_my = sl->mv_cache[list][scan8[n]][1];
  37. int filter_height_up = (raw_my & 3) ? 2 : 0;
  38. int filter_height_down = (raw_my & 3) ? 3 : 0;
  39. int full_my = (raw_my >> 2) + y_offset;
  40. int top = full_my - filter_height_up;
  41. int bottom = full_my + filter_height_down + height;
  42. return FFMAX(abs(top), bottom);
  43. }
  44. static inline void get_lowest_part_y(const H264Context *h, H264SliceContext *sl,
  45. int refs[2][48], int n,
  46. int height, int y_offset, int list0,
  47. int list1, int *nrefs)
  48. {
  49. int my;
  50. y_offset += 16 * (sl->mb_y >> MB_FIELD(sl));
  51. if (list0) {
  52. int ref_n = sl->ref_cache[0][scan8[n]];
  53. H264Ref *ref = &sl->ref_list[0][ref_n];
  54. // Error resilience puts the current picture in the ref list.
  55. // Don't try to wait on these as it will cause a deadlock.
  56. // Fields can wait on each other, though.
  57. if (ref->parent->tf.progress->data != h->cur_pic.tf.progress->data ||
  58. (ref->reference & 3) != h->picture_structure) {
  59. my = get_lowest_part_list_y(sl, n, height, y_offset, 0);
  60. if (refs[0][ref_n] < 0)
  61. nrefs[0] += 1;
  62. refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
  63. }
  64. }
  65. if (list1) {
  66. int ref_n = sl->ref_cache[1][scan8[n]];
  67. H264Ref *ref = &sl->ref_list[1][ref_n];
  68. if (ref->parent->tf.progress->data != h->cur_pic.tf.progress->data ||
  69. (ref->reference & 3) != h->picture_structure) {
  70. my = get_lowest_part_list_y(sl, n, height, y_offset, 1);
  71. if (refs[1][ref_n] < 0)
  72. nrefs[1] += 1;
  73. refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
  74. }
  75. }
  76. }
/**
 * Wait until all reference frames are available for MC operations.
 *
 * With frame-threaded decoding, reference pictures may still be being
 * decoded by other threads.  This walks the macroblock's partition tree,
 * computes the lowest reference row each partition will read, and blocks
 * on each referenced picture's progress until that row is decoded.
 *
 * @param h the H264 context
 */
static void await_references(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int refs[2][48];          // per list/ref index: lowest row needed, -1 = ref unused
    int nrefs[2] = { 0 };     // number of distinct references still pending per list
    int ref, list;

    memset(refs, -1, sizeof(refs));

    /* Gather the lowest needed row for every MC partition, following the
     * 16x16 / 16x8 / 8x16 / 8x8(+sub-partitions) macroblock layout. */
    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        assert(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = sl->sub_mb_type[i];
            const int n           = 4 * i;          // first 4x4 block of this 8x8
            int y_offset          = (i & 2) << 2;   // 0 for top 8x8s, 8 for bottom

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;

                assert(IS_SUB_4X4(sub_mb_type));

                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);

                    get_lowest_part_y(h, sl, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    /* Now wait on each referenced picture until its needed row is decoded.
     * The frame/field combinations need different row/field mappings. */
    for (list = sl->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                H264Ref *ref_pic         = &sl->ref_list[list][ref];
                int ref_field            = ref_pic->reference - 1;
                int ref_field_picture    = ref_pic->parent->field_picture;
                int pic_height           = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(sl);   // MBAFF stores values halved; undo that
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    /* A frame needs both fields of the reference decoded far
                     * enough; rows interleave between the two fields. */
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    /* field referencing a field: wait on the matching field */
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    /* frame referencing a frame */
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}
/**
 * Motion-compensate one partition from a single reference picture
 * (one prediction direction).
 *
 * Luma uses quarter-pel MVs (qpix_op selected by the sub-pel fraction);
 * chroma uses eighth-pel interpolation via chroma_op.  When the MV points
 * (partly) outside the picture, the edge is emulated into a scratch
 * buffer first.
 *
 * @param square    if true a single qpix_op call covers the block,
 *                  otherwise a second call at +delta is needed
 * @param chroma_idc 1 = yuv420, 2 = yuv422, 3 = yuv444
 */
static av_always_inline void mc_dir_part(const H264Context *h, H264SliceContext *sl,
                                         H264Ref *pic,
                                         int n, int square, int height,
                                         int delta, int list,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int src_x_offset, int src_y_offset,
                                         const qpel_mc_func *qpix_op,
                                         h264_chroma_mc_func chroma_op,
                                         int pixel_shift, int chroma_idc)
{
    /* Full MV in quarter-pel units: cached MV plus the partition offset. */
    const int mx      = sl->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = sl->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2);  // sub-pel phase -> qpel function index
    ptrdiff_t offset  = ((mx >> 2) << pixel_shift) + (my >> 2) * sl->mb_linesize;
    uint8_t *src_y    = pic->data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width   = 0;
    int extra_height  = 0;
    int emu           = 0;
    const int full_mx    = mx >> 2;   // integer-pel MV components
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(sl);
    int ysh;

    /* Sub-pel interpolation reads up to 3 extra samples around the block. */
    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    /* MV reaches outside the picture: emulate the missing edge samples. */
    if (full_mx < 0 - extra_width ||
        full_my < 0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                 src_y - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                 sl->mb_linesize, sl->mb_linesize,
                                 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
                                 full_my - 2, pic_width, pic_height);
        src_y = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, sl->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize);

    if (CONFIG_GRAY && h->flags & AV_CODEC_FLAG_GRAY)
        return;

    if (chroma_idc == 3 /* yuv444 */) {
        /* 4:4:4: chroma planes have luma geometry, so reuse the luma
         * qpel functions and the same offset/edge handling. */
        src_cb = pic->data[1] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cb - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cb = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, sl->mb_linesize);

        src_cr = pic->data[2] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cr - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cr = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, sl->mb_linesize);
        return;
    }

    /* 4:2:0 / 4:2:2: vertical chroma MV shift differs (>>3 vs >>2). */
    ysh = 3 - (chroma_idc == 2 /* yuv422 */);
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(sl)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((sl->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->data[1] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * sl->mb_uvlinesize;
    src_cr = pic->data[2] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * sl->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cb,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = sl->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, sl->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cr,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = sl->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, sl->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
}
  286. static av_always_inline void mc_part_std(const H264Context *h, H264SliceContext *sl,
  287. int n, int square,
  288. int height, int delta,
  289. uint8_t *dest_y, uint8_t *dest_cb,
  290. uint8_t *dest_cr,
  291. int x_offset, int y_offset,
  292. const qpel_mc_func *qpix_put,
  293. h264_chroma_mc_func chroma_put,
  294. const qpel_mc_func *qpix_avg,
  295. h264_chroma_mc_func chroma_avg,
  296. int list0, int list1,
  297. int pixel_shift, int chroma_idc)
  298. {
  299. const qpel_mc_func *qpix_op = qpix_put;
  300. h264_chroma_mc_func chroma_op = chroma_put;
  301. dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  302. if (chroma_idc == 3 /* yuv444 */) {
  303. dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  304. dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
  305. } else if (chroma_idc == 2 /* yuv422 */) {
  306. dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
  307. dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
  308. } else { /* yuv420 */
  309. dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
  310. dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
  311. }
  312. x_offset += 8 * sl->mb_x;
  313. y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));
  314. if (list0) {
  315. H264Ref *ref = &sl->ref_list[0][sl->ref_cache[0][scan8[n]]];
  316. mc_dir_part(h, sl, ref, n, square, height, delta, 0,
  317. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  318. qpix_op, chroma_op, pixel_shift, chroma_idc);
  319. qpix_op = qpix_avg;
  320. chroma_op = chroma_avg;
  321. }
  322. if (list1) {
  323. H264Ref *ref = &sl->ref_list[1][sl->ref_cache[1][scan8[n]]];
  324. mc_dir_part(h, sl, ref, n, square, height, delta, 1,
  325. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  326. qpix_op, chroma_op, pixel_shift, chroma_idc);
  327. }
  328. }
/**
 * Weighted-prediction motion compensation for one partition.
 *
 * Bi-directional partitions predict both references into separate buffers
 * and combine them with (implicit or explicit) bi-weights; single-direction
 * partitions predict in place and then apply the explicit weight/offset.
 */
static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceContext *sl,
                                              int n, int square,
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              const qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    /* Position the destinations; chroma height/offset depend on the
     * subsampling mode (444 even reuses the luma weight functions). */
    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg;
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }
    x_offset += 8 * sl->mb_x;
    y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = sl->bipred_scratchpad;
        uint8_t *tmp_cr = sl->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = sl->bipred_scratchpad + 16 * sl->mb_uvlinesize;
        int refn0       = sl->ref_cache[0][scan8[n]];
        int refn1       = sl->ref_cache[1][scan8[n]];

        /* Predict list 0 into the destination, list 1 into scratch. */
        mc_dir_part(h, sl, &sl->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, sl, &sl->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (sl->pwt.use_weight == 2) {
            /* Implicit weights: complementary pair summing to 64. */
            int weight0 = sl->pwt.implicit_weight[refn0][refn1][sl->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
                            height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
        } else {
            /* Explicit weights: per-ref weight and offset from the
             * prediction-weight table. */
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
                            sl->pwt.luma_log2_weight_denom,
                            sl->pwt.luma_weight[refn0][0][0],
                            sl->pwt.luma_weight[refn1][1][0],
                            sl->pwt.luma_weight[refn0][0][1] +
                            sl->pwt.luma_weight[refn1][1][1]);
            chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
                              sl->pwt.chroma_log2_weight_denom,
                              sl->pwt.chroma_weight[refn0][0][0][0],
                              sl->pwt.chroma_weight[refn1][1][0][0],
                              sl->pwt.chroma_weight[refn0][0][0][1] +
                              sl->pwt.chroma_weight[refn1][1][0][1]);
            chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
                              sl->pwt.chroma_log2_weight_denom,
                              sl->pwt.chroma_weight[refn0][0][1][0],
                              sl->pwt.chroma_weight[refn1][1][1][0],
                              sl->pwt.chroma_weight[refn0][0][1][1] +
                              sl->pwt.chroma_weight[refn1][1][1][1]);
        }
    } else {
        /* Single direction: predict, then weight in place. */
        int list = list1 ? 1 : 0;
        int refn = sl->ref_cache[list][scan8[n]];
        H264Ref *ref = &sl->ref_list[list][refn];
        mc_dir_part(h, sl, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        luma_weight_op(dest_y, sl->mb_linesize, height,
                       sl->pwt.luma_log2_weight_denom,
                       sl->pwt.luma_weight[refn][list][0],
                       sl->pwt.luma_weight[refn][list][1]);
        if (sl->pwt.use_weight_chroma) {
            chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
                             sl->pwt.chroma_log2_weight_denom,
                             sl->pwt.chroma_weight[refn][list][0][0],
                             sl->pwt.chroma_weight[refn][list][0][1]);
            chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
                             sl->pwt.chroma_log2_weight_denom,
                             sl->pwt.chroma_weight[refn][list][1][0],
                             sl->pwt.chroma_weight[refn][list][1][1]);
        }
    }
}
  431. static av_always_inline void prefetch_motion(const H264Context *h, H264SliceContext *sl,
  432. int list, int pixel_shift,
  433. int chroma_idc)
  434. {
  435. /* fetch pixels for estimated mv 4 macroblocks ahead
  436. * optimized for 64byte cache lines */
  437. const int refn = sl->ref_cache[list][scan8[0]];
  438. if (refn >= 0) {
  439. const int mx = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * sl->mb_x + 8;
  440. const int my = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * sl->mb_y;
  441. uint8_t **src = sl->ref_list[list][refn].data;
  442. int off = (mx << pixel_shift) +
  443. (my + (sl->mb_x & 3) * 4) * sl->mb_linesize +
  444. (64 << pixel_shift);
  445. h->vdsp.prefetch(src[0] + off, sl->linesize, 4);
  446. if (chroma_idc == 3 /* yuv444 */) {
  447. h->vdsp.prefetch(src[1] + off, sl->linesize, 4);
  448. h->vdsp.prefetch(src[2] + off, sl->linesize, 4);
  449. } else {
  450. off = ((mx >> 1) << pixel_shift) +
  451. ((my >> 1) + (sl->mb_x & 7)) * sl->uvlinesize +
  452. (64 << pixel_shift);
  453. h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
  454. }
  455. }
  456. }
/**
 * Exchange (or copy) the pixels along the macroblock's top border with the
 * saved top-border buffers, so the deblocking filter can operate on
 * pre-deblock values of the neighbouring macroblock row.
 *
 * @param xchg      nonzero to swap, zero to copy into the border buffer
 * @param chroma444 chroma planes have luma geometry
 * @param simple    fast path without MBAFF handling
 */
static av_always_inline void xchg_mb_border(const H264Context *h, H264SliceContext *sl,
                                            uint8_t *src_y,
                                            uint8_t *src_cb, uint8_t *src_cr,
                                            int linesize, int uvlinesize,
                                            int xchg, int chroma444,
                                            int simple, int pixel_shift)
{
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;   // saved border of the macroblock to the top-left
    uint8_t *top_border;      // saved border of the macroblock above

    /* MBAFF: only the relevant field's border row participates. */
    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl))
                return;
        } else {
            top_idx = MB_MBAFF(sl) ? 0 : 1;
        }
    }

    /* Decide whether the top/top-left neighbours are deblocked at all;
     * deblocking_filter == 2 restricts filtering to the same slice. */
    if (sl->deblocking_filter == 2) {
        deblock_topleft = h->slice_table[sl->mb_xy - 1 - h->mb_stride] == sl->slice_num;
        deblock_top     = sl->top_type;
    } else {
        deblock_topleft = (sl->mb_x > 0);
        deblock_top     = (sl->mb_y > !!MB_FIELD(sl));
    }

    /* Step back one row and one pixel so the border row is addressed. */
    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = sl->top_borders[top_idx][sl->mb_x - 1];
    top_border    = sl->top_borders[top_idx][sl->mb_x];

/* Swap or copy 8 pixels (16 bytes at high bit depth) between border
 * buffer and picture, depending on xchg and pixel_shift. */
#define XCHG(a, b, xchg)                        \
    if (pixel_shift) {                          \
        if (xchg) {                             \
            AV_SWAP64(b + 0, a + 0);            \
            AV_SWAP64(b + 8, a + 8);            \
        } else {                                \
            AV_COPY128(b, a);                   \
        }                                       \
    } else if (xchg)                            \
        AV_SWAP64(b, a);                        \
    else                                        \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (sl->mb_x + 1 < h->mb_width) {
            XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
    }

    /* Chroma borders (skipped entirely in gray-only decoding). */
    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (sl->mb_x + 1 < h->mb_width) {
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            }
        } else {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
  541. static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
  542. int index)
  543. {
  544. if (high_bit_depth) {
  545. return AV_RN32A(((int32_t *)mb) + index);
  546. } else
  547. return AV_RN16A(mb + index);
  548. }
  549. static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
  550. int index, int value)
  551. {
  552. if (high_bit_depth) {
  553. AV_WN32A(((int32_t *)mb) + index, value);
  554. } else
  555. AV_WN16A(mb + index, value);
  556. }
/**
 * Intra prediction + residual reconstruction for one luma (or, with p > 0,
 * one 4:4:4 chroma) plane of an intra macroblock.
 *
 * Handles 4x4 and 8x8 intra prediction with per-block IDCT-add, and
 * 16x16 intra prediction with the separate luma DC transform.
 *
 * @param p plane index: 0 = luma, 1/2 = chroma planes in 4:4:4 mode
 */
static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
                                                       H264SliceContext *sl,
                                                       int mb_type, int simple,
                                                       int transform_bypass,
                                                       int pixel_shift,
                                                       const int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];

    block_offset += 16 * p;   // each plane has 16 entries in the offset table

    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            /* 8x8 intra prediction, four 8x8 blocks per 16x16 plane. */
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels8_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                idct_add    = h->h264dsp.h264_idct8_add;
            }
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];

                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    /* Hi444 lossless: prediction and residual add are fused. */
                    h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
                                         (sl->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        /* DC-only blocks take the cheaper dc_add path. */
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            /* 4x4 intra prediction, sixteen 4x4 blocks. */
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels4_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                idct_add    = h->h264dsp.h264_idct_add;
            }
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];

                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;

                    /* These modes read top-right samples; when those are
                     * unavailable, replicate the rightmost top sample. */
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
                        assert(sl->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            if (pixel_shift) {
                                tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr       = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        }
    } else {
        /* 16x16 intra: whole-plane prediction, then handle the separately
         * coded luma DC coefficients. */
        h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
        if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
            if (!transform_bypass)
                h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
                                                     sl->mb_luma_dc[p],
                                                     h->dequant4_coeff[p][qscale][0]);
            else {
                /* Lossless: scatter the DC values back into each block's
                 * position in sl->mb (raster -> block-scan mapping). */
                static const uint8_t dc_mapping[16] = {
                    0 * 16,  1 * 16,  4 * 16,  5 * 16,
                    2 * 16,  3 * 16,  6 * 16,  7 * 16,
                    8 * 16,  9 * 16, 12 * 16, 13 * 16,
                   10 * 16, 11 * 16, 14 * 16, 15 * 16
                };
                for (i = 0; i < 16; i++)
                    dctcoef_set(sl->mb + (p * 256 << pixel_shift),
                                pixel_shift, dc_mapping[i],
                                dctcoef_get(sl->mb_luma_dc[p],
                                            pixel_shift, i));
            }
        }
    }
}
/**
 * Add the residual (IDCT output, or raw samples in bypass mode) for one
 * luma plane of a non-intra-4x4 macroblock: intra-16x16 AC levels or
 * inter residuals, using 4x4 or 8x8 transforms as signalled.
 *
 * @param p plane index: 0 = luma, 1/2 = chroma planes in 4:4:4 mode
 */
static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl,
                                                    int mb_type, int simple,
                                                    int transform_bypass,
                                                    int pixel_shift,
                                                    const int *block_offset,
                                                    int linesize,
                                                    uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;

    block_offset += 16 * p;   // each plane has 16 entries in the offset table

    if (!IS_INTRA4x4(mb_type)) {
        if (IS_INTRA16x16(mb_type)) {
            if (transform_bypass) {
                if (h->sps.profile_idc == 244 &&
                    (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                     sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                    /* Hi444 lossless V/H modes: fused predict+add. */
                    h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
                                                                   sl->mb + (p * 256 << pixel_shift),
                                                                   linesize);
                } else {
                    /* Lossless: add raw residual for blocks with coefficients. */
                    for (i = 0; i < 16; i++)
                        if (sl->non_zero_count_cache[scan8[i + p * 16]] ||
                            dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                              sl->mb + (i * 16 + p * 256 << pixel_shift),
                                                              linesize);
                }
            } else {
                h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                sl->mb + (p * 256 << pixel_shift),
                                                linesize,
                                                sl->non_zero_count_cache + p * 5 * 8);
            }
        } else if (sl->cbp & 15) {
            /* Inter macroblock with at least one coded luma 8x8 block. */
            if (transform_bypass) {
                const int di = IS_8x8DCT(mb_type) ? 4 : 1;   // 8x8 blocks step by 4
                idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
                                              : h->h264dsp.h264_add_pixels4_clear;
                for (i = 0; i < 16; i += di)
                    if (sl->non_zero_count_cache[scan8[i + p * 16]])
                        idct_add(dest_y + block_offset[i],
                                 sl->mb + (i * 16 + p * 256 << pixel_shift),
                                 linesize);
            } else {
                if (IS_8x8DCT(mb_type))
                    h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
                else
                    h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
            }
        }
    }
}
  722. #define BITS 8
  723. #define SIMPLE 1
  724. #include "h264_mb_template.c"
  725. #undef BITS
  726. #define BITS 16
  727. #include "h264_mb_template.c"
  728. #undef SIMPLE
  729. #define SIMPLE 0
  730. #include "h264_mb_template.c"
  731. void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
  732. {
  733. const int mb_xy = sl->mb_xy;
  734. const int mb_type = h->cur_pic.mb_type[mb_xy];
  735. int is_complex = CONFIG_SMALL || sl->is_complex ||
  736. IS_INTRA_PCM(mb_type) || sl->qscale == 0;
  737. if (CHROMA444(h)) {
  738. if (is_complex || h->pixel_shift)
  739. hl_decode_mb_444_complex(h, sl);
  740. else
  741. hl_decode_mb_444_simple_8(h, sl);
  742. } else if (is_complex) {
  743. hl_decode_mb_complex(h, sl);
  744. } else if (h->pixel_shift) {
  745. hl_decode_mb_simple_16(h, sl);
  746. } else
  747. hl_decode_mb_simple_8(h, sl);
  748. }