You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

822 lines
37KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... decoder
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 macroblock decoding
  24. */
  25. #include <stdint.h>
  26. #include "config.h"
  27. #include "libavutil/common.h"
  28. #include "libavutil/intreadwrite.h"
  29. #include "avcodec.h"
  30. #include "h264.h"
  31. #include "svq3.h"
  32. #include "thread.h"
  33. static inline int get_lowest_part_list_y(H264Context *h, H264Picture *pic, int n,
  34. int height, int y_offset, int list)
  35. {
  36. int raw_my = h->mv_cache[list][scan8[n]][1];
  37. int filter_height_up = (raw_my & 3) ? 2 : 0;
  38. int filter_height_down = (raw_my & 3) ? 3 : 0;
  39. int full_my = (raw_my >> 2) + y_offset;
  40. int top = full_my - filter_height_up;
  41. int bottom = full_my + filter_height_down + height;
  42. return FFMAX(abs(top), bottom);
  43. }
  44. static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
  45. int height, int y_offset, int list0,
  46. int list1, int *nrefs)
  47. {
  48. int my;
  49. y_offset += 16 * (h->mb_y >> MB_FIELD(h));
  50. if (list0) {
  51. int ref_n = h->ref_cache[0][scan8[n]];
  52. H264Picture *ref = &h->ref_list[0][ref_n];
  53. // Error resilience puts the current picture in the ref list.
  54. // Don't try to wait on these as it will cause a deadlock.
  55. // Fields can wait on each other, though.
  56. if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
  57. (ref->reference & 3) != h->picture_structure) {
  58. my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
  59. if (refs[0][ref_n] < 0)
  60. nrefs[0] += 1;
  61. refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
  62. }
  63. }
  64. if (list1) {
  65. int ref_n = h->ref_cache[1][scan8[n]];
  66. H264Picture *ref = &h->ref_list[1][ref_n];
  67. if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
  68. (ref->reference & 3) != h->picture_structure) {
  69. my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
  70. if (refs[1][ref_n] < 0)
  71. nrefs[1] += 1;
  72. refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
  73. }
  74. }
  75. }
  76. /**
  77. * Wait until all reference frames are available for MC operations.
  78. *
  79. * @param h the H264 context
  80. */
static void await_references(H264Context *h)
{
    const int mb_xy   = h->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    /* refs[list][ref]: lowest row needed from that reference, -1 if unused */
    int refs[2][48];
    int nrefs[2] = { 0 };
    int ref, list;

    memset(refs, -1, sizeof(refs));

    /* Pass 1: walk all partitions/sub-partitions of the macroblock and
     * collect the lowest row required from each reference picture. */
    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        assert(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = h->sub_mb_type[i];
            const int n           = 4 * i;
            int y_offset          = (i & 2) << 2; /* 0 for top 8x8s, 8 for bottom */

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;
                assert(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);
                    get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    /* Pass 2: for each used reference, block until its decoder thread has
     * produced the required row.  nrefs[] lets us stop scanning early. */
    for (list = h->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                H264Picture *ref_pic  = &h->ref_list[list][ref];
                int ref_field         = ref_pic->reference - 1;
                int ref_field_picture = ref_pic->field_picture;
                int pic_height        = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(h); /* MBAFF rows are in pair units */
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    ff_thread_await_progress(&ref_pic->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}
/**
 * Motion-compensate one rectangular part (luma and chroma) from one
 * reference picture in a single direction.
 *
 * @param square     nonzero if one qpel call covers the part; otherwise a
 *                   second call at byte offset @p delta handles the rest
 * @param chroma_idc 1 = yuv420, 2 = yuv422, 3 = yuv444
 */
static av_always_inline void mc_dir_part(H264Context *h, H264Picture *pic,
                                         int n, int square, int height,
                                         int delta, int list,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int src_x_offset, int src_y_offset,
                                         qpel_mc_func *qpix_op,
                                         h264_chroma_mc_func chroma_op,
                                         int pixel_shift, int chroma_idc)
{
    /* block position plus motion vector, in quarter-pel units */
    const int mx      = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2); /* selects the qpel filter */
    ptrdiff_t offset  = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
    uint8_t *src_y    = pic->f.data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width  = 0;
    int extra_height = 0;
    int emu = 0; /* set when reading via the edge-emulation buffer */
    const int full_mx    = mx >> 2;
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(h);
    int ysh;

    /* sub-pel interpolation reads up to 3 extra pixels on each side */
    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    if (full_mx < 0 - extra_width ||
        full_my < 0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        /* MV points (partly) outside the picture: build a padded copy of
         * the edge and interpolate from that instead. */
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                 src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
                                 h->mb_linesize, h->mb_linesize,
                                 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
                                 full_my - 2, pic_width, pic_height);
        src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);

    if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
        return; /* grayscale output: skip the chroma planes entirely */

    if (chroma_idc == 3 /* yuv444 */) {
        /* 4:4:4 chroma has luma geometry, so reuse the luma qpel code */
        src_cb = pic->f.data[1] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                     src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
                                     h->mb_linesize, h->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);

        src_cr = pic->f.data[2] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
                                     src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
                                     h->mb_linesize, h->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
        return;
    }

    /* vertical chroma shift: 3 for 4:2:0, 2 for 4:2:2 */
    ysh = 3 - (chroma_idc == 2 /* yuv422 */);
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((h->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * h->mb_uvlinesize;
    src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
             (my >> ysh) * h->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
                                 h->mb_uvlinesize, h->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = h->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
                                 h->mb_uvlinesize, h->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = h->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
}
  284. static av_always_inline void mc_part_std(H264Context *h, int n, int square,
  285. int height, int delta,
  286. uint8_t *dest_y, uint8_t *dest_cb,
  287. uint8_t *dest_cr,
  288. int x_offset, int y_offset,
  289. qpel_mc_func *qpix_put,
  290. h264_chroma_mc_func chroma_put,
  291. qpel_mc_func *qpix_avg,
  292. h264_chroma_mc_func chroma_avg,
  293. int list0, int list1,
  294. int pixel_shift, int chroma_idc)
  295. {
  296. qpel_mc_func *qpix_op = qpix_put;
  297. h264_chroma_mc_func chroma_op = chroma_put;
  298. dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
  299. if (chroma_idc == 3 /* yuv444 */) {
  300. dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
  301. dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
  302. } else if (chroma_idc == 2 /* yuv422 */) {
  303. dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
  304. dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
  305. } else { /* yuv420 */
  306. dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
  307. dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
  308. }
  309. x_offset += 8 * h->mb_x;
  310. y_offset += 8 * (h->mb_y >> MB_FIELD(h));
  311. if (list0) {
  312. H264Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
  313. mc_dir_part(h, ref, n, square, height, delta, 0,
  314. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  315. qpix_op, chroma_op, pixel_shift, chroma_idc);
  316. qpix_op = qpix_avg;
  317. chroma_op = chroma_avg;
  318. }
  319. if (list1) {
  320. H264Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
  321. mc_dir_part(h, ref, n, square, height, delta, 1,
  322. dest_y, dest_cb, dest_cr, x_offset, y_offset,
  323. qpix_op, chroma_op, pixel_shift, chroma_idc);
  324. }
  325. }
/**
 * Weighted motion compensation for one part (explicit or implicit
 * weighted prediction).  Bi-directional parts render the second
 * prediction into a scratchpad and blend; single-direction parts are
 * predicted in place and then weighted.
 */
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        /* chroma has luma geometry: reuse the luma weighting functions */
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg;
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
    }
    /* switch from macroblock-relative to picture coordinates */
    x_offset += 8 * h->mb_x;
    y_offset += 8 * (h->mb_y >> MB_FIELD(h));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = h->bipred_scratchpad;
        uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
        int refn0       = h->ref_cache[0][scan8[n]];
        int refn1       = h->ref_cache[1][scan8[n]];

        /* list 0 goes to the destination, list 1 to the scratchpad */
        mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (h->use_weight == 2) {
            /* implicit weighting: per-reference-pair weights, denom 2^5 */
            int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
                            height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
                              chroma_height, 5, weight0, weight1, 0);
        } else {
            /* explicit bi-directional weights from the slice header */
            luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
                            h->luma_log2_weight_denom,
                            h->luma_weight[refn0][0][0],
                            h->luma_weight[refn1][1][0],
                            h->luma_weight[refn0][0][1] +
                            h->luma_weight[refn1][1][1]);
            chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
                              h->chroma_log2_weight_denom,
                              h->chroma_weight[refn0][0][0][0],
                              h->chroma_weight[refn1][1][0][0],
                              h->chroma_weight[refn0][0][0][1] +
                              h->chroma_weight[refn1][1][0][1]);
            chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
                              h->chroma_log2_weight_denom,
                              h->chroma_weight[refn0][0][1][0],
                              h->chroma_weight[refn1][1][1][0],
                              h->chroma_weight[refn0][0][1][1] +
                              h->chroma_weight[refn1][1][1][1]);
        }
    } else {
        /* single direction: predict in place, then weight in place */
        int list = list1 ? 1 : 0;
        int refn = h->ref_cache[list][scan8[n]];
        H264Picture *ref = &h->ref_list[list][refn];
        mc_dir_part(h, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        luma_weight_op(dest_y, h->mb_linesize, height,
                       h->luma_log2_weight_denom,
                       h->luma_weight[refn][list][0],
                       h->luma_weight[refn][list][1]);
        if (h->use_weight_chroma) {
            chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
                             h->chroma_log2_weight_denom,
                             h->chroma_weight[refn][list][0][0],
                             h->chroma_weight[refn][list][0][1]);
            chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
                             h->chroma_log2_weight_denom,
                             h->chroma_weight[refn][list][1][0],
                             h->chroma_weight[refn][list][1][1]);
        }
    }
}
  427. static av_always_inline void prefetch_motion(H264Context *h, int list,
  428. int pixel_shift, int chroma_idc)
  429. {
  430. /* fetch pixels for estimated mv 4 macroblocks ahead
  431. * optimized for 64byte cache lines */
  432. const int refn = h->ref_cache[list][scan8[0]];
  433. if (refn >= 0) {
  434. const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
  435. const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
  436. uint8_t **src = h->ref_list[list][refn].f.data;
  437. int off = (mx << pixel_shift) +
  438. (my + (h->mb_x & 3) * 4) * h->mb_linesize +
  439. (64 << pixel_shift);
  440. h->vdsp.prefetch(src[0] + off, h->linesize, 4);
  441. if (chroma_idc == 3 /* yuv444 */) {
  442. h->vdsp.prefetch(src[1] + off, h->linesize, 4);
  443. h->vdsp.prefetch(src[2] + off, h->linesize, 4);
  444. } else {
  445. off = ((mx >> 1) << pixel_shift) +
  446. ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize +
  447. (64 << pixel_shift);
  448. h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
  449. }
  450. }
  451. }
/**
 * Swap (or copy) the row of samples above the current macroblock with
 * the saved top_borders[] buffer.  Called with xchg=1 before and xchg=0
 * after prediction (NOTE(review): inferred from the xchg parameter and
 * the swap/copy split below — confirm against the callers in the
 * template file).
 */
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y,
                                            uint8_t *src_cb, uint8_t *src_cr,
                                            int linesize, int uvlinesize,
                                            int xchg, int chroma444,
                                            int simple, int pixel_shift)
{
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1; /* saved border of the MB to the left */
    uint8_t *top_border;

    if (!simple && FRAME_MBAFF(h)) {
        if (h->mb_y & 1) {
            if (!MB_MBAFF(h))
                return;
        } else {
            top_idx = MB_MBAFF(h) ? 0 : 1;
        }
    }

    if (h->deblocking_filter == 2) {
        /* filter mode 2: only within the current slice */
        deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
        deblock_top     = h->top_type;
    } else {
        deblock_topleft = (h->mb_x > 0);
        deblock_top     = (h->mb_y > !!MB_FIELD(h));
    }

    /* step back to the row above the macroblock, one pixel left */
    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
    top_border    = h->top_borders[top_idx][h->mb_x];

/* Swap 8/16 bytes between border buffer and picture when xchg is set,
 * otherwise copy picture -> border.  pixel_shift selects 16-bit samples. */
#define XCHG(a, b, xchg) \
    if (pixel_shift) { \
        if (xchg) { \
            AV_SWAP64(b + 0, a + 0); \
            AV_SWAP64(b + 8, a + 8); \
        } else { \
            AV_COPY128(b, a); \
        } \
    } else if (xchg) \
        AV_SWAP64(b, a); \
    else \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (h->mb_x + 1 < h->mb_width) {
            XCHG(h->top_borders[top_idx][h->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
    }
    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (h->mb_x + 1 < h->mb_width) {
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            }
        } else {
            if (deblock_top) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
  535. static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
  536. int index)
  537. {
  538. if (high_bit_depth) {
  539. return AV_RN32A(((int32_t *)mb) + index);
  540. } else
  541. return AV_RN16A(mb + index);
  542. }
  543. static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
  544. int index, int value)
  545. {
  546. if (high_bit_depth) {
  547. AV_WN32A(((int32_t *)mb) + index, value);
  548. } else
  549. AV_WN16A(mb + index, value);
  550. }
/**
 * Run intra prediction (and, for intra blocks, the residual transform)
 * on one luma-sized plane.
 *
 * @param p plane index: 0 = luma, 1/2 = chroma planes in 4:4:4 mode
 */
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,
                                                       int mb_type, int is_h264,
                                                       int simple,
                                                       int transform_bypass,
                                                       int pixel_shift,
                                                       int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1];

    block_offset += 16 * p;

    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels8_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                idct_add    = h->h264dsp.h264_idct8_add;
            }
            /* 4 blocks of 8x8, one per corner */
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = h->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    /* lossless V/H prediction combined with the residual add */
                    h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
                                         (h->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        /* nnz == 1 with only the DC coeff set: cheap DC-only add */
                        if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            if (transform_bypass) {
                idct_dc_add =
                idct_add    = h->h264dsp.h264_add_pixels4_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                idct_add    = h->h264dsp.h264_idct_add;
            }
            /* 16 blocks of 4x4 */
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = h->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
                    h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (h->topright_samples_available << i) & 0x8000;
                        assert(h->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            /* top-right unavailable: replicate the rightmost
                             * top sample across all four positions */
                            if (pixel_shift) {
                                tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr       = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = h->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (is_h264) {
                            if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                            else
                                idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        } else if (CONFIG_SVQ3_DECODER)
                            ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
                    }
                }
            }
        }
    } else {
        /* intra 16x16: whole-plane prediction plus the separate DC transform */
        h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
        if (is_h264) {
            if (h->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
                if (!transform_bypass)
                    h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
                                                         h->mb_luma_dc[p],
                                                         h->dequant4_coeff[p][qscale][0]);
                else {
                    /* bypass: scatter the 16 DC coefficients back into the
                     * DC slot of each 4x4 block (zigzag-to-raster order) */
                    static const uint8_t dc_mapping[16] = {
                         0 * 16,  1 * 16,  4 * 16,  5 * 16,
                         2 * 16,  3 * 16,  6 * 16,  7 * 16,
                         8 * 16,  9 * 16, 12 * 16, 13 * 16,
                        10 * 16, 11 * 16, 14 * 16, 15 * 16
                    };
                    for (i = 0; i < 16; i++)
                        dctcoef_set(h->mb + (p * 256 << pixel_shift),
                                    pixel_shift, dc_mapping[i],
                                    dctcoef_get(h->mb_luma_dc[p],
                                                pixel_shift, i));
                }
            }
        } else if (CONFIG_SVQ3_DECODER)
            ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
                                           h->mb_luma_dc[p], qscale);
    }
}
/**
 * Add the inverse-transformed residual to one luma-sized plane for
 * non-intra-4x4 macroblocks (intra-4x4 residuals were already added
 * during prediction in hl_decode_mb_predict_luma()).
 *
 * @param p plane index: 0 = luma, 1/2 = chroma planes in 4:4:4 mode
 */
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
                                                    int is_h264, int simple,
                                                    int transform_bypass,
                                                    int pixel_shift,
                                                    int *block_offset,
                                                    int linesize,
                                                    uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;

    block_offset += 16 * p;

    if (!IS_INTRA4x4(mb_type)) {
        if (is_h264) {
            if (IS_INTRA16x16(mb_type)) {
                if (transform_bypass) {
                    if (h->sps.profile_idc == 244 &&
                        (h->intra16x16_pred_mode == VERT_PRED8x8 ||
                         h->intra16x16_pred_mode == HOR_PRED8x8)) {
                        /* lossless V/H 16x16: fused prediction + residual add */
                        h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
                                                                      h->mb + (p * 256 << pixel_shift),
                                                                      linesize);
                    } else {
                        for (i = 0; i < 16; i++)
                            /* add blocks with any AC coeffs, or a nonzero DC */
                            if (h->non_zero_count_cache[scan8[i + p * 16]] ||
                                dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
                                h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                                  h->mb + (i * 16 + p * 256 << pixel_shift),
                                                                  linesize);
                    }
                } else {
                    h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                    h->mb + (p * 256 << pixel_shift),
                                                    linesize,
                                                    h->non_zero_count_cache + p * 5 * 8);
                }
            } else if (h->cbp & 15) {
                /* inter (or non-16x16 intra) with coded luma blocks */
                if (transform_bypass) {
                    const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                    idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
                                                  : h->h264dsp.h264_add_pixels4_clear;
                    for (i = 0; i < 16; i += di)
                        if (h->non_zero_count_cache[scan8[i + p * 16]])
                            idct_add(dest_y + block_offset[i],
                                     h->mb + (i * 16 + p * 256 << pixel_shift),
                                     linesize);
                } else {
                    if (IS_8x8DCT(mb_type))
                        h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                    else
                        h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                                   h->mb + (p * 256 << pixel_shift),
                                                   linesize,
                                                   h->non_zero_count_cache + p * 5 * 8);
                }
            }
        } else if (CONFIG_SVQ3_DECODER) {
            for (i = 0; i < 16; i++)
                if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
                    // FIXME benchmark weird rule, & below
                    uint8_t *const ptr = dest_y + block_offset[i];
                    ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
                                       h->qscale, IS_INTRA(mb_type) ? 1 : 0);
                }
        }
    }
}
/* Instantiate the macroblock decoding routines from the shared template
 * for each (bit depth, simple) combination.  These provide the
 * hl_decode_mb_* functions dispatched by ff_h264_hl_decode_mb() below
 * (NOTE(review): exact function set is defined in h264_mb_template.c). */
#define BITS   8
#define SIMPLE 1
#include "h264_mb_template.c"

#undef  BITS
#define BITS   16
#include "h264_mb_template.c"

#undef  SIMPLE
#define SIMPLE 0
#include "h264_mb_template.c"
  742. void ff_h264_hl_decode_mb(H264Context *h)
  743. {
  744. const int mb_xy = h->mb_xy;
  745. const int mb_type = h->cur_pic.mb_type[mb_xy];
  746. int is_complex = CONFIG_SMALL || h->is_complex ||
  747. IS_INTRA_PCM(mb_type) || h->qscale == 0;
  748. if (CHROMA444(h)) {
  749. if (is_complex || h->pixel_shift)
  750. hl_decode_mb_444_complex(h);
  751. else
  752. hl_decode_mb_444_simple_8(h);
  753. } else if (is_complex) {
  754. hl_decode_mb_complex(h);
  755. } else if (h->pixel_shift) {
  756. hl_decode_mb_simple_16(h);
  757. } else
  758. hl_decode_mb_simple_8(h);
  759. }