/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"

#include "avcodec.h"
#include "internal.h"
#include "videodsp.h"
#include "vp9data.h"
#include "vp9dec.h"

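/* Gather the top/left edge samples needed by the chosen intra predictor into
 * *a and l[], substituting fixed DC values where neighbours are unavailable
 * (frame/tile borders), and return the possibly remapped prediction mode. */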
static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t **a,
                                             uint8_t *dst_edge, ptrdiff_t stride_edge,
                                             uint8_t *dst_inner, ptrdiff_t stride_inner,
                                             uint8_t *l, int col, int x, int w,
                                             int row, int y, enum TxfmMode tx,
                                             int p, int ss_h, int ss_v, int bytesperpixel)
{
    VP9Context *s = td->s;
    int have_top   = row > 0 || y > 0;
    int have_left  = col > td->tile_col_start || x > 0;
    int have_right = x < w - 1;
    int bpp = s->s.h.bpp;
    static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
        [VERT_PRED]            = { { DC_127_PRED,          VERT_PRED            },
                                   { DC_127_PRED,          VERT_PRED            } },
        [HOR_PRED]             = { { DC_129_PRED,          DC_129_PRED          },
                                   { HOR_PRED,             HOR_PRED             } },
        [DC_PRED]              = { { DC_128_PRED,          TOP_DC_PRED          },
                                   { LEFT_DC_PRED,         DC_PRED              } },
        [DIAG_DOWN_LEFT_PRED]  = { { DC_127_PRED,          DIAG_DOWN_LEFT_PRED  },
                                   { DC_127_PRED,          DIAG_DOWN_LEFT_PRED  } },
        [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
                                   { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
        [VERT_RIGHT_PRED]      = { { VERT_RIGHT_PRED,      VERT_RIGHT_PRED      },
                                   { VERT_RIGHT_PRED,      VERT_RIGHT_PRED      } },
        [HOR_DOWN_PRED]        = { { HOR_DOWN_PRED,        HOR_DOWN_PRED        },
                                   { HOR_DOWN_PRED,        HOR_DOWN_PRED        } },
        [VERT_LEFT_PRED]       = { { DC_127_PRED,          VERT_LEFT_PRED       },
                                   { DC_127_PRED,          VERT_LEFT_PRED       } },
        [HOR_UP_PRED]          = { { DC_129_PRED,          DC_129_PRED          },
                                   { HOR_UP_PRED,          HOR_UP_PRED          } },
        [TM_VP8_PRED]          = { { DC_129_PRED,          VERT_PRED            },
                                   { HOR_PRED,             TM_VP8_PRED          } },
    };
    static const struct {
        uint8_t needs_left:1;
        uint8_t needs_top:1;
        uint8_t needs_topleft:1;
        uint8_t needs_topright:1;
        uint8_t invert_left:1;
    } edges[N_INTRA_PRED_MODES] = {
        [VERT_PRED]            = { .needs_top  = 1 },
        [HOR_PRED]             = { .needs_left = 1 },
        [DC_PRED]              = { .needs_top  = 1, .needs_left = 1 },
        [DIAG_DOWN_LEFT_PRED]  = { .needs_top  = 1, .needs_topright = 1 },
        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_LEFT_PRED]       = { .needs_top  = 1, .needs_topright = 1 },
        [HOR_UP_PRED]          = { .needs_left = 1, .invert_left = 1 },
        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [LEFT_DC_PRED]         = { .needs_left = 1 },
        [TOP_DC_PRED]          = { .needs_top  = 1 },
        [DC_128_PRED]          = { 0 },
        [DC_127_PRED]          = { 0 },
        [DC_129_PRED]          = { 0 }
    };

    av_assert2(mode >= 0 && mode < 10);
    mode = mode_conv[mode][have_left][have_top];
    if (edges[mode].needs_top) {
        uint8_t *top, *topleft;
        int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
        int n_px_need_tr = 0;

        if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
            n_px_need_tr = 4;

        // if top of sb64-row, use s->intra_pred_data[] instead of
        // dst[-stride] for intra prediction (it contains pre- instead of
        // post-loopfilter data)
        if (have_top) {
            top = !(row & 7) && !y ?
                s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
            if (have_left)
                topleft = !(row & 7) && !y ?
                    s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                    y == 0 || x == 0 ? &dst_edge[-stride_edge] :
                    &dst_inner[-stride_inner];
        }

        if (have_top &&
            (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
            (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
            n_px_need + n_px_need_tr <= n_px_have) {
            *a = top;
        } else {
            if (have_top) {
                if (n_px_need <= n_px_have) {
                    memcpy(*a, top, n_px_need * bytesperpixel);
                } else {
#define memset_bpp(c, i1, v, i2, num) do { \
    if (bytesperpixel == 1) { \
        memset(&(c)[(i1)], (v)[(i2)], (num)); \
    } else { \
        int n, val = AV_RN16A(&(v)[(i2) * 2]); \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[((i1) + n) * 2], val); \
        } \
    } \
} while (0)
                    memcpy(*a, top, n_px_have * bytesperpixel);
                    memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
#define memset_val(c, val, num) do { \
    if (bytesperpixel == 1) { \
        memset((c), (val), (num)); \
    } else { \
        int n; \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[n * 2], (val)); \
        } \
    } \
} while (0)
                memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
            }
            if (edges[mode].needs_topleft) {
                if (have_left && have_top) {
#define assign_bpp(c, i1, v, i2) do { \
    if (bytesperpixel == 1) { \
        (c)[(i1)] = (v)[(i2)]; \
    } else { \
        AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
    } \
} while (0)
                    assign_bpp(*a, -1, topleft, -1);
                } else {
#define assign_val(c, i, v) do { \
    if (bytesperpixel == 1) { \
        (c)[(i)] = (v); \
    } else { \
        AV_WN16A(&(c)[(i) * 2], (v)); \
    } \
} while (0)
                    assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
                }
            }
            if (tx == TX_4X4 && edges[mode].needs_topright) {
                if (have_top && have_right &&
                    n_px_need + n_px_need_tr <= n_px_have) {
                    memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
                } else {
                    memset_bpp(*a, 4, *a, 3, 4);
                }
            }
        }
    }
    if (edges[mode].needs_left) {
        if (have_left) {
            int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
            uint8_t *dst = x == 0 ? dst_edge : dst_inner;
            ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;

            if (edges[mode].invert_left) {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                    memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                    memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
                }
            }
        } else {
            memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
        }
    }

    return mode;
}

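/* Reconstruct an intra-coded block: for each transform block of the luma and
 * chroma planes, build the prediction edges, run spatial prediction, and add
 * the inverse transform of the residual where coefficients are present. */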
static av_always_inline void intra_recon(VP9TileData *td, ptrdiff_t y_off,
                                         ptrdiff_t uv_off, int bytesperpixel)
{
    VP9Context *s = td->s;
    VP9Block *b = td->b;
    int row = td->row, col = td->col;
    int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
    int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
    int uvstep1d = 1 << b->uvtx, p;
    uint8_t *dst = td->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
    LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
    LOCAL_ALIGNED_32(uint8_t, l, [64]);

    for (n = 0, y = 0; y < end_y; y += step1d) {
        uint8_t *ptr = dst, *ptr_r = dst_r;
        for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
             ptr_r += 4 * step1d * bytesperpixel, n += step) {
            int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
                               y * 2 + x : 0];
            uint8_t *a = &a_buf[32];
            enum TxfmType txtp = ff_vp9_intra_txfm_type[mode];
            int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];

            mode = check_intra_mode(td, mode, &a, ptr_r,
                                    s->s.frames[CUR_FRAME].tf.f->linesize[0],
                                    ptr, td->y_stride, l,
                                    col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
            s->dsp.intra_pred[b->tx][mode](ptr, td->y_stride, l, a);
            if (eob)
                s->dsp.itxfm_add[tx][txtp](ptr, td->y_stride,
                                           td->block + 16 * n * bytesperpixel, eob);
        }
        dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
        dst   += 4 * step1d * td->y_stride;
    }

    // U/V
    w4    >>= s->ss_h;
    end_x >>= s->ss_h;
    end_y >>= s->ss_v;
    step = 1 << (b->uvtx * 2);
    for (p = 0; p < 2; p++) {
        dst   = td->dst[1 + p];
        dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
        for (n = 0, y = 0; y < end_y; y += uvstep1d) {
            uint8_t *ptr = dst, *ptr_r = dst_r;
            for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
                 ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
                int mode = b->uvmode;
                uint8_t *a = &a_buf[32];
                int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];

                mode = check_intra_mode(td, mode, &a, ptr_r,
                                        s->s.frames[CUR_FRAME].tf.f->linesize[1],
                                        ptr, td->uv_stride, l, col, x, w4, row, y,
                                        b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
                s->dsp.intra_pred[b->uvtx][mode](ptr, td->uv_stride, l, a);
                if (eob)
                    s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
                                                    td->uvblock[p] + 16 * n * bytesperpixel, eob);
            }
            dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
            dst   += 4 * uvstep1d * td->uv_stride;
        }
    }
}

void ff_vp9_intra_recon_8bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(td, y_off, uv_off, 1);
}

void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(td, y_off, uv_off, 2);
}

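/* Unscaled motion compensation (reference has the same dimensions as the
 * current frame): wait for the needed reference rows to be decoded, fall back
 * to emulated_edge_mc() when the subpel filter would read outside the frame,
 * then call the MC function selected by the fractional MV components. */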
static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
                                              uint8_t *dst, ptrdiff_t dst_stride,
                                              const uint8_t *ref, ptrdiff_t ref_stride,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                              int bw, int bh, int w, int h, int bytesperpixel)
{
    VP9Context *s = td->s;
    int mx = mv->x, my = mv->y, th;

    y += my >> 3;
    x += mx >> 3;
    ref += y * ref_stride + x * bytesperpixel;
    mx &= 7;
    my &= 7;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> 6;
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than what actually is
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
                                 160, ref_stride,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        ref_stride = 160;
    }
    mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
}

static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
                                                uint8_t *dst_u, uint8_t *dst_v,
                                                ptrdiff_t dst_stride,
                                                const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                                const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                                ThreadFrame *ref_frame,
                                                ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                                int bw, int bh, int w, int h, int bytesperpixel)
{
    VP9Context *s = td->s;
    int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;

    y += my >> 4;
    x += mx >> 4;
    ref_u += y * src_stride_u + x * bytesperpixel;
    ref_v += y * src_stride_v + x * bytesperpixel;
    mx &= 15;
    my &= 15;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than what actually is
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
                                 160, src_stride_u,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_u = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);

        s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                 ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
                                 160, src_stride_v,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_v = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
    } else {
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
    }
}

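/* vp9_mc_template.c is included once per bit depth, with mc_luma_dir() and
 * mc_chroma_dir() mapped onto the helpers above, to generate the
 * inter_pred_{8,16}bpp() drivers used by inter_recon() further down. */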
#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_unscaled(td, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                     mv, bw, bh, w, h, bytesperpixel)
#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_unscaled(td, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                       row, col, mv, bw, bh, w, h, bytesperpixel)
#define SCALED 0
#define FN(x) x##_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
#undef SCALED

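/* Scaled motion compensation: used when the reference frame dimensions differ
 * from the current frame, so the MV and block position are first mapped
 * through the per-reference scale factors before sub-pel filtering. */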
static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
                                            vp9_mc_func (*mc)[2],
                                            uint8_t *dst, ptrdiff_t dst_stride,
                                            const uint8_t *ref, ptrdiff_t ref_stride,
                                            ThreadFrame *ref_frame,
                                            ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                            int px, int py, int pw, int ph,
                                            int bw, int bh, int w, int h, int bytesperpixel,
                                            const uint16_t *scale, const uint8_t *step)
{
    VP9Context *s = td->s;
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_luma_unscaled(td, mc, dst, dst_stride, ref, ref_stride, ref_frame,
                         y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
#define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
        mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
        // BUG libvpx seems to scale the two components separately. This introduces
        // rounding errors but we have to reproduce them to be exactly compatible
        // with the output from libvpx...
        mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);

        y = my >> 4;
        x = mx >> 4;
        ref += y * ref_stride + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> 6;
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than what actually is
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref - 3 * ref_stride - 3 * bytesperpixel,
                                     288, ref_stride,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            ref_stride = 288;
        }
        smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
    }
}

static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
                                              vp9_mc_func (*mc)[2],
                                              uint8_t *dst_u, uint8_t *dst_v,
                                              ptrdiff_t dst_stride,
                                              const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                              const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                              int px, int py, int pw, int ph,
                                              int bw, int bh, int w, int h, int bytesperpixel,
                                              const uint16_t *scale, const uint8_t *step)
{
    VP9Context *s = td->s;
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_chroma_unscaled(td, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
                           ref_v, src_stride_v, ref_frame,
                           y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        if (s->ss_h) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
            mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
        } else {
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
            mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        }
        if (s->ss_v) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
            my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
        } else {
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
            my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
        }
#undef scale_mv
        y = my >> 4;
        x = mx >> 4;
        ref_u += y * src_stride_u + x * bytesperpixel;
        ref_v += y * src_stride_v + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than what actually is
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref_u - 3 * src_stride_u - 3 * bytesperpixel,
                                     288, src_stride_u,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_u = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);

            s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
                                     ref_v - 3 * src_stride_v - 3 * bytesperpixel,
                                     288, src_stride_v,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_v = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
        } else {
            smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
            smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
        }
    }
}

#define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_scaled(td, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                   mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                   s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_scaled(td, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                     row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                     s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define SCALED 1
#define FN(x) x##_scaled_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_scaled_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
#undef SCALED

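/* Reconstruct an inter-coded block: run the scaled or unscaled inter
 * prediction for all planes, then, unless the block is skipped, add the
 * inverse transform of the coded residual per transform block. */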
static av_always_inline void inter_recon(VP9TileData *td, int bytesperpixel)
{
    VP9Context *s = td->s;
    VP9Block *b = td->b;
    int row = td->row, col = td->col;

    if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
        if (bytesperpixel == 1) {
            inter_pred_scaled_8bpp(td);
        } else {
            inter_pred_scaled_16bpp(td);
        }
    } else {
        if (bytesperpixel == 1) {
            inter_pred_8bpp(td);
        } else {
            inter_pred_16bpp(td);
        }
    }

    if (!b->skip) {
        /* mostly copied intra_recon() */
        int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
        int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
        int end_x = FFMIN(2 * (s->cols - col), w4);
        int end_y = FFMIN(2 * (s->rows - row), h4);
        int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
        int uvstep1d = 1 << b->uvtx, p;
        uint8_t *dst = td->dst[0];

        // y itxfm add
        for (n = 0, y = 0; y < end_y; y += step1d) {
            uint8_t *ptr = dst;
            for (x = 0; x < end_x; x += step1d,
                 ptr += 4 * step1d * bytesperpixel, n += step) {
                int eob = b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];

                if (eob)
                    s->dsp.itxfm_add[tx][DCT_DCT](ptr, td->y_stride,
                                                  td->block + 16 * n * bytesperpixel, eob);
            }
            dst += 4 * td->y_stride * step1d;
        }

        // uv itxfm add
        end_x >>= s->ss_h;
        end_y >>= s->ss_v;
        step = 1 << (b->uvtx * 2);
        for (p = 0; p < 2; p++) {
            dst = td->dst[p + 1];
            for (n = 0, y = 0; y < end_y; y += uvstep1d) {
                uint8_t *ptr = dst;
                for (x = 0; x < end_x; x += uvstep1d,
                     ptr += 4 * uvstep1d * bytesperpixel, n += step) {
                    int eob = b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];

                    if (eob)
                        s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
                                                        td->uvblock[p] + 16 * n * bytesperpixel, eob);
                }
                dst += 4 * uvstep1d * td->uv_stride;
            }
        }
    }
}

void ff_vp9_inter_recon_8bpp(VP9TileData *td)
{
    inter_recon(td, 1);
}

void ff_vp9_inter_recon_16bpp(VP9TileData *td)
{
    inter_recon(td, 2);
}