/*
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2014 Peter Ross
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP8 compatible video decoder
 */
#include "dsputil.h"
#include "vp8dsp.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h" // for AV_ZERO64
#define MK_IDCT_DC_ADD4_C(name) \
static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], \
                                      ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
    name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
} \
\
static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], \
                                     ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst +  0, block[0], stride); \
    name ## _idct_dc_add_c(dst +  4, block[1], stride); \
    name ## _idct_dc_add_c(dst +  8, block[2], stride); \
    name ## _idct_dc_add_c(dst + 12, block[3], stride); \
}
#if CONFIG_VP7_DECODER
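/* The multipliers below are 4-point DCT basis values in Q15 fixed point:
 * 23170 = cos(pi/4) * 2^15, 12540 = sin(pi/8) * 2^15, 30274 = cos(pi/8) * 2^15.
 * Despite the _wht name, VP7 transforms the luma DC block with the same
 * 4-point DCT butterfly used by vp7_idct_add_c below. */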
static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
        b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
        c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
        d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        AV_ZERO64(dc + i * 4);
        block[0][i][0] = (a1 + d1 + 0x20000) >> 18;
        block[3][i][0] = (a1 - d1 + 0x20000) >> 18;
        block[1][i][0] = (b1 + c1 + 0x20000) >> 18;
        block[2][i][0] = (b1 - c1 + 0x20000) >> 18;
    }
}

static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
        b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
        c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
        d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
        AV_ZERO64(block + i * 4);
        tmp[i * 4 + 0] = (a1 + d1) >> 14;
        tmp[i * 4 + 3] = (a1 - d1) >> 14;
        tmp[i * 4 + 1] = (b1 + c1) >> 14;
        tmp[i * 4 + 2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] + ((a1 + d1 + 0x20000) >> 18));
        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] + ((a1 - d1 + 0x20000) >> 18));
        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] + ((b1 + c1 + 0x20000) >> 18));
        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] + ((b1 - c1 + 0x20000) >> 18));
    }
}

static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst   += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp7)
#endif

// TODO: Maybe add dequant
#if CONFIG_VP8_DECODER
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
        t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
        t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
        t3 = dc[0 * 4 + i] - dc[3 * 4 + i];

        dc[0 * 4 + i] = t0 + t1;
        dc[1 * 4 + i] = t3 + t2;
        dc[2 * 4 + i] = t0 - t1;
        dc[3 * 4 + i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
        t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
        t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
        t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
        AV_ZERO64(dc + i * 4);

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}

static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}
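/* 20091 / 65536 = sqrt(2) * cos(pi/8) - 1 and 35468 / 65536 = sqrt(2) * sin(pi/8);
 * these are the cospi8sqrt2minus1 and sinpi8sqrt2 constants of the VP8
 * specification (RFC 6386). */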
#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
#define MUL_35468(a)  (((a) * 35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0 * 4 + i] + block[2 * 4 + i];
        t1 = block[0 * 4 + i] - block[2 * 4 + i];
        t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
        t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
        block[0 * 4 + i] = 0;
        block[1 * 4 + i] = 0;
        block[2 * 4 + i] = 0;
        block[3 * 4 + i] = 0;

        tmp[i * 4 + 0] = t0 + t3;
        tmp[i * 4 + 1] = t1 + t2;
        tmp[i * 4 + 2] = t1 - t2;
        tmp[i * 4 + 3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
        t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
        t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
        t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst   += stride;
    }
}

static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (block[0] + 4) >> 3;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst   += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp8)
#endif
// because I like only having two parameters to pass functions...
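/* Load the four pixels on each side of the edge being filtered: p3..p0 before
 * the edge, q0..q3 after it. The callers pass stride = 1 or the row stride,
 * so the same filter code handles both horizontal and vertical edges. */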
#define LOAD_PIXELS \
    int av_unused p3 = p[-4 * stride]; \
    int av_unused p2 = p[-3 * stride]; \
    int av_unused p1 = p[-2 * stride]; \
    int av_unused p0 = p[-1 * stride]; \
    int av_unused q0 = p[ 0 * stride]; \
    int av_unused q1 = p[ 1 * stride]; \
    int av_unused q2 = p[ 2 * stride]; \
    int av_unused q3 = p[ 3 * stride];

#define clip_int8(n) (cm[(n) + 0x80] - 0x80)
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
                                           int is4tap, int vpn)
{
    LOAD_PIXELS
    int a, f1, f2;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3 * (q0 - p0);

    if (is4tap)
        a += clip_int8(p1 - q1);

    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a + 4, 127) >> 3;

    if (vpn == 7)
        f2 = f1 - ((a & 7) == 4);
    else
        f2 = FFMIN(a + 3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1 * stride] = cm[p0 + f2];
    p[ 0 * stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1 + 1) >> 1;
        p[-2 * stride] = cm[p1 + a];
        p[ 1 * stride] = cm[q1 - a];
    }
}
static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return FFABS(p0 - q0) <= flim;
}

static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
}
/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 */
#define NORMAL_LIMIT(vpn) \
static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, \
                                                       ptrdiff_t stride, \
                                                       int E, int I) \
{ \
    LOAD_PIXELS \
    return vp ## vpn ## _simple_limit(p, stride, E) && \
           FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && FFABS(p1 - p0) <= I && \
           FFABS(q3 - q2) <= I && FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
}

NORMAL_LIMIT(7)
NORMAL_LIMIT(8)
// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
}
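/* Strong (macroblock-edge) filter: the correction w is spread over three
 * pixels on each side of the edge with weights 27/128, 18/128 and 9/128,
 * rounded via the +63 before the >> 7. */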
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1 - q1);
    w = clip_int8(w + 3 * (q0 - p0));

    a0 = (27 * w + 63) >> 7;
    a1 = (18 * w + 63) >> 7;
    a2 = ( 9 * w + 63) >> 7;

    p[-3 * stride] = cm[p2 + a2];
    p[-2 * stride] = cm[p1 + a1];
    p[-1 * stride] = cm[p0 + a0];
    p[ 0 * stride] = cm[q0 - a0];
    p[ 1 * stride] = cm[q1 - a1];
    p[ 2 * stride] = cm[q2 - a2];
}
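/* In the macros below, stridea steps along the filtered edge (one pixel per
 * iteration) and strideb steps across it, so the v and h variants are
 * generated from the same body by swapping the two strides (see the
 * instantiations further down). */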
#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
                                                                             ptrdiff_t stride, \
                                                                             int flim_E, int flim_I, \
                                                                             int hev_thresh) \
{ \
    int i; \
\
    for (i = 0; i < size; i++) \
        if (vp ## vpn ## _normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
            if (hev(dst + i * stridea, strideb, hev_thresh)) \
                filter_common(dst + i * stridea, strideb, 1, vpn); \
            else \
                filter_mbedge(dst + i * stridea, strideb); \
        } \
} \
\
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
                                                                                   ptrdiff_t stride, \
                                                                                   int flim_E, int flim_I, \
                                                                                   int hev_thresh) \
{ \
    int i; \
\
    for (i = 0; i < size; i++) \
        if (vp ## vpn ## _normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
            int hv = hev(dst + i * stridea, strideb, hev_thresh); \
            if (hv) \
                filter_common(dst + i * stridea, strideb, 1, vpn); \
            else \
                filter_common(dst + i * stridea, strideb, 0, vpn); \
        } \
}
#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, \
                                                       ptrdiff_t stride, int fE, \
                                                       int fI, int hev_thresh) \
{ \
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
} \
\
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, \
                                                             ptrdiff_t stride, int fE, \
                                                             int fI, int hev_thresh) \
{ \
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh); \
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh); \
}
#define LOOP_FILTER_SIMPLE(vpn) \
static void vp ## vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim) \
{ \
    int i; \
\
    for (i = 0; i < 16; i++) \
        if (vp ## vpn ## _simple_limit(dst + i, stride, flim)) \
            filter_common(dst + i, stride, 1, vpn); \
} \
\
static void vp ## vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim) \
{ \
    int i; \
\
    for (i = 0; i < 16; i++) \
        if (vp ## vpn ## _simple_limit(dst + i * stride, 1, flim)) \
            filter_common(dst + i * stride, 1, 1, vpn); \
}
#if CONFIG_VP7_DECODER
LOOP_FILTER(7, v, 16, 1, stride, )
LOOP_FILTER(7, h, 16, stride, 1, )
UV_LOOP_FILTER(7, v, 1, stride)
UV_LOOP_FILTER(7, h, stride, 1)
LOOP_FILTER_SIMPLE(7)
#endif

#if CONFIG_VP8_DECODER
LOOP_FILTER(8, v, 16, 1, stride, )
LOOP_FILTER(8, h, 16, stride, 1, )
UV_LOOP_FILTER(8, v, 1, stride)
UV_LOOP_FILTER(8, h, stride, 1)
LOOP_FILTER_SIMPLE(8)
#endif
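/* Six-tap subpel filter coefficients for the seven fractional (eighth-pel)
 * positions, indexed by mx - 1 (or my - 1). Only the magnitudes are stored;
 * FILTER_6TAP/FILTER_4TAP below subtract taps 1 and 4. Rows whose outer taps
 * are zero are used as 4-tap filters. */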
static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};
#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                          uint8_t *src, ptrdiff_t srcstride, \
                                          int h, int x, int y) \
{ \
    int i; \
    for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
        memcpy(dst, src, WIDTH); \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
        F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                                     uint8_t *src, ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                                     uint8_t *src, ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my - 1]; \
    const uint8_t *cm     = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                                                    uint8_t *src, ptrdiff_t srcstride, \
                                                                    int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    /* two passes: filter horizontally into tmp, including the extra rows \
     * the vertical tap needs, then filter tmp vertically into dst */ \
    src -= (2 - (VTAPS == 4)) * srcstride; \
\
    for (y = 0; y < h + VTAPS - 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    tmp    = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
    filter = subpel_filters[my - 1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}
VP8_EPEL_H(16, 4)
VP8_EPEL_H(8,  4)
VP8_EPEL_H(4,  4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8,  6)
VP8_EPEL_H(4,  6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8,  4)
VP8_EPEL_V(4,  4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8,  6)
VP8_EPEL_V(4,  6)
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8,  4, 4)
VP8_EPEL_HV(4,  4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8,  4, 6)
VP8_EPEL_HV(4,  4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8,  6, 4)
VP8_EPEL_HV(4,  6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8,  6, 6)
VP8_EPEL_HV(4,  6, 6)
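/* Bilinear MC: mx and my select one of eight phases; each output pixel is a
 * weighted average of two neighbours with weights (8 - mx, mx) horizontally
 * and (8 - my, my) vertically, rounded with +4 before the >> 3. */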
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
                                             uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
                                             uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int c = 8 - my, d = my; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t dstride, \
                                              uint8_t *src, ptrdiff_t sstride, \
                                              int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int c = 8 - my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    for (y = 0; y < h + 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        tmp += SIZE; \
        src += sstride; \
    } \
\
    tmp = tmp_array; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
        dst += dstride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
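/* The MC function tables are indexed as [size][vertical][horizontal], with
 * size 0/1/2 = 16/8/4 pixels wide and filter index 0 = no filtering (copy),
 * 1 = 4-tap, 2 = 6-tap for the epel table; the bilinear table uses the same
 * layout, with both nonzero indices mapping to the single bilinear filter. */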
#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c
av_cold void ff_vp8dsp_init(VP8DSPContext *dsp, int vp7)
{
#if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
#define VPX(f) vp7 ? vp7_ ## f : vp8_ ## f
#elif CONFIG_VP7_DECODER
#define VPX(f) vp7_ ## f
#else // CONFIG_VP8_DECODER
#define VPX(f) vp8_ ## f
#endif

    dsp->vp8_luma_dc_wht            = VPX(luma_dc_wht_c);
    dsp->vp8_luma_dc_wht_dc         = VPX(luma_dc_wht_dc_c);
    dsp->vp8_idct_add               = VPX(idct_add_c);
    dsp->vp8_idct_dc_add            = VPX(idct_dc_add_c);
    dsp->vp8_idct_dc_add4y          = VPX(idct_dc_add4y_c);
    dsp->vp8_idct_dc_add4uv         = VPX(idct_dc_add4uv_c);

    dsp->vp8_v_loop_filter16y       = VPX(v_loop_filter16_c);
    dsp->vp8_h_loop_filter16y       = VPX(h_loop_filter16_c);
    dsp->vp8_v_loop_filter8uv       = VPX(v_loop_filter8uv_c);
    dsp->vp8_h_loop_filter8uv       = VPX(h_loop_filter8uv_c);

    dsp->vp8_v_loop_filter16y_inner = VPX(v_loop_filter16_inner_c);
    dsp->vp8_h_loop_filter16y_inner = VPX(h_loop_filter16_inner_c);
    dsp->vp8_v_loop_filter8uv_inner = VPX(v_loop_filter8uv_inner_c);
    dsp->vp8_h_loop_filter8uv_inner = VPX(h_loop_filter8uv_inner_c);

    dsp->vp8_v_loop_filter_simple   = VPX(v_loop_filter_simple_c);
    dsp->vp8_h_loop_filter_simple   = VPX(h_loop_filter_simple_c);

    VP8_MC_FUNC(0, 16);
    VP8_MC_FUNC(1, 8);
    VP8_MC_FUNC(2, 4);

    VP8_BILINEAR_MC_FUNC(0, 16);
    VP8_BILINEAR_MC_FUNC(1, 8);
    VP8_BILINEAR_MC_FUNC(2, 4);

    if (ARCH_ARM)
        ff_vp8dsp_init_arm(dsp, vp7);
    if (ARCH_PPC)
        ff_vp8dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vp8dsp_init_x86(dsp, vp7);
}