/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP utils
 */

#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "copy_block.h"
#include "dct.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "imgconvert.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"

uint32_t ff_square_tab[512] = { 0, };

#define BIT_DEPTH 16
#include "dsputilenc_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 8
#include "hpel_template.c"
#include "tpel_template.c"
#include "dsputil_template.c"
#include "dsputilenc_template.c"

// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL / 255 * 0x7f)
#define pb_80 (~0UL / 255 * 0x80)
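
/* Editorial note (not in the original source): ~0UL / 255 evaluates to
 * 0x0101...01 for the width of unsigned long, so multiplying by 0x7f or 0x80
 * replicates that byte into every lane; with a 32-bit unsigned long,
 * pb_7f == 0x7f7f7f7f and pb_80 == 0x80808080. */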
/* Specific zigzag scan for 248 idct. NOTE that unlike the
 * specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
     0,  8,  1,  9, 16, 24,  2, 10,
    17, 25, 32, 40, 48, 56, 33, 41,
    18, 26,  3, 11,  4, 12, 19, 27,
    34, 42, 49, 57, 50, 58, 35, 43,
    20, 28,  5, 13,  6, 14, 21, 29,
    36, 44, 51, 59, 52, 60, 37, 45,
    22, 30,  7, 15, 23, 31, 38, 46,
    53, 61, 54, 62, 39, 47, 55, 63,
};

const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};

/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64] = {
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};

static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    int i, end;

    st->scantable = src_scantable;

    for (i = 0; i < 64; i++) {
        int j = src_scantable[i];
        st->permutated[i] = permutation[j];
    }

    end = -1;
    for (i = 0; i < 64; i++) {
        int j = st->permutated[i];
        if (j > end)
            end = j;
        st->raster_end[i] = end;
    }
}

av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
                                           int idct_permutation_type)
{
    int i;

    switch (idct_permutation_type) {
    case FF_NO_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = i;
        break;
    case FF_LIBMPEG2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
        break;
    case FF_SIMPLE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = simple_mmx_permutation[i];
        break;
    case FF_TRANSPOSE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
        break;
    case FF_PARTTRANS_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
        break;
    case FF_SSE2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
        break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Internal error, IDCT permutation not set\n");
    }
}
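
/* Usage sketch (editorial illustration, not part of the original file):
 * callers normally pick an IDCT permutation first and then build their scan
 * tables through it, roughly
 *
 *     ff_init_scantable_permutation(c->idct_permutation, c->idct_permutation_type);
 *     ff_init_scantable(c->idct_permutation, &intra_scantable, ff_zigzag_direct);
 *
 * so that coefficients stored in scan order land at the positions the
 * selected IDCT implementation expects. */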
static int pix_sum_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
            s += pix[0];
            s += pix[1];
            s += pix[2];
            s += pix[3];
            s += pix[4];
            s += pix[5];
            s += pix[6];
            s += pix[7];
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}

static int pix_norm1_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if 0
            s += sq[pix[0]];
            s += sq[pix[1]];
            s += sq[pix[2]];
            s += sq[pix[3]];
            s += sq[pix[4]];
            s += sq[pix[5]];
            s += sq[pix[6]];
            s += sq[pix[7]];
#else
#if HAVE_FAST_64BIT
            register uint64_t x = *(uint64_t *) pix;
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            s += sq[(x >> 32) & 0xff];
            s += sq[(x >> 40) & 0xff];
            s += sq[(x >> 48) & 0xff];
            s += sq[(x >> 56) & 0xff];
#else
            register uint32_t x = *(uint32_t *) pix;
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            x = *(uint32_t *) (pix + 4);
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
#endif
#endif
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}
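
/* Editorial note: ff_square_tab is filled at init time so that entry 256 + d
 * holds d * d for d in [-256, 255]; pointing sq at ff_square_tab + 256 lets
 * pix_norm1_c above and the sse*_c functions below index it directly with a
 * (possibly negative) pixel difference. */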
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
{
    int i;

    for (i = 0; i + 8 <= w; i += 8) {
        dst[i + 0] = av_bswap32(src[i + 0]);
        dst[i + 1] = av_bswap32(src[i + 1]);
        dst[i + 2] = av_bswap32(src[i + 2]);
        dst[i + 3] = av_bswap32(src[i + 3]);
        dst[i + 4] = av_bswap32(src[i + 4]);
        dst[i + 5] = av_bswap32(src[i + 5]);
        dst[i + 6] = av_bswap32(src[i + 6]);
        dst[i + 7] = av_bswap32(src[i + 7]);
    }
    for (; i < w; i++)
        dst[i + 0] = av_bswap32(src[i + 0]);
}

static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
{
    while (len--)
        *dst++ = av_bswap16(*src++);
}
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0]  - pix2[0]];
        s += sq[pix1[1]  - pix2[1]];
        s += sq[pix1[2]  - pix2[2]];
        s += sq[pix1[3]  - pix2[3]];
        s += sq[pix1[4]  - pix2[4]];
        s += sq[pix1[5]  - pix2[5]];
        s += sq[pix1[6]  - pix2[6]];
        s += sq[pix1[7]  - pix2[7]];
        s += sq[pix1[8]  - pix2[8]];
        s += sq[pix1[9]  - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
static void diff_pixels_c(int16_t *restrict block, const uint8_t *s1,
                          const uint8_t *s2, int stride)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        block[0] = s1[0] - s2[0];
        block[1] = s1[1] - s2[1];
        block[2] = s1[2] - s2[2];
        block[3] = s1[3] - s2[3];
        block[4] = s1[4] - s2[4];
        block[5] = s1[5] - s2[5];
        block[6] = s1[6] - s2[6];
        block[7] = s1[7] - s2[7];
        s1    += stride;
        s2    += stride;
        block += 8;
    }
}

static void put_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(block[0]);
        pixels[1] = av_clip_uint8(block[1]);
        pixels[2] = av_clip_uint8(block[2]);
        pixels[3] = av_clip_uint8(block[3]);
        pixels[4] = av_clip_uint8(block[4]);
        pixels[5] = av_clip_uint8(block[5]);
        pixels[6] = av_clip_uint8(block[6]);
        pixels[7] = av_clip_uint8(block[7]);
        pixels += line_size;
        block  += 8;
    }
}

static void put_signed_pixels_clamped_c(const int16_t *block,
                                        uint8_t *restrict pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            if (*block < -128)
                *pixels = 0;
            else if (*block > 127)
                *pixels = 255;
            else
                *pixels = (uint8_t) (*block + 128);
            block++;
            pixels++;
        }
        pixels += (line_size - 8);
    }
}
static void add_pixels8_c(uint8_t *restrict pixels, int16_t *block,
                          int line_size)
{
    int i;

    for (i = 0; i < 8; i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];
        pixels += line_size;
        block  += 8;
    }
}

static void add_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(pixels[0] + block[0]);
        pixels[1] = av_clip_uint8(pixels[1] + block[1]);
        pixels[2] = av_clip_uint8(pixels[2] + block[2]);
        pixels[3] = av_clip_uint8(pixels[3] + block[3]);
        pixels[4] = av_clip_uint8(pixels[4] + block[4]);
        pixels[5] = av_clip_uint8(pixels[5] + block[5]);
        pixels[6] = av_clip_uint8(pixels[6] + block[6]);
        pixels[7] = av_clip_uint8(pixels[7] + block[7]);
        pixels += line_size;
        block  += 8;
    }
}

static int sum_abs_dctelem_c(int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}

static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 16);
        block += line_size;
    }
}

static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 8);
        block += line_size;
    }
}

#define avg2(a, b)       ((a + b + 1) >> 1)
#define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
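
/* Editorial note: avg2()/avg4() are the rounded halfpel averages used by the
 * pix_abs*_x2/_y2/_xy2 SAD routines further down: avg2(a, b) rounds the mean
 * up on ties, avg4() averages the four diagonal neighbours with +2 rounding. */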
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
                   int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16);
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + rounder) >> 8;
        dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + rounder) >> 8;
        dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + rounder) >> 8;
        dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + rounder) >> 8;
        dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + rounder) >> 8;
        dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + rounder) >> 8;
        dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + rounder) >> 8;
        dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + rounder) >> 8;
        dst += stride;
        src += stride;
    }
}
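
/* Editorial note: gmc1_c is the one-warp-point GMC case, a plain bilinear
 * interpolation at 1/16-pel precision; the weights A, B, C and D always sum
 * to 16 * 16 = 256, so the final ">> 8" together with the caller-supplied
 * rounder renormalizes the result to 8 bits. */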
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r,
              int width, int height)
{
    int y, vx, vy;
    const int s = 1 << shift;

    width--;
    height--;

    for (y = 0; y < h; y++) {
        int x;

        vx = ox;
        vy = oy;
        for (x = 0; x < 8; x++) { // FIXME: optimize
            int index;
            int src_x  = vx >> 16;
            int src_y  = vy >> 16;
            int frac_x = src_x & (s - 1);
            int frac_y = src_y & (s - 1);
            src_x    >>= shift;
            src_y    >>= shift;

            if ((unsigned) src_x < width) {
                if ((unsigned) src_y < height) {
                    index = src_x + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]              * (s - frac_x) +
                          src[index + 1]          * frac_x) * (s - frac_y) +
                         (src[index + stride]     * (s - frac_x) +
                          src[index + stride + 1] * frac_x) * frac_y +
                         r) >> (shift * 2);
                } else {
                    index = src_x + av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] =
                        ((src[index]     * (s - frac_x) +
                          src[index + 1] * frac_x) * s +
                         r) >> (shift * 2);
                }
            } else {
                if ((unsigned) src_y < height) {
                    index = av_clip(src_x, 0, width) + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]          * (s - frac_y) +
                          src[index + stride] * frac_y) * s +
                         r) >> (shift * 2);
                } else {
                    index = av_clip(src_x, 0, width) +
                            av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] = src[index];
                }
            }

            vx += dxx;
            vy += dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
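
/* Editorial note: ff_gmc_c handles general (affine) global motion
 * compensation: the sample position advances by (dxx, dyx) along a row and
 * the row origin by (dxy, dyy), so every destination pixel gets its own
 * fractional source coordinate, and positions outside the picture are clamped
 * to the nearest edge sample. The QPEL_MC macro below then expands into the
 * full set of MPEG-4 quarter-pel put/avg functions built around a symmetric
 * lowpass kernel with taps (-1, 3, -6, 20, 20, -6, 3, -1)/32, mirrored at the
 * block edges. */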
  487. #define QPEL_MC(r, OPNAME, RND, OP) \
  488. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \
  489. int dstStride, int srcStride, \
  490. int h) \
  491. { \
  492. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  493. int i; \
  494. \
  495. for (i = 0; i < h; i++) { \
  496. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  497. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  498. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  499. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  500. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  501. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \
  502. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \
  503. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \
  504. dst += dstStride; \
  505. src += srcStride; \
  506. } \
  507. } \
  508. \
  509. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \
  510. int dstStride, int srcStride) \
  511. { \
  512. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  513. const int w = 8; \
  514. int i; \
  515. \
  516. for (i = 0; i < w; i++) { \
  517. const int src0 = src[0 * srcStride]; \
  518. const int src1 = src[1 * srcStride]; \
  519. const int src2 = src[2 * srcStride]; \
  520. const int src3 = src[3 * srcStride]; \
  521. const int src4 = src[4 * srcStride]; \
  522. const int src5 = src[5 * srcStride]; \
  523. const int src6 = src[6 * srcStride]; \
  524. const int src7 = src[7 * srcStride]; \
  525. const int src8 = src[8 * srcStride]; \
  526. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  527. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  528. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  529. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  530. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  531. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \
  532. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \
  533. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \
  534. dst++; \
  535. src++; \
  536. } \
  537. } \
  538. \
  539. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \
  540. int dstStride, int srcStride, \
  541. int h) \
  542. { \
  543. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  544. int i; \
  545. \
  546. for (i = 0; i < h; i++) { \
  547. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  548. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  549. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  550. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  551. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  552. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \
  553. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \
  554. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \
  555. OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \
  556. OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \
  557. OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \
  558. OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \
  559. OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \
  560. OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \
  561. OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \
  562. OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \
  563. dst += dstStride; \
  564. src += srcStride; \
  565. } \
  566. } \
  567. \
  568. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \
  569. int dstStride, int srcStride) \
  570. { \
  571. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  572. const int w = 16; \
  573. int i; \
  574. \
  575. for (i = 0; i < w; i++) { \
  576. const int src0 = src[0 * srcStride]; \
  577. const int src1 = src[1 * srcStride]; \
  578. const int src2 = src[2 * srcStride]; \
  579. const int src3 = src[3 * srcStride]; \
  580. const int src4 = src[4 * srcStride]; \
  581. const int src5 = src[5 * srcStride]; \
  582. const int src6 = src[6 * srcStride]; \
  583. const int src7 = src[7 * srcStride]; \
  584. const int src8 = src[8 * srcStride]; \
  585. const int src9 = src[9 * srcStride]; \
  586. const int src10 = src[10 * srcStride]; \
  587. const int src11 = src[11 * srcStride]; \
  588. const int src12 = src[12 * srcStride]; \
  589. const int src13 = src[13 * srcStride]; \
  590. const int src14 = src[14 * srcStride]; \
  591. const int src15 = src[15 * srcStride]; \
  592. const int src16 = src[16 * srcStride]; \
  593. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  594. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  595. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  596. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  597. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  598. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \
  599. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \
  600. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \
  601. OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \
  602. OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \
  603. OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \
  604. OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \
  605. OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \
  606. OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \
  607. OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \
  608. OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \
  609. dst++; \
  610. src++; \
  611. } \
  612. } \
  613. \
  614. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \
  615. ptrdiff_t stride) \
  616. { \
  617. uint8_t half[64]; \
  618. \
  619. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  620. OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \
  621. } \
  622. \
  623. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \
  624. ptrdiff_t stride) \
  625. { \
  626. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \
  627. } \
  628. \
  629. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \
  630. ptrdiff_t stride) \
  631. { \
  632. uint8_t half[64]; \
  633. \
  634. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  635. OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \
  636. } \
  637. \
  638. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \
  639. ptrdiff_t stride) \
  640. { \
  641. uint8_t full[16 * 9]; \
  642. uint8_t half[64]; \
  643. \
  644. copy_block9(full, src, 16, stride, 9); \
  645. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  646. OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \
  647. } \
  648. \
  649. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \
  650. ptrdiff_t stride) \
  651. { \
  652. uint8_t full[16 * 9]; \
  653. \
  654. copy_block9(full, src, 16, stride, 9); \
  655. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \
  656. } \
  657. \
  658. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \
  659. ptrdiff_t stride) \
  660. { \
  661. uint8_t full[16 * 9]; \
  662. uint8_t half[64]; \
  663. \
  664. copy_block9(full, src, 16, stride, 9); \
  665. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  666. OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \
  667. } \
  668. \
  669. void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \
  670. ptrdiff_t stride) \
  671. { \
  672. uint8_t full[16 * 9]; \
  673. uint8_t halfH[72]; \
  674. uint8_t halfV[64]; \
  675. uint8_t halfHV[64]; \
  676. \
  677. copy_block9(full, src, 16, stride, 9); \
  678. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  679. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  680. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  681. OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \
  682. stride, 16, 8, 8, 8, 8); \
  683. } \
  684. \
  685. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \
  686. ptrdiff_t stride) \
  687. { \
  688. uint8_t full[16 * 9]; \
  689. uint8_t halfH[72]; \
  690. uint8_t halfHV[64]; \
  691. \
  692. copy_block9(full, src, 16, stride, 9); \
  693. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  694. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  695. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  696. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  697. } \
  698. \
  699. void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \
  700. ptrdiff_t stride) \
  701. { \
  702. uint8_t full[16 * 9]; \
  703. uint8_t halfH[72]; \
  704. uint8_t halfV[64]; \
  705. uint8_t halfHV[64]; \
  706. \
  707. copy_block9(full, src, 16, stride, 9); \
  708. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  709. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  710. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  711. OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  712. stride, 16, 8, 8, 8, 8); \
  713. } \
  714. \
  715. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \
  716. ptrdiff_t stride) \
  717. { \
  718. uint8_t full[16 * 9]; \
  719. uint8_t halfH[72]; \
  720. uint8_t halfHV[64]; \
  721. \
  722. copy_block9(full, src, 16, stride, 9); \
  723. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  724. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  725. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  726. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  727. } \
  728. \
  729. void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \
  730. ptrdiff_t stride) \
  731. { \
  732. uint8_t full[16 * 9]; \
  733. uint8_t halfH[72]; \
  734. uint8_t halfV[64]; \
  735. uint8_t halfHV[64]; \
  736. \
  737. copy_block9(full, src, 16, stride, 9); \
  738. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  739. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  740. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  741. OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \
  742. stride, 16, 8, 8, 8, 8); \
  743. } \
  744. \
  745. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \
  746. ptrdiff_t stride) \
  747. { \
  748. uint8_t full[16 * 9]; \
  749. uint8_t halfH[72]; \
  750. uint8_t halfHV[64]; \
  751. \
  752. copy_block9(full, src, 16, stride, 9); \
  753. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  754. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  755. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  756. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  757. } \
  758. \
  759. void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \
  760. ptrdiff_t stride) \
  761. { \
  762. uint8_t full[16 * 9]; \
  763. uint8_t halfH[72]; \
  764. uint8_t halfV[64]; \
  765. uint8_t halfHV[64]; \
  766. \
  767. copy_block9(full, src, 16, stride, 9); \
  768. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  769. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  770. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  771. OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \
  772. stride, 16, 8, 8, 8, 8); \
  773. } \
  774. \
  775. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \
  776. ptrdiff_t stride) \
  777. { \
  778. uint8_t full[16 * 9]; \
  779. uint8_t halfH[72]; \
  780. uint8_t halfHV[64]; \
  781. \
  782. copy_block9(full, src, 16, stride, 9); \
  783. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  784. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  785. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  786. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  787. } \
  788. \
  789. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \
  790. ptrdiff_t stride) \
  791. { \
  792. uint8_t halfH[72]; \
  793. uint8_t halfHV[64]; \
  794. \
  795. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  796. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  797. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  798. } \
  799. \
  800. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \
  801. ptrdiff_t stride) \
  802. { \
  803. uint8_t halfH[72]; \
  804. uint8_t halfHV[64]; \
  805. \
  806. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  807. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  808. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  809. } \
  810. \
  811. void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \
  812. ptrdiff_t stride) \
  813. { \
  814. uint8_t full[16 * 9]; \
  815. uint8_t halfH[72]; \
  816. uint8_t halfV[64]; \
  817. uint8_t halfHV[64]; \
  818. \
  819. copy_block9(full, src, 16, stride, 9); \
  820. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  821. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  822. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  823. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  824. } \
  825. \
  826. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \
  827. ptrdiff_t stride) \
  828. { \
  829. uint8_t full[16 * 9]; \
  830. uint8_t halfH[72]; \
  831. \
  832. copy_block9(full, src, 16, stride, 9); \
  833. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  834. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  835. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  836. } \
  837. \
  838. void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \
  839. ptrdiff_t stride) \
  840. { \
  841. uint8_t full[16 * 9]; \
  842. uint8_t halfH[72]; \
  843. uint8_t halfV[64]; \
  844. uint8_t halfHV[64]; \
  845. \
  846. copy_block9(full, src, 16, stride, 9); \
  847. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  848. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  849. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  850. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  851. } \
  852. \
  853. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \
  854. ptrdiff_t stride) \
  855. { \
  856. uint8_t full[16 * 9]; \
  857. uint8_t halfH[72]; \
  858. \
  859. copy_block9(full, src, 16, stride, 9); \
  860. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  861. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  862. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  863. } \
  864. \
  865. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \
  866. ptrdiff_t stride) \
  867. { \
  868. uint8_t halfH[72]; \
  869. \
  870. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  871. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  872. } \
  873. \
  874. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \
  875. ptrdiff_t stride) \
  876. { \
  877. uint8_t half[256]; \
  878. \
  879. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  880. OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \
  881. } \
  882. \
  883. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \
  884. ptrdiff_t stride) \
  885. { \
  886. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \
  887. } \
  888. \
  889. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \
  890. ptrdiff_t stride) \
  891. { \
  892. uint8_t half[256]; \
  893. \
  894. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  895. OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \
  896. } \
  897. \
  898. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \
  899. ptrdiff_t stride) \
  900. { \
  901. uint8_t full[24 * 17]; \
  902. uint8_t half[256]; \
  903. \
  904. copy_block17(full, src, 24, stride, 17); \
  905. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  906. OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \
  907. } \
  908. \
  909. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \
  910. ptrdiff_t stride) \
  911. { \
  912. uint8_t full[24 * 17]; \
  913. \
  914. copy_block17(full, src, 24, stride, 17); \
  915. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \
  916. } \
  917. \
  918. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \
  919. ptrdiff_t stride) \
  920. { \
  921. uint8_t full[24 * 17]; \
  922. uint8_t half[256]; \
  923. \
  924. copy_block17(full, src, 24, stride, 17); \
  925. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  926. OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \
  927. } \
  928. \
  929. void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \
  930. ptrdiff_t stride) \
  931. { \
  932. uint8_t full[24 * 17]; \
  933. uint8_t halfH[272]; \
  934. uint8_t halfV[256]; \
  935. uint8_t halfHV[256]; \
  936. \
  937. copy_block17(full, src, 24, stride, 17); \
  938. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  939. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  940. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  941. OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \
  942. stride, 24, 16, 16, 16, 16); \
  943. } \
  944. \
  945. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \
  946. ptrdiff_t stride) \
  947. { \
  948. uint8_t full[24 * 17]; \
  949. uint8_t halfH[272]; \
  950. uint8_t halfHV[256]; \
  951. \
  952. copy_block17(full, src, 24, stride, 17); \
  953. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  954. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  955. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  956. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  957. } \
  958. \
  959. void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \
  960. ptrdiff_t stride) \
  961. { \
  962. uint8_t full[24 * 17]; \
  963. uint8_t halfH[272]; \
  964. uint8_t halfV[256]; \
  965. uint8_t halfHV[256]; \
  966. \
  967. copy_block17(full, src, 24, stride, 17); \
  968. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  969. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  970. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  971. OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  972. stride, 24, 16, 16, 16, 16); \
  973. } \
  974. \
  975. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \
  976. ptrdiff_t stride) \
  977. { \
  978. uint8_t full[24 * 17]; \
  979. uint8_t halfH[272]; \
  980. uint8_t halfHV[256]; \
  981. \
  982. copy_block17(full, src, 24, stride, 17); \
  983. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  984. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  985. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  986. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  987. } \
  988. \
  989. void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \
  990. ptrdiff_t stride) \
  991. { \
  992. uint8_t full[24 * 17]; \
  993. uint8_t halfH[272]; \
  994. uint8_t halfV[256]; \
  995. uint8_t halfHV[256]; \
  996. \
  997. copy_block17(full, src, 24, stride, 17); \
  998. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  999. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1000. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1001. OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \
  1002. stride, 24, 16, 16, 16, 16); \
  1003. } \
  1004. \
  1005. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \
  1006. ptrdiff_t stride) \
  1007. { \
  1008. uint8_t full[24 * 17]; \
  1009. uint8_t halfH[272]; \
  1010. uint8_t halfHV[256]; \
  1011. \
  1012. copy_block17(full, src, 24, stride, 17); \
  1013. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1014. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1015. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1016. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1017. } \
  1018. \
  1019. void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \
  1020. ptrdiff_t stride) \
  1021. { \
  1022. uint8_t full[24 * 17]; \
  1023. uint8_t halfH[272]; \
  1024. uint8_t halfV[256]; \
  1025. uint8_t halfHV[256]; \
  1026. \
  1027. copy_block17(full, src, 24, stride, 17); \
  1028. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1029. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1030. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1031. OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \
  1032. stride, 24, 16, 16, 16, 16); \
  1033. } \
  1034. \
  1035. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \
  1036. ptrdiff_t stride) \
  1037. { \
  1038. uint8_t full[24 * 17]; \
  1039. uint8_t halfH[272]; \
  1040. uint8_t halfHV[256]; \
  1041. \
  1042. copy_block17(full, src, 24, stride, 17); \
  1043. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1044. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1045. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1046. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1047. } \
  1048. \
  1049. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \
  1050. ptrdiff_t stride) \
  1051. { \
  1052. uint8_t halfH[272]; \
  1053. uint8_t halfHV[256]; \
  1054. \
  1055. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1056. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1057. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1058. } \
  1059. \
  1060. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \
  1061. ptrdiff_t stride) \
  1062. { \
  1063. uint8_t halfH[272]; \
  1064. uint8_t halfHV[256]; \
  1065. \
  1066. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1067. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1068. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1069. } \
  1070. \
  1071. void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1072. ptrdiff_t stride) \
  1073. { \
  1074. uint8_t full[24 * 17]; \
  1075. uint8_t halfH[272]; \
  1076. uint8_t halfV[256]; \
  1077. uint8_t halfHV[256]; \
  1078. \
  1079. copy_block17(full, src, 24, stride, 17); \
  1080. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1081. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1082. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1083. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1084. } \
  1085. \
  1086. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \
  1087. ptrdiff_t stride) \
  1088. { \
  1089. uint8_t full[24 * 17]; \
  1090. uint8_t halfH[272]; \
  1091. \
  1092. copy_block17(full, src, 24, stride, 17); \
  1093. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1094. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1095. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1096. } \
  1097. \
  1098. void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1099. ptrdiff_t stride) \
  1100. { \
  1101. uint8_t full[24 * 17]; \
  1102. uint8_t halfH[272]; \
  1103. uint8_t halfV[256]; \
  1104. uint8_t halfHV[256]; \
  1105. \
  1106. copy_block17(full, src, 24, stride, 17); \
  1107. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1108. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1109. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1110. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1111. } \
  1112. \
  1113. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \
  1114. ptrdiff_t stride) \
  1115. { \
  1116. uint8_t full[24 * 17]; \
  1117. uint8_t halfH[272]; \
  1118. \
  1119. copy_block17(full, src, 24, stride, 17); \
  1120. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1121. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1122. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1123. } \
  1124. \
  1125. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \
  1126. ptrdiff_t stride) \
  1127. { \
  1128. uint8_t halfH[272]; \
  1129. \
  1130. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1131. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1132. }
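
/* Editorial note: the op_* macros below clamp through cm (ff_crop_tab) and
 * scale the filter output back down: the rounding variants add 16 before the
 * ">> 5" (divide by 32), the no_rnd variants add only 15. QPEL_MC is then
 * instantiated three times to generate the put_, put_no_rnd_ and avg_
 * flavours of every quarter-pel position. */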
#define op_avg(a, b)        a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1)
#define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1)
#define op_put(a, b)        a = cm[((b) + 16) >> 5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]

QPEL_MC(0, put_,        _,        op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_,        _,        op_avg)

#undef op_avg
#undef op_put
#undef op_put_no_rnd

void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_8_c(dst, src, stride, 8);
}

void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_8_c(dst, src, stride, 8);
}

void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_8_c(dst, src, stride, 16);
}

void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_8_c(dst, src, stride, 16);
}

#define put_qpel8_mc00_c         ff_put_pixels8x8_c
#define avg_qpel8_mc00_c         ff_avg_pixels8x8_c
#define put_qpel16_mc00_c        ff_put_pixels16x16_c
#define avg_qpel16_mc00_c        ff_avg_pixels16x16_c
#define put_no_rnd_qpel8_mc00_c  ff_put_pixels8x8_c
#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int h)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = cm[(9 * (src[0] + src[1]) - (src[-1] + src[2]) + 8) >> 4];
        dst[1] = cm[(9 * (src[1] + src[2]) - (src[0]  + src[3]) + 8) >> 4];
        dst[2] = cm[(9 * (src[2] + src[3]) - (src[1]  + src[4]) + 8) >> 4];
        dst[3] = cm[(9 * (src[3] + src[4]) - (src[2]  + src[5]) + 8) >> 4];
        dst[4] = cm[(9 * (src[4] + src[5]) - (src[3]  + src[6]) + 8) >> 4];
        dst[5] = cm[(9 * (src[5] + src[6]) - (src[4]  + src[7]) + 8) >> 4];
        dst[6] = cm[(9 * (src[6] + src[7]) - (src[5]  + src[8]) + 8) >> 4];
        dst[7] = cm[(9 * (src[7] + src[8]) - (src[6]  + src[9]) + 8) >> 4];
        dst += dstStride;
        src += srcStride;
    }
}

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int w)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < w; i++) {
        const int src_1 = src[-srcStride];
        const int src0  = src[0];
        const int src1  = src[srcStride];
        const int src2  = src[2 * srcStride];
        const int src3  = src[3 * srcStride];
        const int src4  = src[4 * srcStride];
        const int src5  = src[5 * srcStride];
        const int src6  = src[6 * srcStride];
        const int src7  = src[7 * srcStride];
        const int src8  = src[8 * srcStride];
        const int src9  = src[9 * srcStride];
        dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
        dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0  + src3) + 8) >> 4];
        dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1  + src4) + 8) >> 4];
        dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2  + src5) + 8) >> 4];
        dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3  + src6) + 8) >> 4];
        dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4  + src7) + 8) >> 4];
        dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5  + src8) + 8) >> 4];
        dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6  + src9) + 8) >> 4];
        src++;
        dst++;
    }
}
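
/* Editorial note: the two wmv2_mspel8 helpers above apply the WMV2 halfpel
 * filter (-1, 9, 9, -1) / 16 with rounding, clamping through ff_crop_tab; the
 * put_mspel8_mc* wrappers below combine horizontal and vertical passes for
 * the individual halfpel positions. */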
static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src + 1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH + 8, stride, 8, 8);
}
  1258. static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1259. int line_size, int h)
  1260. {
  1261. int s = 0, i;
  1262. for (i = 0; i < h; i++) {
  1263. s += abs(pix1[0] - pix2[0]);
  1264. s += abs(pix1[1] - pix2[1]);
  1265. s += abs(pix1[2] - pix2[2]);
  1266. s += abs(pix1[3] - pix2[3]);
  1267. s += abs(pix1[4] - pix2[4]);
  1268. s += abs(pix1[5] - pix2[5]);
  1269. s += abs(pix1[6] - pix2[6]);
  1270. s += abs(pix1[7] - pix2[7]);
  1271. s += abs(pix1[8] - pix2[8]);
  1272. s += abs(pix1[9] - pix2[9]);
  1273. s += abs(pix1[10] - pix2[10]);
  1274. s += abs(pix1[11] - pix2[11]);
  1275. s += abs(pix1[12] - pix2[12]);
  1276. s += abs(pix1[13] - pix2[13]);
  1277. s += abs(pix1[14] - pix2[14]);
  1278. s += abs(pix1[15] - pix2[15]);
  1279. pix1 += line_size;
  1280. pix2 += line_size;
  1281. }
  1282. return s;
  1283. }
  1284. static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1285. int line_size, int h)
  1286. {
  1287. int s = 0, i;
  1288. for (i = 0; i < h; i++) {
  1289. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1290. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1291. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1292. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1293. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1294. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1295. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1296. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1297. s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
  1298. s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
  1299. s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
  1300. s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
  1301. s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
  1302. s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
  1303. s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
  1304. s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
  1305. pix1 += line_size;
  1306. pix2 += line_size;
  1307. }
  1308. return s;
  1309. }
  1310. static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1311. int line_size, int h)
  1312. {
  1313. int s = 0, i;
  1314. uint8_t *pix3 = pix2 + line_size;
  1315. for (i = 0; i < h; i++) {
  1316. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1317. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1318. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1319. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1320. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1321. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1322. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1323. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1324. s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
  1325. s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
  1326. s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
  1327. s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
  1328. s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
  1329. s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
  1330. s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
  1331. s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
  1332. pix1 += line_size;
  1333. pix2 += line_size;
  1334. pix3 += line_size;
  1335. }
  1336. return s;
  1337. }
  1338. static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1339. int line_size, int h)
  1340. {
  1341. int s = 0, i;
  1342. uint8_t *pix3 = pix2 + line_size;
  1343. for (i = 0; i < h; i++) {
  1344. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1345. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1346. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1347. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1348. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1349. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1350. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1351. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1352. s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
  1353. s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
  1354. s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
  1355. s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
  1356. s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
  1357. s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
  1358. s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
  1359. s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
  1360. pix1 += line_size;
  1361. pix2 += line_size;
  1362. pix3 += line_size;
  1363. }
  1364. return s;
  1365. }
  1366. static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1367. int line_size, int h)
  1368. {
  1369. int s = 0, i;
  1370. for (i = 0; i < h; i++) {
  1371. s += abs(pix1[0] - pix2[0]);
  1372. s += abs(pix1[1] - pix2[1]);
  1373. s += abs(pix1[2] - pix2[2]);
  1374. s += abs(pix1[3] - pix2[3]);
  1375. s += abs(pix1[4] - pix2[4]);
  1376. s += abs(pix1[5] - pix2[5]);
  1377. s += abs(pix1[6] - pix2[6]);
  1378. s += abs(pix1[7] - pix2[7]);
  1379. pix1 += line_size;
  1380. pix2 += line_size;
  1381. }
  1382. return s;
  1383. }
  1384. static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1385. int line_size, int h)
  1386. {
  1387. int s = 0, i;
  1388. for (i = 0; i < h; i++) {
  1389. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1390. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1391. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1392. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1393. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1394. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1395. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1396. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1397. pix1 += line_size;
  1398. pix2 += line_size;
  1399. }
  1400. return s;
  1401. }
  1402. static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1403. int line_size, int h)
  1404. {
  1405. int s = 0, i;
  1406. uint8_t *pix3 = pix2 + line_size;
  1407. for (i = 0; i < h; i++) {
  1408. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1409. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1410. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1411. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1412. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1413. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1414. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1415. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1416. pix1 += line_size;
  1417. pix2 += line_size;
  1418. pix3 += line_size;
  1419. }
  1420. return s;
  1421. }
  1422. static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1423. int line_size, int h)
  1424. {
  1425. int s = 0, i;
  1426. uint8_t *pix3 = pix2 + line_size;
  1427. for (i = 0; i < h; i++) {
  1428. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1429. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1430. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1431. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1432. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1433. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1434. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1435. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1436. pix1 += line_size;
  1437. pix2 += line_size;
  1438. pix3 += line_size;
  1439. }
  1440. return s;
  1441. }
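/* Noise-preserving SSE: score1 is the plain squared pixel error, score2
 * accumulates the difference between the two blocks' local 2x2 gradient
 * magnitudes. The absolute value of score2, weighted by avctx->nsse_weight
 * (8 when no context is available), is added to the SSE so that losing or
 * inventing texture is penalized even when the plain SSE is low. */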
  1442. static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1443. {
  1444. int score1 = 0, score2 = 0, x, y;
  1445. for (y = 0; y < h; y++) {
  1446. for (x = 0; x < 16; x++)
  1447. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1448. if (y + 1 < h) {
  1449. for (x = 0; x < 15; x++)
  1450. score2 += FFABS(s1[x] - s1[x + stride] -
  1451. s1[x + 1] + s1[x + stride + 1]) -
  1452. FFABS(s2[x] - s2[x + stride] -
  1453. s2[x + 1] + s2[x + stride + 1]);
  1454. }
  1455. s1 += stride;
  1456. s2 += stride;
  1457. }
  1458. if (c)
  1459. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1460. else
  1461. return score1 + FFABS(score2) * 8;
  1462. }
  1463. static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1464. {
  1465. int score1 = 0, score2 = 0, x, y;
  1466. for (y = 0; y < h; y++) {
  1467. for (x = 0; x < 8; x++)
  1468. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1469. if (y + 1 < h) {
  1470. for (x = 0; x < 7; x++)
  1471. score2 += FFABS(s1[x] - s1[x + stride] -
  1472. s1[x + 1] + s1[x + stride + 1]) -
  1473. FFABS(s2[x] - s2[x + stride] -
  1474. s2[x + 1] + s2[x + stride + 1]);
  1475. }
  1476. s1 += stride;
  1477. s2 += stride;
  1478. }
  1479. if (c)
  1480. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1481. else
  1482. return score1 + FFABS(score2) * 8;
  1483. }
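/* try_8x8basis_c() estimates the weighted squared error that would result
 * from adding 'basis' scaled by 'scale' to the residual 'rem';
 * add_8x8basis_c() actually applies that update. These back the encoder's
 * coefficient-level noise-shaping / rate-distortion refinement. */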
  1484. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64],
  1485. int16_t basis[64], int scale)
  1486. {
  1487. int i;
  1488. unsigned int sum = 0;
  1489. for (i = 0; i < 8 * 8; i++) {
  1490. int b = rem[i] + ((basis[i] * scale +
  1491. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1492. (BASIS_SHIFT - RECON_SHIFT));
  1493. int w = weight[i];
  1494. b >>= RECON_SHIFT;
  1495. assert(-512 < b && b < 512);
  1496. sum += (w * b) * (w * b) >> 4;
  1497. }
  1498. return sum >> 2;
  1499. }
  1500. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale)
  1501. {
  1502. int i;
  1503. for (i = 0; i < 8 * 8; i++)
  1504. rem[i] += (basis[i] * scale +
  1505. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1506. (BASIS_SHIFT - RECON_SHIFT);
  1507. }
  1508. static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
  1509. int stride, int h)
  1510. {
  1511. return 0;
  1512. }
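/* Select the comparison functions for the FF_CMP_* metric requested in
 * 'type' and store them in the 6-entry cmp[] table used by motion
 * estimation; different slots correspond to different block sizes and
 * intra variants. */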
  1513. void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
  1514. {
  1515. int i;
  1516. memset(cmp, 0, sizeof(void *) * 6);
  1517. for (i = 0; i < 6; i++) {
  1518. switch (type & 0xFF) {
  1519. case FF_CMP_SAD:
  1520. cmp[i] = c->sad[i];
  1521. break;
  1522. case FF_CMP_SATD:
  1523. cmp[i] = c->hadamard8_diff[i];
  1524. break;
  1525. case FF_CMP_SSE:
  1526. cmp[i] = c->sse[i];
  1527. break;
  1528. case FF_CMP_DCT:
  1529. cmp[i] = c->dct_sad[i];
  1530. break;
  1531. case FF_CMP_DCT264:
  1532. cmp[i] = c->dct264_sad[i];
  1533. break;
  1534. case FF_CMP_DCTMAX:
  1535. cmp[i] = c->dct_max[i];
  1536. break;
  1537. case FF_CMP_PSNR:
  1538. cmp[i] = c->quant_psnr[i];
  1539. break;
  1540. case FF_CMP_BIT:
  1541. cmp[i] = c->bit[i];
  1542. break;
  1543. case FF_CMP_RD:
  1544. cmp[i] = c->rd[i];
  1545. break;
  1546. case FF_CMP_VSAD:
  1547. cmp[i] = c->vsad[i];
  1548. break;
  1549. case FF_CMP_VSSE:
  1550. cmp[i] = c->vsse[i];
  1551. break;
  1552. case FF_CMP_ZERO:
  1553. cmp[i] = zero_cmp;
  1554. break;
  1555. case FF_CMP_NSSE:
  1556. cmp[i] = c->nsse[i];
  1557. break;
  1558. default:
  1559. av_log(NULL, AV_LOG_ERROR,
  1560. "internal error in cmp function selection\n");
  1561. }
  1562. }
  1563. }
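/* Byte-wise add over whole machine words (SWAR): masking both operands with
 * pb_7f keeps the per-byte sums from carrying into the neighbouring byte,
 * and ((a ^ b) & pb_80) restores the top bit of every byte, so one long-sized
 * operation adds sizeof(long) pixels at once. diff_bytes_c() below uses the
 * matching trick for subtraction. */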
  1564. static void add_bytes_c(uint8_t *dst, uint8_t *src, int w)
  1565. {
  1566. long i;
  1567. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1568. long a = *(long *) (src + i);
  1569. long b = *(long *) (dst + i);
  1570. *(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
  1571. }
  1572. for (; i < w; i++)
  1573. dst[i + 0] += src[i + 0];
  1574. }
  1575. static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
  1576. {
  1577. long i;
  1578. #if !HAVE_FAST_UNALIGNED
  1579. if ((long) src2 & (sizeof(long) - 1)) {
  1580. for (i = 0; i + 7 < w; i += 8) {
  1581. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1582. dst[i + 1] = src1[i + 1] - src2[i + 1];
  1583. dst[i + 2] = src1[i + 2] - src2[i + 2];
  1584. dst[i + 3] = src1[i + 3] - src2[i + 3];
  1585. dst[i + 4] = src1[i + 4] - src2[i + 4];
  1586. dst[i + 5] = src1[i + 5] - src2[i + 5];
  1587. dst[i + 6] = src1[i + 6] - src2[i + 6];
  1588. dst[i + 7] = src1[i + 7] - src2[i + 7];
  1589. }
  1590. } else
  1591. #endif
  1592. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1593. long a = *(long *) (src1 + i);
  1594. long b = *(long *) (src2 + i);
  1595. *(long *) (dst + i) = ((a | pb_80) - (b & pb_7f)) ^
  1596. ((a ^ b ^ pb_80) & pb_80);
  1597. }
  1598. for (; i < w; i++)
  1599. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1600. }
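/* Median prediction: each sample is predicted as mid_pred() of the running
 * left value, src1[i] and left + src1[i] - left_top; add_... reconstructs
 * samples from the 'diff' residual, sub_... produces the residual for
 * encoding. In their HuffYUV use, src1 is the previous line, and the
 * *left / *left_top pointers carry the prediction state across calls. */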
  1601. static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1602. const uint8_t *diff, int w,
  1603. int *left, int *left_top)
  1604. {
  1605. int i;
  1606. uint8_t l, lt;
  1607. l = *left;
  1608. lt = *left_top;
  1609. for (i = 0; i < w; i++) {
  1610. l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
  1611. lt = src1[i];
  1612. dst[i] = l;
  1613. }
  1614. *left = l;
  1615. *left_top = lt;
  1616. }
  1617. static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1618. const uint8_t *src2, int w,
  1619. int *left, int *left_top)
  1620. {
  1621. int i;
  1622. uint8_t l, lt;
  1623. l = *left;
  1624. lt = *left_top;
  1625. for (i = 0; i < w; i++) {
  1626. const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
  1627. lt = src1[i];
  1628. l = src2[i];
  1629. dst[i] = l - pred;
  1630. }
  1631. *left = l;
  1632. *left_top = lt;
  1633. }
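/* Left prediction: keep a running sum ('acc') of the residuals and write it
 * out as the reconstructed samples; the loop is unrolled two samples at a
 * time and the final accumulator is returned so the caller can continue on
 * the next slice. The BGRA variant below keeps one accumulator per channel. */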
  1634. static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src,
  1635. int w, int acc)
  1636. {
  1637. int i;
  1638. for (i = 0; i < w - 1; i++) {
  1639. acc += src[i];
  1640. dst[i] = acc;
  1641. i++;
  1642. acc += src[i];
  1643. dst[i] = acc;
  1644. }
  1645. for (; i < w; i++) {
  1646. acc += src[i];
  1647. dst[i] = acc;
  1648. }
  1649. return acc;
  1650. }
  1651. #if HAVE_BIGENDIAN
  1652. #define B 3
  1653. #define G 2
  1654. #define R 1
  1655. #define A 0
  1656. #else
  1657. #define B 0
  1658. #define G 1
  1659. #define R 2
  1660. #define A 3
  1661. #endif
  1662. static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src,
  1663. int w, int *red, int *green,
  1664. int *blue, int *alpha)
  1665. {
  1666. int i, r = *red, g = *green, b = *blue, a = *alpha;
  1667. for (i = 0; i < w; i++) {
  1668. b += src[4 * i + B];
  1669. g += src[4 * i + G];
  1670. r += src[4 * i + R];
  1671. a += src[4 * i + A];
  1672. dst[4 * i + B] = b;
  1673. dst[4 * i + G] = g;
  1674. dst[4 * i + R] = r;
  1675. dst[4 * i + A] = a;
  1676. }
  1677. *red = r;
  1678. *green = g;
  1679. *blue = b;
  1680. *alpha = a;
  1681. }
  1682. #undef B
  1683. #undef G
  1684. #undef R
  1685. #undef A
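/* 8x8 Hadamard-transform SATD. BUTTERFLY2/BUTTERFLY1 are the transform
 * butterflies; hadamard8_diff8x8_c() transforms the src - dst residual and
 * sums the absolute coefficients, while hadamard8_intra8x8_c() transforms
 * the source block itself and subtracts the DC term afterwards. */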
  1686. #define BUTTERFLY2(o1, o2, i1, i2) \
  1687. o1 = (i1) + (i2); \
  1688. o2 = (i1) - (i2);
  1689. #define BUTTERFLY1(x, y) \
  1690. { \
  1691. int a, b; \
  1692. a = x; \
  1693. b = y; \
  1694. x = a + b; \
  1695. y = a - b; \
  1696. }
  1697. #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
  1698. static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
  1699. uint8_t *src, int stride, int h)
  1700. {
  1701. int i, temp[64], sum = 0;
  1702. assert(h == 8);
  1703. for (i = 0; i < 8; i++) {
  1704. // FIXME: try pointer walks
  1705. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1706. src[stride * i + 0] - dst[stride * i + 0],
  1707. src[stride * i + 1] - dst[stride * i + 1]);
  1708. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1709. src[stride * i + 2] - dst[stride * i + 2],
  1710. src[stride * i + 3] - dst[stride * i + 3]);
  1711. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1712. src[stride * i + 4] - dst[stride * i + 4],
  1713. src[stride * i + 5] - dst[stride * i + 5]);
  1714. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1715. src[stride * i + 6] - dst[stride * i + 6],
  1716. src[stride * i + 7] - dst[stride * i + 7]);
  1717. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1718. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1719. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1720. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1721. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1722. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1723. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1724. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1725. }
  1726. for (i = 0; i < 8; i++) {
  1727. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1728. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1729. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1730. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1731. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1732. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1733. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1734. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1735. sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
  1736. BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
  1737. BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
  1738. BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1739. }
  1740. return sum;
  1741. }
  1742. static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
  1743. uint8_t *dummy, int stride, int h)
  1744. {
  1745. int i, temp[64], sum = 0;
  1746. assert(h == 8);
  1747. for (i = 0; i < 8; i++) {
  1748. // FIXME: try pointer walks
  1749. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1750. src[stride * i + 0], src[stride * i + 1]);
  1751. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1752. src[stride * i + 2], src[stride * i + 3]);
  1753. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1754. src[stride * i + 4], src[stride * i + 5]);
  1755. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1756. src[stride * i + 6], src[stride * i + 7]);
  1757. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1758. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1759. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1760. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1761. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1762. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1763. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1764. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1765. }
  1766. for (i = 0; i < 8; i++) {
  1767. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1768. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1769. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1770. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1771. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1772. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1773. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1774. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1775. sum +=
  1776. BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
  1777. + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
  1778. + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
  1779. + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1780. }
   1781. sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // subtract the DC term so the block mean does not count
  1782. return sum;
  1783. }
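/* DCT-based SAD: forward-transform the residual with the encoder's fdct and
 * sum the absolute values of the coefficients. */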
  1784. static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1785. uint8_t *src2, int stride, int h)
  1786. {
  1787. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1788. assert(h == 8);
  1789. s->dsp.diff_pixels(temp, src1, src2, stride);
  1790. s->dsp.fdct(temp);
  1791. return s->dsp.sum_abs_dctelem(temp);
  1792. }
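/* One dimension of the H.264-style 8x8 integer transform; dct264_sad8x8_c()
 * runs it over the rows and then the columns of the residual and sums the
 * absolute coefficients. Kept under CONFIG_GPL. */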
  1793. #if CONFIG_GPL
  1794. #define DCT8_1D \
  1795. { \
  1796. const int s07 = SRC(0) + SRC(7); \
  1797. const int s16 = SRC(1) + SRC(6); \
  1798. const int s25 = SRC(2) + SRC(5); \
  1799. const int s34 = SRC(3) + SRC(4); \
  1800. const int a0 = s07 + s34; \
  1801. const int a1 = s16 + s25; \
  1802. const int a2 = s07 - s34; \
  1803. const int a3 = s16 - s25; \
  1804. const int d07 = SRC(0) - SRC(7); \
  1805. const int d16 = SRC(1) - SRC(6); \
  1806. const int d25 = SRC(2) - SRC(5); \
  1807. const int d34 = SRC(3) - SRC(4); \
  1808. const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
  1809. const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
  1810. const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
  1811. const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
  1812. DST(0, a0 + a1); \
  1813. DST(1, a4 + (a7 >> 2)); \
  1814. DST(2, a2 + (a3 >> 1)); \
  1815. DST(3, a5 + (a6 >> 2)); \
  1816. DST(4, a0 - a1); \
  1817. DST(5, a6 - (a5 >> 2)); \
  1818. DST(6, (a2 >> 1) - a3); \
  1819. DST(7, (a4 >> 2) - a7); \
  1820. }
  1821. static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1822. uint8_t *src2, int stride, int h)
  1823. {
  1824. int16_t dct[8][8];
  1825. int i, sum = 0;
  1826. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  1827. #define SRC(x) dct[i][x]
  1828. #define DST(x, v) dct[i][x] = v
  1829. for (i = 0; i < 8; i++)
  1830. DCT8_1D
  1831. #undef SRC
  1832. #undef DST
  1833. #define SRC(x) dct[x][i]
  1834. #define DST(x, v) sum += FFABS(v)
  1835. for (i = 0; i < 8; i++)
  1836. DCT8_1D
  1837. #undef SRC
  1838. #undef DST
  1839. return sum;
  1840. }
  1841. #endif
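/* Maximum absolute DCT coefficient of the residual. */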
  1842. static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
  1843. uint8_t *src2, int stride, int h)
  1844. {
  1845. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1846. int sum = 0, i;
  1847. assert(h == 8);
  1848. s->dsp.diff_pixels(temp, src1, src2, stride);
  1849. s->dsp.fdct(temp);
  1850. for (i = 0; i < 64; i++)
  1851. sum = FFMAX(sum, FFABS(temp[i]));
  1852. return sum;
  1853. }
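/* Quantization-error metric: quantize, dequantize and inverse-transform the
 * residual, then return the squared error against the original
 * (untransformed) residual. */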
  1854. static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
  1855. uint8_t *src2, int stride, int h)
  1856. {
  1857. LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
  1858. int16_t *const bak = temp + 64;
  1859. int sum = 0, i;
  1860. assert(h == 8);
  1861. s->mb_intra = 0;
  1862. s->dsp.diff_pixels(temp, src1, src2, stride);
  1863. memcpy(bak, temp, 64 * sizeof(int16_t));
  1864. s->block_last_index[0 /* FIXME */] =
  1865. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1866. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1867. ff_simple_idct_8(temp); // FIXME
  1868. for (i = 0; i < 64; i++)
  1869. sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
  1870. return sum;
  1871. }
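/* Rate-distortion metric: quantize the residual, count the VLC bits needed
 * to code the nonzero coefficients (escape-coding oversized levels),
 * reconstruct the block with the IDCT and measure its SSE against the
 * source; the return value is distortion plus a qscale-weighted rate term. */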
  1872. static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1873. int stride, int h)
  1874. {
  1875. const uint8_t *scantable = s->intra_scantable.permutated;
  1876. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1877. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  1878. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  1879. int i, last, run, bits, level, distortion, start_i;
  1880. const int esc_length = s->ac_esc_length;
  1881. uint8_t *length, *last_length;
  1882. assert(h == 8);
  1883. copy_block8(lsrc1, src1, 8, stride, 8);
  1884. copy_block8(lsrc2, src2, 8, stride, 8);
  1885. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  1886. s->block_last_index[0 /* FIXME */] =
  1887. last =
  1888. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1889. bits = 0;
  1890. if (s->mb_intra) {
  1891. start_i = 1;
  1892. length = s->intra_ac_vlc_length;
  1893. last_length = s->intra_ac_vlc_last_length;
  1894. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  1895. } else {
  1896. start_i = 0;
  1897. length = s->inter_ac_vlc_length;
  1898. last_length = s->inter_ac_vlc_last_length;
  1899. }
  1900. if (last >= start_i) {
  1901. run = 0;
  1902. for (i = start_i; i < last; i++) {
  1903. int j = scantable[i];
  1904. level = temp[j];
  1905. if (level) {
  1906. level += 64;
  1907. if ((level & (~127)) == 0)
  1908. bits += length[UNI_AC_ENC_INDEX(run, level)];
  1909. else
  1910. bits += esc_length;
  1911. run = 0;
  1912. } else
  1913. run++;
  1914. }
  1915. i = scantable[last];
  1916. level = temp[i] + 64;
  1917. assert(level - 64);
  1918. if ((level & (~127)) == 0) {
  1919. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  1920. } else
  1921. bits += esc_length;
  1922. }
  1923. if (last >= 0) {
  1924. if (s->mb_intra)
  1925. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  1926. else
  1927. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1928. }
  1929. s->dsp.idct_add(lsrc2, 8, temp);
  1930. distortion = s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  1931. return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
  1932. }
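/* Rate-only metric: same coefficient bit counting as rd8x8_c(), without the
 * reconstruction / distortion part. */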
  1933. static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1934. int stride, int h)
  1935. {
  1936. const uint8_t *scantable = s->intra_scantable.permutated;
  1937. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1938. int i, last, run, bits, level, start_i;
  1939. const int esc_length = s->ac_esc_length;
  1940. uint8_t *length, *last_length;
  1941. assert(h == 8);
  1942. s->dsp.diff_pixels(temp, src1, src2, stride);
  1943. s->block_last_index[0 /* FIXME */] =
  1944. last =
  1945. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1946. bits = 0;
  1947. if (s->mb_intra) {
  1948. start_i = 1;
  1949. length = s->intra_ac_vlc_length;
  1950. last_length = s->intra_ac_vlc_last_length;
  1951. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  1952. } else {
  1953. start_i = 0;
  1954. length = s->inter_ac_vlc_length;
  1955. last_length = s->inter_ac_vlc_last_length;
  1956. }
  1957. if (last >= start_i) {
  1958. run = 0;
  1959. for (i = start_i; i < last; i++) {
  1960. int j = scantable[i];
  1961. level = temp[j];
  1962. if (level) {
  1963. level += 64;
  1964. if ((level & (~127)) == 0)
  1965. bits += length[UNI_AC_ENC_INDEX(run, level)];
  1966. else
  1967. bits += esc_length;
  1968. run = 0;
  1969. } else
  1970. run++;
  1971. }
  1972. i = scantable[last];
  1973. level = temp[i] + 64;
  1974. assert(level - 64);
  1975. if ((level & (~127)) == 0)
  1976. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  1977. else
  1978. bits += esc_length;
  1979. }
  1980. return bits;
  1981. }
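/* Vertical activity metrics: the vsad / vsse functions sum the absolute or
 * squared differences between vertically adjacent pixels (intra variants) or
 * between vertically adjacent residual samples (the plain 16-pixel variants). */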
  1982. #define VSAD_INTRA(size) \
  1983. static int vsad_intra ## size ## _c(MpegEncContext *c, \
  1984. uint8_t *s, uint8_t *dummy, \
  1985. int stride, int h) \
  1986. { \
  1987. int score = 0, x, y; \
  1988. \
  1989. for (y = 1; y < h; y++) { \
  1990. for (x = 0; x < size; x += 4) { \
  1991. score += FFABS(s[x] - s[x + stride]) + \
  1992. FFABS(s[x + 1] - s[x + stride + 1]) + \
   1993. FFABS(s[x + 2] - s[x + stride + 2]) + \
   1994. FFABS(s[x + 3] - s[x + stride + 3]); \
  1995. } \
  1996. s += stride; \
  1997. } \
  1998. \
  1999. return score; \
  2000. }
  2001. VSAD_INTRA(8)
  2002. VSAD_INTRA(16)
  2003. static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2004. int stride, int h)
  2005. {
  2006. int score = 0, x, y;
  2007. for (y = 1; y < h; y++) {
  2008. for (x = 0; x < 16; x++)
  2009. score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2010. s1 += stride;
  2011. s2 += stride;
  2012. }
  2013. return score;
  2014. }
  2015. #define SQ(a) ((a) * (a))
  2016. #define VSSE_INTRA(size) \
  2017. static int vsse_intra ## size ## _c(MpegEncContext *c, \
  2018. uint8_t *s, uint8_t *dummy, \
  2019. int stride, int h) \
  2020. { \
  2021. int score = 0, x, y; \
  2022. \
  2023. for (y = 1; y < h; y++) { \
  2024. for (x = 0; x < size; x += 4) { \
  2025. score += SQ(s[x] - s[x + stride]) + \
  2026. SQ(s[x + 1] - s[x + stride + 1]) + \
  2027. SQ(s[x + 2] - s[x + stride + 2]) + \
  2028. SQ(s[x + 3] - s[x + stride + 3]); \
  2029. } \
  2030. s += stride; \
  2031. } \
  2032. \
  2033. return score; \
  2034. }
  2035. VSSE_INTRA(8)
  2036. VSSE_INTRA(16)
  2037. static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2038. int stride, int h)
  2039. {
  2040. int score = 0, x, y;
  2041. for (y = 1; y < h; y++) {
  2042. for (x = 0; x < 16; x++)
  2043. score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2044. s1 += stride;
  2045. s2 += stride;
  2046. }
  2047. return score;
  2048. }
  2049. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  2050. int size)
  2051. {
  2052. int score = 0, i;
  2053. for (i = 0; i < size; i++)
  2054. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  2055. return score;
  2056. }
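/* Build a 16x16 comparison function from an 8x8 one: evaluate the two
 * horizontally adjacent 8x8 blocks (and the two below them when h == 16)
 * and sum the scores. */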
  2057. #define WRAPPER8_16_SQ(name8, name16) \
  2058. static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
  2059. int stride, int h) \
  2060. { \
  2061. int score = 0; \
  2062. \
  2063. score += name8(s, dst, src, stride, 8); \
  2064. score += name8(s, dst + 8, src + 8, stride, 8); \
  2065. if (h == 16) { \
  2066. dst += 8 * stride; \
  2067. src += 8 * stride; \
  2068. score += name8(s, dst, src, stride, 8); \
  2069. score += name8(s, dst + 8, src + 8, stride, 8); \
  2070. } \
  2071. return score; \
  2072. }
  2073. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  2074. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  2075. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  2076. #if CONFIG_GPL
  2077. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  2078. #endif
  2079. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  2080. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  2081. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  2082. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
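/* Float clipping helpers. When min < 0 < max, vector_clipf_c_opposite_sign()
 * compares the raw IEEE-754 bit patterns instead of the float values
 * (clipf_c_one); otherwise vector_clipf_c() falls back to av_clipf(). Both
 * paths handle 8 floats per iteration, so the length is expected to be a
 * multiple of 8. */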
  2083. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  2084. uint32_t maxi, uint32_t maxisign)
  2085. {
  2086. if (a > mini)
  2087. return mini;
  2088. else if ((a ^ (1U << 31)) > maxisign)
  2089. return maxi;
  2090. else
  2091. return a;
  2092. }
  2093. static void vector_clipf_c_opposite_sign(float *dst, const float *src,
  2094. float *min, float *max, int len)
  2095. {
  2096. int i;
  2097. uint32_t mini = *(uint32_t *) min;
  2098. uint32_t maxi = *(uint32_t *) max;
  2099. uint32_t maxisign = maxi ^ (1U << 31);
  2100. uint32_t *dsti = (uint32_t *) dst;
  2101. const uint32_t *srci = (const uint32_t *) src;
  2102. for (i = 0; i < len; i += 8) {
  2103. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  2104. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  2105. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  2106. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  2107. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  2108. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  2109. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  2110. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  2111. }
  2112. }
  2113. static void vector_clipf_c(float *dst, const float *src,
  2114. float min, float max, int len)
  2115. {
  2116. int i;
  2117. if (min < 0 && max > 0) {
  2118. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  2119. } else {
  2120. for (i = 0; i < len; i += 8) {
  2121. dst[i] = av_clipf(src[i], min, max);
  2122. dst[i + 1] = av_clipf(src[i + 1], min, max);
  2123. dst[i + 2] = av_clipf(src[i + 2], min, max);
  2124. dst[i + 3] = av_clipf(src[i + 3], min, max);
  2125. dst[i + 4] = av_clipf(src[i + 4], min, max);
  2126. dst[i + 5] = av_clipf(src[i + 5], min, max);
  2127. dst[i + 6] = av_clipf(src[i + 6], min, max);
  2128. dst[i + 7] = av_clipf(src[i + 7], min, max);
  2129. }
  2130. }
  2131. }
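/* Dot product of two int16 vectors; the _and_madd variant additionally
 * accumulates mul * v3[] into v1[] as it goes. */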
  2132. static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
  2133. int order)
  2134. {
  2135. int res = 0;
  2136. while (order--)
   2137. res += *v1++ * *v2++;
  2138. return res;
  2139. }
  2140. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
  2141. const int16_t *v3,
  2142. int order, int mul)
  2143. {
  2144. int res = 0;
  2145. while (order--) {
  2146. res += *v1 * *v2++;
  2147. *v1++ += mul * *v3++;
  2148. }
  2149. return res;
  2150. }
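/* Clip each int32 sample to [min, max], eight samples per iteration; len must
 * be a positive multiple of 8. */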
  2151. static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
  2152. int32_t max, unsigned int len)
  2153. {
  2154. do {
  2155. *dst++ = av_clip(*src++, min, max);
  2156. *dst++ = av_clip(*src++, min, max);
  2157. *dst++ = av_clip(*src++, min, max);
  2158. *dst++ = av_clip(*src++, min, max);
  2159. *dst++ = av_clip(*src++, min, max);
  2160. *dst++ = av_clip(*src++, min, max);
  2161. *dst++ = av_clip(*src++, min, max);
  2162. *dst++ = av_clip(*src++, min, max);
  2163. len -= 8;
  2164. } while (len > 0);
  2165. }
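/* Wrappers that run ff_j_rev_dct (the integer IDCT selected by FF_IDCT_INT
 * below) and then store or add the clamped result into the destination. */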
  2166. static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
  2167. {
  2168. ff_j_rev_dct(block);
  2169. put_pixels_clamped_c(block, dest, line_size);
  2170. }
  2171. static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
  2172. {
  2173. ff_j_rev_dct(block);
  2174. add_pixels_clamped_c(block, dest, line_size);
  2175. }
   2176. /* Replicate the border pixels of a width x height image outwards: 'w' pixels on the left and right of every line, and 'h' lines above and/or below as selected by 'sides'. */
  2177. // FIXME: Check that this is OK for MPEG-4 interlaced.
  2178. static void draw_edges_8_c(uint8_t *buf, int wrap, int width, int height,
  2179. int w, int h, int sides)
  2180. {
  2181. uint8_t *ptr = buf, *last_line;
  2182. int i;
  2183. /* left and right */
  2184. for (i = 0; i < height; i++) {
  2185. memset(ptr - w, ptr[0], w);
  2186. memset(ptr + width, ptr[width - 1], w);
  2187. ptr += wrap;
  2188. }
  2189. /* top and bottom + corners */
  2190. buf -= w;
  2191. last_line = buf + (height - 1) * wrap;
  2192. if (sides & EDGE_TOP)
  2193. for (i = 0; i < h; i++)
  2194. // top
  2195. memcpy(buf - (i + 1) * wrap, buf, width + w + w);
  2196. if (sides & EDGE_BOTTOM)
  2197. for (i = 0; i < h; i++)
  2198. // bottom
  2199. memcpy(last_line + (i + 1) * wrap, last_line, width + w + w);
  2200. }
  2201. static void clear_block_8_c(int16_t *block)
  2202. {
  2203. memset(block, 0, sizeof(int16_t) * 64);
  2204. }
  2205. static void clear_blocks_8_c(int16_t *blocks)
  2206. {
  2207. memset(blocks, 0, sizeof(int16_t) * 6 * 64);
  2208. }
  2209. /* init static data */
  2210. av_cold void ff_dsputil_static_init(void)
  2211. {
  2212. int i;
  2213. for (i = 0; i < 512; i++)
  2214. ff_square_tab[i] = (i - 256) * (i - 256);
  2215. }
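/* Fill in the DSPContext function pointers: choose forward DCT and IDCT
 * implementations based on bit depth and the algorithms requested in the
 * AVCodecContext, install the C versions of the comparison, prediction and
 * clipping helpers, then let the per-architecture init functions override
 * whatever they can accelerate before the IDCT permutation table is set up. */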
  2216. av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2217. {
  2218. const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
  2219. #if CONFIG_ENCODERS
  2220. if (avctx->bits_per_raw_sample == 10) {
  2221. c->fdct = ff_jpeg_fdct_islow_10;
  2222. c->fdct248 = ff_fdct248_islow_10;
  2223. } else {
  2224. if (avctx->dct_algo == FF_DCT_FASTINT) {
  2225. c->fdct = ff_fdct_ifast;
  2226. c->fdct248 = ff_fdct_ifast248;
  2227. } else if (avctx->dct_algo == FF_DCT_FAAN) {
  2228. c->fdct = ff_faandct;
  2229. c->fdct248 = ff_faandct248;
  2230. } else {
  2231. c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
  2232. c->fdct248 = ff_fdct248_islow_8;
  2233. }
  2234. }
  2235. #endif /* CONFIG_ENCODERS */
  2236. if (avctx->bits_per_raw_sample == 10) {
  2237. c->idct_put = ff_simple_idct_put_10;
  2238. c->idct_add = ff_simple_idct_add_10;
  2239. c->idct = ff_simple_idct_10;
  2240. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2241. } else {
  2242. if (avctx->idct_algo == FF_IDCT_INT) {
  2243. c->idct_put = jref_idct_put;
  2244. c->idct_add = jref_idct_add;
  2245. c->idct = ff_j_rev_dct;
  2246. c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
  2247. } else if (avctx->idct_algo == FF_IDCT_FAAN) {
  2248. c->idct_put = ff_faanidct_put;
  2249. c->idct_add = ff_faanidct_add;
  2250. c->idct = ff_faanidct;
  2251. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2252. } else { // accurate/default
  2253. c->idct_put = ff_simple_idct_put_8;
  2254. c->idct_add = ff_simple_idct_add_8;
  2255. c->idct = ff_simple_idct_8;
  2256. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2257. }
  2258. }
  2259. c->diff_pixels = diff_pixels_c;
  2260. c->put_pixels_clamped = put_pixels_clamped_c;
  2261. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  2262. c->add_pixels_clamped = add_pixels_clamped_c;
  2263. c->sum_abs_dctelem = sum_abs_dctelem_c;
  2264. c->gmc1 = gmc1_c;
  2265. c->gmc = ff_gmc_c;
  2266. c->pix_sum = pix_sum_c;
  2267. c->pix_norm1 = pix_norm1_c;
  2268. c->fill_block_tab[0] = fill_block16_c;
  2269. c->fill_block_tab[1] = fill_block8_c;
   2270. /* pix_abs[0][] operates on 16-pixel-wide blocks, pix_abs[1][] on 8-pixel-wide ones */
  2271. c->pix_abs[0][0] = pix_abs16_c;
  2272. c->pix_abs[0][1] = pix_abs16_x2_c;
  2273. c->pix_abs[0][2] = pix_abs16_y2_c;
  2274. c->pix_abs[0][3] = pix_abs16_xy2_c;
  2275. c->pix_abs[1][0] = pix_abs8_c;
  2276. c->pix_abs[1][1] = pix_abs8_x2_c;
  2277. c->pix_abs[1][2] = pix_abs8_y2_c;
  2278. c->pix_abs[1][3] = pix_abs8_xy2_c;
  2279. #define dspfunc(PFX, IDX, NUM) \
  2280. c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \
  2281. c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \
  2282. c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \
  2283. c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \
  2284. c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \
  2285. c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \
  2286. c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \
  2287. c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \
  2288. c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \
  2289. c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \
  2290. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  2291. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  2292. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  2293. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  2294. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  2295. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  2296. dspfunc(put_qpel, 0, 16);
  2297. dspfunc(put_qpel, 1, 8);
  2298. dspfunc(put_no_rnd_qpel, 0, 16);
  2299. dspfunc(put_no_rnd_qpel, 1, 8);
  2300. dspfunc(avg_qpel, 0, 16);
  2301. dspfunc(avg_qpel, 1, 8);
  2302. #undef dspfunc
  2303. c->put_mspel_pixels_tab[0] = ff_put_pixels8x8_c;
  2304. c->put_mspel_pixels_tab[1] = put_mspel8_mc10_c;
  2305. c->put_mspel_pixels_tab[2] = put_mspel8_mc20_c;
  2306. c->put_mspel_pixels_tab[3] = put_mspel8_mc30_c;
  2307. c->put_mspel_pixels_tab[4] = put_mspel8_mc02_c;
  2308. c->put_mspel_pixels_tab[5] = put_mspel8_mc12_c;
  2309. c->put_mspel_pixels_tab[6] = put_mspel8_mc22_c;
  2310. c->put_mspel_pixels_tab[7] = put_mspel8_mc32_c;
  2311. #define SET_CMP_FUNC(name) \
  2312. c->name[0] = name ## 16_c; \
  2313. c->name[1] = name ## 8x8_c;
  2314. SET_CMP_FUNC(hadamard8_diff)
  2315. c->hadamard8_diff[4] = hadamard8_intra16_c;
  2316. c->hadamard8_diff[5] = hadamard8_intra8x8_c;
  2317. SET_CMP_FUNC(dct_sad)
  2318. SET_CMP_FUNC(dct_max)
  2319. #if CONFIG_GPL
  2320. SET_CMP_FUNC(dct264_sad)
  2321. #endif
  2322. c->sad[0] = pix_abs16_c;
  2323. c->sad[1] = pix_abs8_c;
  2324. c->sse[0] = sse16_c;
  2325. c->sse[1] = sse8_c;
  2326. c->sse[2] = sse4_c;
  2327. SET_CMP_FUNC(quant_psnr)
  2328. SET_CMP_FUNC(rd)
  2329. SET_CMP_FUNC(bit)
  2330. c->vsad[0] = vsad16_c;
  2331. c->vsad[4] = vsad_intra16_c;
  2332. c->vsad[5] = vsad_intra8_c;
  2333. c->vsse[0] = vsse16_c;
  2334. c->vsse[4] = vsse_intra16_c;
  2335. c->vsse[5] = vsse_intra8_c;
  2336. c->nsse[0] = nsse16_c;
  2337. c->nsse[1] = nsse8_c;
  2338. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  2339. c->add_bytes = add_bytes_c;
  2340. c->add_hfyu_median_prediction = add_hfyu_median_prediction_c;
  2341. c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
  2342. c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
  2343. c->diff_bytes = diff_bytes_c;
  2344. c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_c;
  2345. c->bswap_buf = bswap_buf;
  2346. c->bswap16_buf = bswap16_buf;
  2347. c->try_8x8basis = try_8x8basis_c;
  2348. c->add_8x8basis = add_8x8basis_c;
  2349. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  2350. c->scalarproduct_int16 = scalarproduct_int16_c;
  2351. c->vector_clip_int32 = vector_clip_int32_c;
  2352. c->vector_clipf = vector_clipf_c;
  2353. c->shrink[0] = av_image_copy_plane;
  2354. c->shrink[1] = ff_shrink22;
  2355. c->shrink[2] = ff_shrink44;
  2356. c->shrink[3] = ff_shrink88;
  2357. c->add_pixels8 = add_pixels8_c;
  2358. c->draw_edges = draw_edges_8_c;
  2359. c->clear_block = clear_block_8_c;
  2360. c->clear_blocks = clear_blocks_8_c;
  2361. switch (avctx->bits_per_raw_sample) {
  2362. case 9:
  2363. case 10:
  2364. c->get_pixels = get_pixels_16_c;
  2365. break;
  2366. default:
  2367. c->get_pixels = get_pixels_8_c;
  2368. break;
  2369. }
  2370. if (ARCH_ARM)
  2371. ff_dsputil_init_arm(c, avctx, high_bit_depth);
  2372. if (ARCH_BFIN)
  2373. ff_dsputil_init_bfin(c, avctx, high_bit_depth);
  2374. if (ARCH_PPC)
  2375. ff_dsputil_init_ppc(c, avctx, high_bit_depth);
  2376. if (ARCH_X86)
  2377. ff_dsputil_init_x86(c, avctx, high_bit_depth);
  2378. ff_init_scantable_permutation(c->idct_permutation,
  2379. c->idct_permutation_type);
  2380. }