/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP utils
 */

#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "copy_block.h"
#include "dct.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "imgconvert.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"
uint32_t ff_square_tab[512] = { 0, };

#define BIT_DEPTH 16
#include "dsputilenc_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 8
#include "hpel_template.c"
#include "tpel_template.c"
#include "dsputil_template.c"
#include "dsputilenc_template.c"
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
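/*
 * Editorial note (not part of the original file): these are the "alternate"
 * coefficient scan orders that the MPEG-style codecs can select instead of
 * the default zigzag scan, typically for interlaced material where vertical
 * frequencies dominate.  ff_init_scantable() below remaps whichever scan is
 * chosen through the active IDCT permutation.
 */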
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64] = {
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };

av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    int i, end;

    st->scantable = src_scantable;

    for (i = 0; i < 64; i++) {
        int j = src_scantable[i];
        st->permutated[i] = permutation[j];
    }

    end = -1;
    for (i = 0; i < 64; i++) {
        int j = st->permutated[i];
        if (j > end)
            end = j;
        st->raster_end[i] = end;
    }
}

av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
                                           int idct_permutation_type)
{
    int i;

    switch (idct_permutation_type) {
    case FF_NO_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = i;
        break;
    case FF_LIBMPEG2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
        break;
    case FF_SIMPLE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = simple_mmx_permutation[i];
        break;
    case FF_TRANSPOSE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
        break;
    case FF_PARTTRANS_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
        break;
    case FF_SSE2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
        break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Internal error, IDCT permutation not set\n");
    }
}
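/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above are typically combined.  A caller first derives the input
 * permutation required by the selected IDCT, then builds a ScanTable whose
 * permutated[] entries are already in IDCT order, so coefficients can be
 * stored straight into their permuted block position while decoding.
 */
#if 0
static void example_build_scantable(ScanTable *st)
{
    uint8_t perm[64];

    /* identity permutation; a real IDCT may need e.g. FF_SIMPLE_IDCT_PERM */
    ff_init_scantable_permutation(perm, FF_NO_IDCT_PERM);
    ff_init_scantable(perm, st, ff_zigzag_direct);
}
#endif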
static int pix_sum_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
            s += pix[0];
            s += pix[1];
            s += pix[2];
            s += pix[3];
            s += pix[4];
            s += pix[5];
            s += pix[6];
            s += pix[7];
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}

static int pix_norm1_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if 0
            s += sq[pix[0]];
            s += sq[pix[1]];
            s += sq[pix[2]];
            s += sq[pix[3]];
            s += sq[pix[4]];
            s += sq[pix[5]];
            s += sq[pix[6]];
            s += sq[pix[7]];
#else
#if HAVE_FAST_64BIT
            register uint64_t x = *(uint64_t *) pix;
            s += sq[x & 0xff];
            s += sq[(x >> 8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            s += sq[(x >> 32) & 0xff];
            s += sq[(x >> 40) & 0xff];
            s += sq[(x >> 48) & 0xff];
            s += sq[(x >> 56) & 0xff];
#else
            register uint32_t x = *(uint32_t *) pix;
            s += sq[x & 0xff];
            s += sq[(x >> 8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            x = *(uint32_t *) (pix + 4);
            s += sq[x & 0xff];
            s += sq[(x >> 8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
#endif
#endif
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}
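/*
 * Editorial note (not in the original source): ff_square_tab is initialised
 * elsewhere so that ff_square_tab[i] == (i - 256) * (i - 256).  With the
 * pointer biased by +256 ("sq"), sq[d] == d * d for any d in [-255, 255],
 * which lets both the plain pixel values above and the signed differences
 * in the sse*_c functions below index the same table without a branch.
 */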
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
{
    int i;

    for (i = 0; i + 8 <= w; i += 8) {
        dst[i + 0] = av_bswap32(src[i + 0]);
        dst[i + 1] = av_bswap32(src[i + 1]);
        dst[i + 2] = av_bswap32(src[i + 2]);
        dst[i + 3] = av_bswap32(src[i + 3]);
        dst[i + 4] = av_bswap32(src[i + 4]);
        dst[i + 5] = av_bswap32(src[i + 5]);
        dst[i + 6] = av_bswap32(src[i + 6]);
        dst[i + 7] = av_bswap32(src[i + 7]);
    }
    for (; i < w; i++)
        dst[i + 0] = av_bswap32(src[i + 0]);
}

static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
{
    while (len--)
        *dst++ = av_bswap16(*src++);
}

static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        s += sq[pix1[8] - pix2[8]];
        s += sq[pix1[9] - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
static void diff_pixels_c(int16_t *restrict block, const uint8_t *s1,
                          const uint8_t *s2, int stride)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        block[0] = s1[0] - s2[0];
        block[1] = s1[1] - s2[1];
        block[2] = s1[2] - s2[2];
        block[3] = s1[3] - s2[3];
        block[4] = s1[4] - s2[4];
        block[5] = s1[5] - s2[5];
        block[6] = s1[6] - s2[6];
        block[7] = s1[7] - s2[7];
        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void put_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(block[0]);
        pixels[1] = av_clip_uint8(block[1]);
        pixels[2] = av_clip_uint8(block[2]);
        pixels[3] = av_clip_uint8(block[3]);
        pixels[4] = av_clip_uint8(block[4]);
        pixels[5] = av_clip_uint8(block[5]);
        pixels[6] = av_clip_uint8(block[6]);
        pixels[7] = av_clip_uint8(block[7]);
        pixels += line_size;
        block += 8;
    }
}

static void put_signed_pixels_clamped_c(const int16_t *block,
                                        uint8_t *restrict pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            if (*block < -128)
                *pixels = 0;
            else if (*block > 127)
                *pixels = 255;
            else
                *pixels = (uint8_t) (*block + 128);
            block++;
            pixels++;
        }
        pixels += (line_size - 8);
    }
}

static void add_pixels8_c(uint8_t *restrict pixels, int16_t *block,
                          int line_size)
{
    int i;

    for (i = 0; i < 8; i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];
        pixels += line_size;
        block += 8;
    }
}

static void add_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(pixels[0] + block[0]);
        pixels[1] = av_clip_uint8(pixels[1] + block[1]);
        pixels[2] = av_clip_uint8(pixels[2] + block[2]);
        pixels[3] = av_clip_uint8(pixels[3] + block[3]);
        pixels[4] = av_clip_uint8(pixels[4] + block[4]);
        pixels[5] = av_clip_uint8(pixels[5] + block[5]);
        pixels[6] = av_clip_uint8(pixels[6] + block[6]);
        pixels[7] = av_clip_uint8(pixels[7] + block[7]);
        pixels += line_size;
        block += 8;
    }
}

static int sum_abs_dctelem_c(int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}
static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 16);
        block += line_size;
    }
}

static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 8);
        block += line_size;
    }
}

#define avg2(a, b)       ((a + b + 1) >> 1)
#define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
                   int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16);
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + rounder) >> 8;
        dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + rounder) >> 8;
        dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + rounder) >> 8;
        dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + rounder) >> 8;
        dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + rounder) >> 8;
        dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + rounder) >> 8;
        dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + rounder) >> 8;
        dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + rounder) >> 8;
        dst += stride;
        src += stride;
    }
}
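/*
 * Editorial note (not in the original source): x16/y16 are the fractional
 * position in 1/16-pel units, so the four bilinear weights
 * A = (16 - x16) * (16 - y16), B = x16 * (16 - y16), C = (16 - x16) * y16
 * and D = x16 * y16 always sum to 256, and the final ">> 8" together with
 * the caller-supplied rounder normalises the result.  For example, at the
 * half-pel position x16 = y16 = 8 all four weights are 64 and the filter
 * degenerates to a plain 4-sample average.
 */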
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r,
              int width, int height)
{
    int y, vx, vy;
    const int s = 1 << shift;

    width--;
    height--;

    for (y = 0; y < h; y++) {
        int x;

        vx = ox;
        vy = oy;
        for (x = 0; x < 8; x++) { // FIXME: optimize
            int index;
            int src_x  = vx >> 16;
            int src_y  = vy >> 16;
            int frac_x = src_x & (s - 1);
            int frac_y = src_y & (s - 1);
            src_x >>= shift;
            src_y >>= shift;

            if ((unsigned) src_x < width) {
                if ((unsigned) src_y < height) {
                    index = src_x + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]          * (s - frac_x) +
                          src[index + 1]      * frac_x) * (s - frac_y) +
                         (src[index + stride] * (s - frac_x) +
                          src[index + stride + 1] * frac_x) * frac_y +
                         r) >> (shift * 2);
                } else {
                    index = src_x + av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] =
                        ((src[index]     * (s - frac_x) +
                          src[index + 1] * frac_x) * s +
                         r) >> (shift * 2);
                }
            } else {
                if ((unsigned) src_y < height) {
                    index = av_clip(src_x, 0, width) + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]          * (s - frac_y) +
                          src[index + stride] * frac_y) * s +
                         r) >> (shift * 2);
                } else {
                    index = av_clip(src_x, 0, width) +
                            av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] = src[index];
                }
            }

            vx += dxx;
            vy += dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
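/*
 * Editorial note (not in the original source): ff_gmc_c() handles the
 * general (affine) global-motion case.  (ox, oy) is the source position of
 * the first pixel, (dxx, dyx) the per-pixel increment along a row and
 * (dxy, dyy) the per-row increment, all in fixed point with 16 + shift
 * fractional bits; "vx >> 16" therefore still carries `shift` fractional
 * bits, which select the bilinear weights, while coordinates outside the
 * picture are clamped to the nearest edge sample.
 */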
  472. #define QPEL_MC(r, OPNAME, RND, OP) \
  473. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \
  474. int dstStride, int srcStride, \
  475. int h) \
  476. { \
  477. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  478. int i; \
  479. \
  480. for (i = 0; i < h; i++) { \
  481. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  482. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  483. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  484. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  485. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  486. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \
  487. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \
  488. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \
  489. dst += dstStride; \
  490. src += srcStride; \
  491. } \
  492. } \
  493. \
  494. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \
  495. int dstStride, int srcStride) \
  496. { \
  497. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  498. const int w = 8; \
  499. int i; \
  500. \
  501. for (i = 0; i < w; i++) { \
  502. const int src0 = src[0 * srcStride]; \
  503. const int src1 = src[1 * srcStride]; \
  504. const int src2 = src[2 * srcStride]; \
  505. const int src3 = src[3 * srcStride]; \
  506. const int src4 = src[4 * srcStride]; \
  507. const int src5 = src[5 * srcStride]; \
  508. const int src6 = src[6 * srcStride]; \
  509. const int src7 = src[7 * srcStride]; \
  510. const int src8 = src[8 * srcStride]; \
  511. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  512. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  513. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  514. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  515. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  516. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \
  517. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \
  518. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \
  519. dst++; \
  520. src++; \
  521. } \
  522. } \
  523. \
  524. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \
  525. int dstStride, int srcStride, \
  526. int h) \
  527. { \
  528. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  529. int i; \
  530. \
  531. for (i = 0; i < h; i++) { \
  532. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  533. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  534. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  535. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  536. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  537. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \
  538. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \
  539. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \
  540. OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \
  541. OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \
  542. OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \
  543. OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \
  544. OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \
  545. OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \
  546. OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \
  547. OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \
  548. dst += dstStride; \
  549. src += srcStride; \
  550. } \
  551. } \
  552. \
  553. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \
  554. int dstStride, int srcStride) \
  555. { \
  556. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  557. const int w = 16; \
  558. int i; \
  559. \
  560. for (i = 0; i < w; i++) { \
  561. const int src0 = src[0 * srcStride]; \
  562. const int src1 = src[1 * srcStride]; \
  563. const int src2 = src[2 * srcStride]; \
  564. const int src3 = src[3 * srcStride]; \
  565. const int src4 = src[4 * srcStride]; \
  566. const int src5 = src[5 * srcStride]; \
  567. const int src6 = src[6 * srcStride]; \
  568. const int src7 = src[7 * srcStride]; \
  569. const int src8 = src[8 * srcStride]; \
  570. const int src9 = src[9 * srcStride]; \
  571. const int src10 = src[10 * srcStride]; \
  572. const int src11 = src[11 * srcStride]; \
  573. const int src12 = src[12 * srcStride]; \
  574. const int src13 = src[13 * srcStride]; \
  575. const int src14 = src[14 * srcStride]; \
  576. const int src15 = src[15 * srcStride]; \
  577. const int src16 = src[16 * srcStride]; \
  578. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  579. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  580. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  581. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  582. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  583. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \
  584. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \
  585. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \
  586. OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \
  587. OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \
  588. OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \
  589. OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \
  590. OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \
  591. OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \
  592. OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \
  593. OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \
  594. dst++; \
  595. src++; \
  596. } \
  597. } \
  598. \
  599. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \
  600. ptrdiff_t stride) \
  601. { \
  602. uint8_t half[64]; \
  603. \
  604. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  605. OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \
  606. } \
  607. \
  608. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \
  609. ptrdiff_t stride) \
  610. { \
  611. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \
  612. } \
  613. \
  614. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \
  615. ptrdiff_t stride) \
  616. { \
  617. uint8_t half[64]; \
  618. \
  619. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  620. OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \
  621. } \
  622. \
  623. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \
  624. ptrdiff_t stride) \
  625. { \
  626. uint8_t full[16 * 9]; \
  627. uint8_t half[64]; \
  628. \
  629. copy_block9(full, src, 16, stride, 9); \
  630. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  631. OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \
  632. } \
  633. \
  634. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \
  635. ptrdiff_t stride) \
  636. { \
  637. uint8_t full[16 * 9]; \
  638. \
  639. copy_block9(full, src, 16, stride, 9); \
  640. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \
  641. } \
  642. \
  643. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \
  644. ptrdiff_t stride) \
  645. { \
  646. uint8_t full[16 * 9]; \
  647. uint8_t half[64]; \
  648. \
  649. copy_block9(full, src, 16, stride, 9); \
  650. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  651. OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \
  652. } \
  653. \
  654. void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \
  655. ptrdiff_t stride) \
  656. { \
  657. uint8_t full[16 * 9]; \
  658. uint8_t halfH[72]; \
  659. uint8_t halfV[64]; \
  660. uint8_t halfHV[64]; \
  661. \
  662. copy_block9(full, src, 16, stride, 9); \
  663. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  664. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  665. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  666. OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \
  667. stride, 16, 8, 8, 8, 8); \
  668. } \
  669. \
  670. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \
  671. ptrdiff_t stride) \
  672. { \
  673. uint8_t full[16 * 9]; \
  674. uint8_t halfH[72]; \
  675. uint8_t halfHV[64]; \
  676. \
  677. copy_block9(full, src, 16, stride, 9); \
  678. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  679. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  680. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  681. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  682. } \
  683. \
  684. void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \
  685. ptrdiff_t stride) \
  686. { \
  687. uint8_t full[16 * 9]; \
  688. uint8_t halfH[72]; \
  689. uint8_t halfV[64]; \
  690. uint8_t halfHV[64]; \
  691. \
  692. copy_block9(full, src, 16, stride, 9); \
  693. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  694. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  695. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  696. OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  697. stride, 16, 8, 8, 8, 8); \
  698. } \
  699. \
  700. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \
  701. ptrdiff_t stride) \
  702. { \
  703. uint8_t full[16 * 9]; \
  704. uint8_t halfH[72]; \
  705. uint8_t halfHV[64]; \
  706. \
  707. copy_block9(full, src, 16, stride, 9); \
  708. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  709. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  710. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  711. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  712. } \
  713. \
  714. void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \
  715. ptrdiff_t stride) \
  716. { \
  717. uint8_t full[16 * 9]; \
  718. uint8_t halfH[72]; \
  719. uint8_t halfV[64]; \
  720. uint8_t halfHV[64]; \
  721. \
  722. copy_block9(full, src, 16, stride, 9); \
  723. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  724. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  725. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  726. OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \
  727. stride, 16, 8, 8, 8, 8); \
  728. } \
  729. \
  730. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \
  731. ptrdiff_t stride) \
  732. { \
  733. uint8_t full[16 * 9]; \
  734. uint8_t halfH[72]; \
  735. uint8_t halfHV[64]; \
  736. \
  737. copy_block9(full, src, 16, stride, 9); \
  738. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  739. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  740. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  741. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  742. } \
  743. \
  744. void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \
  745. ptrdiff_t stride) \
  746. { \
  747. uint8_t full[16 * 9]; \
  748. uint8_t halfH[72]; \
  749. uint8_t halfV[64]; \
  750. uint8_t halfHV[64]; \
  751. \
  752. copy_block9(full, src, 16, stride, 9); \
  753. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  754. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  755. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  756. OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \
  757. stride, 16, 8, 8, 8, 8); \
  758. } \
  759. \
  760. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \
  761. ptrdiff_t stride) \
  762. { \
  763. uint8_t full[16 * 9]; \
  764. uint8_t halfH[72]; \
  765. uint8_t halfHV[64]; \
  766. \
  767. copy_block9(full, src, 16, stride, 9); \
  768. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  769. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  770. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  771. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  772. } \
  773. \
  774. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \
  775. ptrdiff_t stride) \
  776. { \
  777. uint8_t halfH[72]; \
  778. uint8_t halfHV[64]; \
  779. \
  780. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  781. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  782. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  783. } \
  784. \
  785. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \
  786. ptrdiff_t stride) \
  787. { \
  788. uint8_t halfH[72]; \
  789. uint8_t halfHV[64]; \
  790. \
  791. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  792. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  793. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  794. } \
  795. \
  796. void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \
  797. ptrdiff_t stride) \
  798. { \
  799. uint8_t full[16 * 9]; \
  800. uint8_t halfH[72]; \
  801. uint8_t halfV[64]; \
  802. uint8_t halfHV[64]; \
  803. \
  804. copy_block9(full, src, 16, stride, 9); \
  805. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  806. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  807. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  808. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  809. } \
  810. \
  811. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \
  812. ptrdiff_t stride) \
  813. { \
  814. uint8_t full[16 * 9]; \
  815. uint8_t halfH[72]; \
  816. \
  817. copy_block9(full, src, 16, stride, 9); \
  818. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  819. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  820. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  821. } \
  822. \
  823. void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \
  824. ptrdiff_t stride) \
  825. { \
  826. uint8_t full[16 * 9]; \
  827. uint8_t halfH[72]; \
  828. uint8_t halfV[64]; \
  829. uint8_t halfHV[64]; \
  830. \
  831. copy_block9(full, src, 16, stride, 9); \
  832. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  833. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  834. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  835. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  836. } \
  837. \
  838. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \
  839. ptrdiff_t stride) \
  840. { \
  841. uint8_t full[16 * 9]; \
  842. uint8_t halfH[72]; \
  843. \
  844. copy_block9(full, src, 16, stride, 9); \
  845. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  846. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  847. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  848. } \
  849. \
  850. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \
  851. ptrdiff_t stride) \
  852. { \
  853. uint8_t halfH[72]; \
  854. \
  855. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  856. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  857. } \
  858. \
  859. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \
  860. ptrdiff_t stride) \
  861. { \
  862. uint8_t half[256]; \
  863. \
  864. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  865. OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \
  866. } \
  867. \
  868. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \
  869. ptrdiff_t stride) \
  870. { \
  871. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \
  872. } \
  873. \
  874. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \
  875. ptrdiff_t stride) \
  876. { \
  877. uint8_t half[256]; \
  878. \
  879. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  880. OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \
  881. } \
  882. \
  883. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \
  884. ptrdiff_t stride) \
  885. { \
  886. uint8_t full[24 * 17]; \
  887. uint8_t half[256]; \
  888. \
  889. copy_block17(full, src, 24, stride, 17); \
  890. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  891. OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \
  892. } \
  893. \
  894. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \
  895. ptrdiff_t stride) \
  896. { \
  897. uint8_t full[24 * 17]; \
  898. \
  899. copy_block17(full, src, 24, stride, 17); \
  900. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \
  901. } \
  902. \
  903. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \
  904. ptrdiff_t stride) \
  905. { \
  906. uint8_t full[24 * 17]; \
  907. uint8_t half[256]; \
  908. \
  909. copy_block17(full, src, 24, stride, 17); \
  910. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  911. OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \
  912. } \
  913. \
  914. void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \
  915. ptrdiff_t stride) \
  916. { \
  917. uint8_t full[24 * 17]; \
  918. uint8_t halfH[272]; \
  919. uint8_t halfV[256]; \
  920. uint8_t halfHV[256]; \
  921. \
  922. copy_block17(full, src, 24, stride, 17); \
  923. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  924. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  925. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  926. OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \
  927. stride, 24, 16, 16, 16, 16); \
  928. } \
  929. \
  930. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \
  931. ptrdiff_t stride) \
  932. { \
  933. uint8_t full[24 * 17]; \
  934. uint8_t halfH[272]; \
  935. uint8_t halfHV[256]; \
  936. \
  937. copy_block17(full, src, 24, stride, 17); \
  938. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  939. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  940. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  941. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  942. } \
  943. \
  944. void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \
  945. ptrdiff_t stride) \
  946. { \
  947. uint8_t full[24 * 17]; \
  948. uint8_t halfH[272]; \
  949. uint8_t halfV[256]; \
  950. uint8_t halfHV[256]; \
  951. \
  952. copy_block17(full, src, 24, stride, 17); \
  953. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  954. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  955. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  956. OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  957. stride, 24, 16, 16, 16, 16); \
  958. } \
  959. \
  960. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \
  961. ptrdiff_t stride) \
  962. { \
  963. uint8_t full[24 * 17]; \
  964. uint8_t halfH[272]; \
  965. uint8_t halfHV[256]; \
  966. \
  967. copy_block17(full, src, 24, stride, 17); \
  968. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  969. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  970. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  971. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  972. } \
  973. \
  974. void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \
  975. ptrdiff_t stride) \
  976. { \
  977. uint8_t full[24 * 17]; \
  978. uint8_t halfH[272]; \
  979. uint8_t halfV[256]; \
  980. uint8_t halfHV[256]; \
  981. \
  982. copy_block17(full, src, 24, stride, 17); \
  983. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  984. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  985. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  986. OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \
  987. stride, 24, 16, 16, 16, 16); \
  988. } \
  989. \
  990. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \
  991. ptrdiff_t stride) \
  992. { \
  993. uint8_t full[24 * 17]; \
  994. uint8_t halfH[272]; \
  995. uint8_t halfHV[256]; \
  996. \
  997. copy_block17(full, src, 24, stride, 17); \
  998. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  999. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1000. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1001. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1002. } \
  1003. \
  1004. void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \
  1005. ptrdiff_t stride) \
  1006. { \
  1007. uint8_t full[24 * 17]; \
  1008. uint8_t halfH[272]; \
  1009. uint8_t halfV[256]; \
  1010. uint8_t halfHV[256]; \
  1011. \
  1012. copy_block17(full, src, 24, stride, 17); \
  1013. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1014. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1015. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1016. OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \
  1017. stride, 24, 16, 16, 16, 16); \
  1018. } \
  1019. \
  1020. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \
  1021. ptrdiff_t stride) \
  1022. { \
  1023. uint8_t full[24 * 17]; \
  1024. uint8_t halfH[272]; \
  1025. uint8_t halfHV[256]; \
  1026. \
  1027. copy_block17(full, src, 24, stride, 17); \
  1028. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1029. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1030. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1031. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1032. } \
  1033. \
  1034. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \
  1035. ptrdiff_t stride) \
  1036. { \
  1037. uint8_t halfH[272]; \
  1038. uint8_t halfHV[256]; \
  1039. \
  1040. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1041. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1042. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1043. } \
  1044. \
  1045. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \
  1046. ptrdiff_t stride) \
  1047. { \
  1048. uint8_t halfH[272]; \
  1049. uint8_t halfHV[256]; \
  1050. \
  1051. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1052. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1053. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1054. } \
  1055. \
  1056. void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1057. ptrdiff_t stride) \
  1058. { \
  1059. uint8_t full[24 * 17]; \
  1060. uint8_t halfH[272]; \
  1061. uint8_t halfV[256]; \
  1062. uint8_t halfHV[256]; \
  1063. \
  1064. copy_block17(full, src, 24, stride, 17); \
  1065. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1066. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1067. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1068. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1069. } \
  1070. \
  1071. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \
  1072. ptrdiff_t stride) \
  1073. { \
  1074. uint8_t full[24 * 17]; \
  1075. uint8_t halfH[272]; \
  1076. \
  1077. copy_block17(full, src, 24, stride, 17); \
  1078. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1079. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1080. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1081. } \
  1082. \
  1083. void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1084. ptrdiff_t stride) \
  1085. { \
  1086. uint8_t full[24 * 17]; \
  1087. uint8_t halfH[272]; \
  1088. uint8_t halfV[256]; \
  1089. uint8_t halfHV[256]; \
  1090. \
  1091. copy_block17(full, src, 24, stride, 17); \
  1092. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1093. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1094. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1095. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1096. } \
  1097. \
  1098. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \
  1099. ptrdiff_t stride) \
  1100. { \
  1101. uint8_t full[24 * 17]; \
  1102. uint8_t halfH[272]; \
  1103. \
  1104. copy_block17(full, src, 24, stride, 17); \
  1105. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1106. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1107. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1108. } \
  1109. \
  1110. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \
  1111. ptrdiff_t stride) \
  1112. { \
  1113. uint8_t halfH[272]; \
  1114. \
  1115. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1116. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1117. }
#define op_avg(a, b)        a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1)
#define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1)
#define op_put(a, b)        a = cm[((b) + 16) >> 5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]

QPEL_MC(0, put_,        _,        op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_,        _,        op_avg)

#undef op_avg
#undef op_put
#undef op_put_no_rnd
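/*
 * Editorial note (not in the original source): the mpeg4_qpel*_lowpass
 * routines generated above apply the MPEG-4 quarter-pel half-pel filter,
 * approximately (-1, 3, -6, 20, 20, -6, 3, -1) / 32 with mirrored edge
 * taps; the op_* macros then clip through the cm[] crop table, "+ 16"
 * giving normal rounding and "+ 15" the no-rounding variant.  In the
 * qpelN_mcXY names, X is the horizontal and Y the vertical quarter-pel
 * phase (0-3), so mc00 is the integer-pel copy and mc22 the
 * half-pel/half-pel case.
 */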
void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_8_c(dst, src, stride, 8);
}

void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_8_c(dst, src, stride, 8);
}

void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_8_c(dst, src, stride, 16);
}

void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_8_c(dst, src, stride, 16);
}

#define put_qpel8_mc00_c         ff_put_pixels8x8_c
#define avg_qpel8_mc00_c         ff_avg_pixels8x8_c
#define put_qpel16_mc00_c        ff_put_pixels16x16_c
#define avg_qpel16_mc00_c        ff_avg_pixels16x16_c
#define put_no_rnd_qpel8_mc00_c  ff_put_pixels8x8_c
#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int h)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = cm[(9 * (src[0] + src[1]) - (src[-1] + src[2]) + 8) >> 4];
        dst[1] = cm[(9 * (src[1] + src[2]) - (src[0] + src[3]) + 8) >> 4];
        dst[2] = cm[(9 * (src[2] + src[3]) - (src[1] + src[4]) + 8) >> 4];
        dst[3] = cm[(9 * (src[3] + src[4]) - (src[2] + src[5]) + 8) >> 4];
        dst[4] = cm[(9 * (src[4] + src[5]) - (src[3] + src[6]) + 8) >> 4];
        dst[5] = cm[(9 * (src[5] + src[6]) - (src[4] + src[7]) + 8) >> 4];
        dst[6] = cm[(9 * (src[6] + src[7]) - (src[5] + src[8]) + 8) >> 4];
        dst[7] = cm[(9 * (src[7] + src[8]) - (src[6] + src[9]) + 8) >> 4];
        dst += dstStride;
        src += srcStride;
    }
}

static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int w)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < w; i++) {
        const int src_1 = src[-srcStride];
        const int src0  = src[0];
        const int src1  = src[srcStride];
        const int src2  = src[2 * srcStride];
        const int src3  = src[3 * srcStride];
        const int src4  = src[4 * srcStride];
        const int src5  = src[5 * srcStride];
        const int src6  = src[6 * srcStride];
        const int src7  = src[7 * srcStride];
        const int src8  = src[8 * srcStride];
        const int src9  = src[9 * srcStride];
        dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
        dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0 + src3) + 8) >> 4];
        dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1 + src4) + 8) >> 4];
        dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2 + src5) + 8) >> 4];
        dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3 + src6) + 8) >> 4];
        dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4 + src7) + 8) >> 4];
        dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5 + src8) + 8) >> 4];
        dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6 + src9) + 8) >> 4];
        src++;
        dst++;
    }
}

static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src + 1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH + 8, stride, 8, 8);
}
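/*
 * Editorial note (not in the original source): the wmv2_mspel8 routines
 * implement the WMV2 half-pel filter (-1, 9, 9, -1) / 16 with rounding
 * ("+ 8" before the ">> 4"), horizontally and/or vertically; the
 * put_mspel8_mcXY_c wrappers combine them for the supported sub-pel
 * positions, analogous to the qpel wrappers above.
 */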
static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              int line_size, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        s += abs(pix1[8] - pix2[8]);
        s += abs(pix1[9] - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          int line_size, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
        s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          int line_size, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + line_size;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
        s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}

static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           int line_size, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + line_size;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
        s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}

static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             int line_size, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + line_size;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}

static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          int line_size, int h)
{
    int s = 0, i;
    uint8_t *pix3 = pix2 + line_size;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}
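/*
 * Editorial note (not in the original source): pix_abs16_c/pix_abs8_c are
 * plain SADs, while the _x2, _y2 and _xy2 variants compare against the
 * reference interpolated at half-pel positions with the same avg2/avg4
 * rounding the motion compensation uses, so the motion search can score
 * half-pel candidates without building a separate interpolated plane.
 */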
  1427. static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1428. {
  1429. int score1 = 0, score2 = 0, x, y;
  1430. for (y = 0; y < h; y++) {
  1431. for (x = 0; x < 16; x++)
  1432. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1433. if (y + 1 < h) {
  1434. for (x = 0; x < 15; x++)
  1435. score2 += FFABS(s1[x] - s1[x + stride] -
  1436. s1[x + 1] + s1[x + stride + 1]) -
  1437. FFABS(s2[x] - s2[x + stride] -
  1438. s2[x + 1] + s2[x + stride + 1]);
  1439. }
  1440. s1 += stride;
  1441. s2 += stride;
  1442. }
  1443. if (c)
  1444. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1445. else
  1446. return score1 + FFABS(score2) * 8;
  1447. }
  1448. static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1449. {
  1450. int score1 = 0, score2 = 0, x, y;
  1451. for (y = 0; y < h; y++) {
  1452. for (x = 0; x < 8; x++)
  1453. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1454. if (y + 1 < h) {
  1455. for (x = 0; x < 7; x++)
  1456. score2 += FFABS(s1[x] - s1[x + stride] -
  1457. s1[x + 1] + s1[x + stride + 1]) -
  1458. FFABS(s2[x] - s2[x + stride] -
  1459. s2[x + 1] + s2[x + stride + 1]);
  1460. }
  1461. s1 += stride;
  1462. s2 += stride;
  1463. }
  1464. if (c)
  1465. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1466. else
  1467. return score1 + FFABS(score2) * 8;
  1468. }
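/* try_8x8basis()/add_8x8basis() support the encoder's quantizer noise
 * shaping: try_8x8basis_c() estimates the weighted squared error that would
 * result from adding 'scale' times the given basis function to the residual,
 * and add_8x8basis_c() actually applies that change. The shifts implement
 * BASIS_SHIFT/RECON_SHIFT fixed-point rounding. */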
  1469. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64],
  1470. int16_t basis[64], int scale)
  1471. {
  1472. int i;
  1473. unsigned int sum = 0;
  1474. for (i = 0; i < 8 * 8; i++) {
  1475. int b = rem[i] + ((basis[i] * scale +
  1476. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1477. (BASIS_SHIFT - RECON_SHIFT));
  1478. int w = weight[i];
  1479. b >>= RECON_SHIFT;
  1480. assert(-512 < b && b < 512);
  1481. sum += (w * b) * (w * b) >> 4;
  1482. }
  1483. return sum >> 2;
  1484. }
  1485. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale)
  1486. {
  1487. int i;
  1488. for (i = 0; i < 8 * 8; i++)
  1489. rem[i] += (basis[i] * scale +
  1490. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1491. (BASIS_SHIFT - RECON_SHIFT);
  1492. }
  1493. static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
  1494. int stride, int h)
  1495. {
  1496. return 0;
  1497. }
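/* Fill a me_cmp_func[6] table with the comparison functions selected by the
 * FF_CMP_* value in the low byte of 'type'; all six slots (full-size and
 * sub-block variants) get the same metric. A hypothetical encoder-side use,
 * assuming a DSPContext 'dsp' that has already been initialized:
 *
 *     ff_set_cmp(&dsp, dsp.me_cmp, FF_CMP_SATD);     // full-pel search
 *     ff_set_cmp(&dsp, dsp.me_sub_cmp, FF_CMP_SATD); // sub-pel refinement
 */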
  1498. void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
  1499. {
  1500. int i;
  1501. memset(cmp, 0, sizeof(void *) * 6);
  1502. for (i = 0; i < 6; i++) {
  1503. switch (type & 0xFF) {
  1504. case FF_CMP_SAD:
  1505. cmp[i] = c->sad[i];
  1506. break;
  1507. case FF_CMP_SATD:
  1508. cmp[i] = c->hadamard8_diff[i];
  1509. break;
  1510. case FF_CMP_SSE:
  1511. cmp[i] = c->sse[i];
  1512. break;
  1513. case FF_CMP_DCT:
  1514. cmp[i] = c->dct_sad[i];
  1515. break;
  1516. case FF_CMP_DCT264:
  1517. cmp[i] = c->dct264_sad[i];
  1518. break;
  1519. case FF_CMP_DCTMAX:
  1520. cmp[i] = c->dct_max[i];
  1521. break;
  1522. case FF_CMP_PSNR:
  1523. cmp[i] = c->quant_psnr[i];
  1524. break;
  1525. case FF_CMP_BIT:
  1526. cmp[i] = c->bit[i];
  1527. break;
  1528. case FF_CMP_RD:
  1529. cmp[i] = c->rd[i];
  1530. break;
  1531. case FF_CMP_VSAD:
  1532. cmp[i] = c->vsad[i];
  1533. break;
  1534. case FF_CMP_VSSE:
  1535. cmp[i] = c->vsse[i];
  1536. break;
  1537. case FF_CMP_ZERO:
  1538. cmp[i] = zero_cmp;
  1539. break;
  1540. case FF_CMP_NSSE:
  1541. cmp[i] = c->nsse[i];
  1542. break;
  1543. default:
  1544. av_log(NULL, AV_LOG_ERROR,
  1545. "internal error in cmp function selection\n");
  1546. }
  1547. }
  1548. }
  1549. #define BUTTERFLY2(o1, o2, i1, i2) \
  1550. o1 = (i1) + (i2); \
  1551. o2 = (i1) - (i2);
  1552. #define BUTTERFLY1(x, y) \
  1553. { \
  1554. int a, b; \
  1555. a = x; \
  1556. b = y; \
  1557. x = a + b; \
  1558. y = a - b; \
  1559. }
  1560. #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
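/* SATD: 8x8 Hadamard transform of the src-dst difference, followed by a sum
 * of absolute transform coefficients. BUTTERFLYA() folds the final butterfly
 * stage into the absolute-value accumulation. */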
  1561. static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
  1562. uint8_t *src, int stride, int h)
  1563. {
  1564. int i, temp[64], sum = 0;
  1565. assert(h == 8);
  1566. for (i = 0; i < 8; i++) {
  1567. // FIXME: try pointer walks
  1568. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1569. src[stride * i + 0] - dst[stride * i + 0],
  1570. src[stride * i + 1] - dst[stride * i + 1]);
  1571. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1572. src[stride * i + 2] - dst[stride * i + 2],
  1573. src[stride * i + 3] - dst[stride * i + 3]);
  1574. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1575. src[stride * i + 4] - dst[stride * i + 4],
  1576. src[stride * i + 5] - dst[stride * i + 5]);
  1577. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1578. src[stride * i + 6] - dst[stride * i + 6],
  1579. src[stride * i + 7] - dst[stride * i + 7]);
  1580. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1581. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1582. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1583. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1584. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1585. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1586. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1587. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1588. }
  1589. for (i = 0; i < 8; i++) {
  1590. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1591. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1592. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1593. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1594. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1595. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1596. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1597. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1598. sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
  1599. BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
  1600. BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
  1601. BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1602. }
  1603. return sum;
  1604. }
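/* Intra variant: transforms the source block itself (no reference) and
 * subtracts the DC coefficient at the end so that the block mean does not
 * dominate the score. */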
  1605. static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
  1606. uint8_t *dummy, int stride, int h)
  1607. {
  1608. int i, temp[64], sum = 0;
  1609. assert(h == 8);
  1610. for (i = 0; i < 8; i++) {
  1611. // FIXME: try pointer walks
  1612. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1613. src[stride * i + 0], src[stride * i + 1]);
  1614. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1615. src[stride * i + 2], src[stride * i + 3]);
  1616. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1617. src[stride * i + 4], src[stride * i + 5]);
  1618. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1619. src[stride * i + 6], src[stride * i + 7]);
  1620. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1621. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1622. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1623. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1624. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1625. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1626. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1627. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1628. }
  1629. for (i = 0; i < 8; i++) {
  1630. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1631. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1632. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1633. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1634. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1635. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1636. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1637. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1638. sum +=
  1639. BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
  1640. + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
  1641. + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
  1642. + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1643. }
  1644. sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean
  1645. return sum;
  1646. }
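/* DCT SAD: forward DCT of the block difference, then the sum of the absolute
 * values of all 64 coefficients. */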
  1647. static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1648. uint8_t *src2, int stride, int h)
  1649. {
  1650. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1651. assert(h == 8);
  1652. s->dsp.diff_pixels(temp, src1, src2, stride);
  1653. s->dsp.fdct(temp);
  1654. return s->dsp.sum_abs_dctelem(temp);
  1655. }
  1656. #if CONFIG_GPL
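/* 1-D stage of the 8x8 integer transform used by H.264 High profile; the
 * dct264_sad metric built from it is only compiled when GPL code is
 * enabled. */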
  1657. #define DCT8_1D \
  1658. { \
  1659. const int s07 = SRC(0) + SRC(7); \
  1660. const int s16 = SRC(1) + SRC(6); \
  1661. const int s25 = SRC(2) + SRC(5); \
  1662. const int s34 = SRC(3) + SRC(4); \
  1663. const int a0 = s07 + s34; \
  1664. const int a1 = s16 + s25; \
  1665. const int a2 = s07 - s34; \
  1666. const int a3 = s16 - s25; \
  1667. const int d07 = SRC(0) - SRC(7); \
  1668. const int d16 = SRC(1) - SRC(6); \
  1669. const int d25 = SRC(2) - SRC(5); \
  1670. const int d34 = SRC(3) - SRC(4); \
  1671. const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
  1672. const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
  1673. const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
  1674. const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
  1675. DST(0, a0 + a1); \
  1676. DST(1, a4 + (a7 >> 2)); \
  1677. DST(2, a2 + (a3 >> 1)); \
  1678. DST(3, a5 + (a6 >> 2)); \
  1679. DST(4, a0 - a1); \
  1680. DST(5, a6 - (a5 >> 2)); \
  1681. DST(6, (a2 >> 1) - a3); \
  1682. DST(7, (a4 >> 2) - a7); \
  1683. }
  1684. static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1685. uint8_t *src2, int stride, int h)
  1686. {
  1687. int16_t dct[8][8];
  1688. int i, sum = 0;
  1689. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  1690. #define SRC(x) dct[i][x]
  1691. #define DST(x, v) dct[i][x] = v
  1692. for (i = 0; i < 8; i++)
  1693. DCT8_1D
  1694. #undef SRC
  1695. #undef DST
  1696. #define SRC(x) dct[x][i]
  1697. #define DST(x, v) sum += FFABS(v)
  1698. for (i = 0; i < 8; i++)
  1699. DCT8_1D
  1700. #undef SRC
  1701. #undef DST
  1702. return sum;
  1703. }
  1704. #endif
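/* DCT max: forward DCT of the block difference, returning the largest
 * absolute coefficient instead of their sum. */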
  1705. static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
  1706. uint8_t *src2, int stride, int h)
  1707. {
  1708. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1709. int sum = 0, i;
  1710. assert(h == 8);
  1711. s->dsp.diff_pixels(temp, src1, src2, stride);
  1712. s->dsp.fdct(temp);
  1713. for (i = 0; i < 64; i++)
  1714. sum = FFMAX(sum, FFABS(temp[i]));
  1715. return sum;
  1716. }
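/* Quantization PSNR metric: runs the block difference through the encoder's
 * quantize -> dequantize -> IDCT round trip and returns the squared error
 * between the reconstructed difference and the original difference block. */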
  1717. static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
  1718. uint8_t *src2, int stride, int h)
  1719. {
  1720. LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
  1721. int16_t *const bak = temp + 64;
  1722. int sum = 0, i;
  1723. assert(h == 8);
  1724. s->mb_intra = 0;
  1725. s->dsp.diff_pixels(temp, src1, src2, stride);
  1726. memcpy(bak, temp, 64 * sizeof(int16_t));
  1727. s->block_last_index[0 /* FIXME */] =
  1728. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1729. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1730. ff_simple_idct_8(temp); // FIXME
  1731. for (i = 0; i < 64; i++)
  1732. sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
  1733. return sum;
  1734. }
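/* Rate-distortion metric: quantizes the block, estimates the VLC bit cost of
 * the resulting coefficients (run/level codes plus escapes), reconstructs the
 * block and returns the SSE distortion plus a lambda-weighted bit cost, with
 * lambda derived from qscale^2. */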
  1735. static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1736. int stride, int h)
  1737. {
  1738. const uint8_t *scantable = s->intra_scantable.permutated;
  1739. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1740. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  1741. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  1742. int i, last, run, bits, level, distortion, start_i;
  1743. const int esc_length = s->ac_esc_length;
  1744. uint8_t *length, *last_length;
  1745. assert(h == 8);
  1746. copy_block8(lsrc1, src1, 8, stride, 8);
  1747. copy_block8(lsrc2, src2, 8, stride, 8);
  1748. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  1749. s->block_last_index[0 /* FIXME */] =
  1750. last =
  1751. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1752. bits = 0;
  1753. if (s->mb_intra) {
  1754. start_i = 1;
  1755. length = s->intra_ac_vlc_length;
  1756. last_length = s->intra_ac_vlc_last_length;
  1757. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  1758. } else {
  1759. start_i = 0;
  1760. length = s->inter_ac_vlc_length;
  1761. last_length = s->inter_ac_vlc_last_length;
  1762. }
  1763. if (last >= start_i) {
  1764. run = 0;
  1765. for (i = start_i; i < last; i++) {
  1766. int j = scantable[i];
  1767. level = temp[j];
  1768. if (level) {
  1769. level += 64;
  1770. if ((level & (~127)) == 0)
  1771. bits += length[UNI_AC_ENC_INDEX(run, level)];
  1772. else
  1773. bits += esc_length;
  1774. run = 0;
  1775. } else
  1776. run++;
  1777. }
  1778. i = scantable[last];
  1779. level = temp[i] + 64;
  1780. assert(level - 64);
  1781. if ((level & (~127)) == 0) {
  1782. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  1783. } else
  1784. bits += esc_length;
  1785. }
  1786. if (last >= 0) {
  1787. if (s->mb_intra)
  1788. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  1789. else
  1790. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1791. }
  1792. s->dsp.idct_add(lsrc2, 8, temp);
  1793. distortion = s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  1794. return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
  1795. }
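/* Bit metric: the same VLC bit-cost estimation as rd8x8_c(), but without the
 * reconstruction/distortion term. */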
  1796. static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1797. int stride, int h)
  1798. {
  1799. const uint8_t *scantable = s->intra_scantable.permutated;
  1800. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1801. int i, last, run, bits, level, start_i;
  1802. const int esc_length = s->ac_esc_length;
  1803. uint8_t *length, *last_length;
  1804. assert(h == 8);
  1805. s->dsp.diff_pixels(temp, src1, src2, stride);
  1806. s->block_last_index[0 /* FIXME */] =
  1807. last =
  1808. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1809. bits = 0;
  1810. if (s->mb_intra) {
  1811. start_i = 1;
  1812. length = s->intra_ac_vlc_length;
  1813. last_length = s->intra_ac_vlc_last_length;
  1814. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  1815. } else {
  1816. start_i = 0;
  1817. length = s->inter_ac_vlc_length;
  1818. last_length = s->inter_ac_vlc_last_length;
  1819. }
  1820. if (last >= start_i) {
  1821. run = 0;
  1822. for (i = start_i; i < last; i++) {
  1823. int j = scantable[i];
  1824. level = temp[j];
  1825. if (level) {
  1826. level += 64;
  1827. if ((level & (~127)) == 0)
  1828. bits += length[UNI_AC_ENC_INDEX(run, level)];
  1829. else
  1830. bits += esc_length;
  1831. run = 0;
  1832. } else
  1833. run++;
  1834. }
  1835. i = scantable[last];
  1836. level = temp[i] + 64;
  1837. assert(level - 64);
  1838. if ((level & (~127)) == 0)
  1839. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  1840. else
  1841. bits += esc_length;
  1842. }
  1843. return bits;
  1844. }
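/* Vertical SAD/SSE metrics: sum the absolute (vsad) or squared (vsse)
 * vertical gradients. The _intra variants look at a single block, the plain
 * variants at the difference between two blocks; these are typically used for
 * the interlaced (field vs. frame) DCT decision. */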
  1845. #define VSAD_INTRA(size) \
  1846. static int vsad_intra ## size ## _c(MpegEncContext *c, \
  1847. uint8_t *s, uint8_t *dummy, \
  1848. int stride, int h) \
  1849. { \
  1850. int score = 0, x, y; \
  1851. \
  1852. for (y = 1; y < h; y++) { \
  1853. for (x = 0; x < size; x += 4) { \
  1854. score += FFABS(s[x] - s[x + stride]) + \
  1855. FFABS(s[x + 1] - s[x + stride + 1]) + \
  1856. FFABS(s[x + 2] - s[x + 2 + stride]) + \
  1857. FFABS(s[x + 3] - s[x + 3 + stride]); \
  1858. } \
  1859. s += stride; \
  1860. } \
  1861. \
  1862. return score; \
  1863. }
  1864. VSAD_INTRA(8)
  1865. VSAD_INTRA(16)
  1866. static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  1867. int stride, int h)
  1868. {
  1869. int score = 0, x, y;
  1870. for (y = 1; y < h; y++) {
  1871. for (x = 0; x < 16; x++)
  1872. score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  1873. s1 += stride;
  1874. s2 += stride;
  1875. }
  1876. return score;
  1877. }
  1878. #define SQ(a) ((a) * (a))
  1879. #define VSSE_INTRA(size) \
  1880. static int vsse_intra ## size ## _c(MpegEncContext *c, \
  1881. uint8_t *s, uint8_t *dummy, \
  1882. int stride, int h) \
  1883. { \
  1884. int score = 0, x, y; \
  1885. \
  1886. for (y = 1; y < h; y++) { \
  1887. for (x = 0; x < size; x += 4) { \
  1888. score += SQ(s[x] - s[x + stride]) + \
  1889. SQ(s[x + 1] - s[x + stride + 1]) + \
  1890. SQ(s[x + 2] - s[x + stride + 2]) + \
  1891. SQ(s[x + 3] - s[x + stride + 3]); \
  1892. } \
  1893. s += stride; \
  1894. } \
  1895. \
  1896. return score; \
  1897. }
  1898. VSSE_INTRA(8)
  1899. VSSE_INTRA(16)
  1900. static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  1901. int stride, int h)
  1902. {
  1903. int score = 0, x, y;
  1904. for (y = 1; y < h; y++) {
  1905. for (x = 0; x < 16; x++)
  1906. score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  1907. s1 += stride;
  1908. s2 += stride;
  1909. }
  1910. return score;
  1911. }
  1912. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  1913. int size)
  1914. {
  1915. int score = 0, i;
  1916. for (i = 0; i < size; i++)
  1917. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  1918. return score;
  1919. }
  1920. #define WRAPPER8_16_SQ(name8, name16) \
  1921. static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
  1922. int stride, int h) \
  1923. { \
  1924. int score = 0; \
  1925. \
  1926. score += name8(s, dst, src, stride, 8); \
  1927. score += name8(s, dst + 8, src + 8, stride, 8); \
  1928. if (h == 16) { \
  1929. dst += 8 * stride; \
  1930. src += 8 * stride; \
  1931. score += name8(s, dst, src, stride, 8); \
  1932. score += name8(s, dst + 8, src + 8, stride, 8); \
  1933. } \
  1934. return score; \
  1935. }
  1936. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  1937. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  1938. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  1939. #if CONFIG_GPL
  1940. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  1941. #endif
  1942. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  1943. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  1944. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  1945. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
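/* Float clipping helpers. When min < 0 < max, same-sign IEEE-754 floats can
 * be compared as unsigned integers, so vector_clipf_c_opposite_sign() clips
 * with pure integer operations: bit patterns above 'mini' (values more
 * negative than min) clamp to min, and patterns whose sign-flipped value
 * exceeds 'maxisign' (positive values above max) clamp to max. */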
  1946. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  1947. uint32_t maxi, uint32_t maxisign)
  1948. {
  1949. if (a > mini)
  1950. return mini;
  1951. else if ((a ^ (1U << 31)) > maxisign)
  1952. return maxi;
  1953. else
  1954. return a;
  1955. }
  1956. static void vector_clipf_c_opposite_sign(float *dst, const float *src,
  1957. float *min, float *max, int len)
  1958. {
  1959. int i;
  1960. uint32_t mini = *(uint32_t *) min;
  1961. uint32_t maxi = *(uint32_t *) max;
  1962. uint32_t maxisign = maxi ^ (1U << 31);
  1963. uint32_t *dsti = (uint32_t *) dst;
  1964. const uint32_t *srci = (const uint32_t *) src;
  1965. for (i = 0; i < len; i += 8) {
  1966. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  1967. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  1968. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  1969. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  1970. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  1971. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  1972. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  1973. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  1974. }
  1975. }
  1976. static void vector_clipf_c(float *dst, const float *src,
  1977. float min, float max, int len)
  1978. {
  1979. int i;
  1980. if (min < 0 && max > 0) {
  1981. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  1982. } else {
  1983. for (i = 0; i < len; i += 8) {
  1984. dst[i] = av_clipf(src[i], min, max);
  1985. dst[i + 1] = av_clipf(src[i + 1], min, max);
  1986. dst[i + 2] = av_clipf(src[i + 2], min, max);
  1987. dst[i + 3] = av_clipf(src[i + 3], min, max);
  1988. dst[i + 4] = av_clipf(src[i + 4], min, max);
  1989. dst[i + 5] = av_clipf(src[i + 5], min, max);
  1990. dst[i + 6] = av_clipf(src[i + 6], min, max);
  1991. dst[i + 7] = av_clipf(src[i + 7], min, max);
  1992. }
  1993. }
  1994. }
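/* Dot product of two int16 vectors; the _and_madd variant additionally adds
 * mul * v3[] into v1[] while accumulating, fusing a predictor update of the
 * kind used by some lossless audio decoders into a single pass. */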
  1995. static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
  1996. int order)
  1997. {
  1998. int res = 0;
  1999. while (order--)
2000. res += *v1++ * *v2++;
  2001. return res;
  2002. }
  2003. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
  2004. const int16_t *v3,
  2005. int order, int mul)
  2006. {
  2007. int res = 0;
  2008. while (order--) {
  2009. res += *v1 * *v2++;
  2010. *v1++ += mul * *v3++;
  2011. }
  2012. return res;
  2013. }
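/* Clip each element of src into [min, max]. The loop is unrolled by 8, so
 * len is expected to be a positive multiple of 8. */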
  2014. static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
  2015. int32_t max, unsigned int len)
  2016. {
  2017. do {
  2018. *dst++ = av_clip(*src++, min, max);
  2019. *dst++ = av_clip(*src++, min, max);
  2020. *dst++ = av_clip(*src++, min, max);
  2021. *dst++ = av_clip(*src++, min, max);
  2022. *dst++ = av_clip(*src++, min, max);
  2023. *dst++ = av_clip(*src++, min, max);
  2024. *dst++ = av_clip(*src++, min, max);
  2025. *dst++ = av_clip(*src++, min, max);
  2026. len -= 8;
  2027. } while (len > 0);
  2028. }
  2029. static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
  2030. {
  2031. ff_j_rev_dct(block);
  2032. put_pixels_clamped_c(block, dest, line_size);
  2033. }
  2034. static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
  2035. {
  2036. ff_j_rev_dct(block);
  2037. add_pixels_clamped_c(block, dest, line_size);
  2038. }
2039. /* Replicate the edges of an image of size width x height: extend it by 'w' pixels to the left/right and 'h' lines above/below, as selected by 'sides'. */
  2040. // FIXME: Check that this is OK for MPEG-4 interlaced.
  2041. static void draw_edges_8_c(uint8_t *buf, int wrap, int width, int height,
  2042. int w, int h, int sides)
  2043. {
  2044. uint8_t *ptr = buf, *last_line;
  2045. int i;
  2046. /* left and right */
  2047. for (i = 0; i < height; i++) {
  2048. memset(ptr - w, ptr[0], w);
  2049. memset(ptr + width, ptr[width - 1], w);
  2050. ptr += wrap;
  2051. }
  2052. /* top and bottom + corners */
  2053. buf -= w;
  2054. last_line = buf + (height - 1) * wrap;
  2055. if (sides & EDGE_TOP)
  2056. for (i = 0; i < h; i++)
  2057. // top
  2058. memcpy(buf - (i + 1) * wrap, buf, width + w + w);
  2059. if (sides & EDGE_BOTTOM)
  2060. for (i = 0; i < h; i++)
  2061. // bottom
  2062. memcpy(last_line + (i + 1) * wrap, last_line, width + w + w);
  2063. }
  2064. static void clear_block_8_c(int16_t *block)
  2065. {
  2066. memset(block, 0, sizeof(int16_t) * 64);
  2067. }
  2068. static void clear_blocks_8_c(int16_t *blocks)
  2069. {
  2070. memset(blocks, 0, sizeof(int16_t) * 6 * 64);
  2071. }
  2072. /* init static data */
  2073. av_cold void ff_dsputil_static_init(void)
  2074. {
  2075. int i;
  2076. for (i = 0; i < 512; i++)
  2077. ff_square_tab[i] = (i - 256) * (i - 256);
  2078. }
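/* Fill a DSPContext with the C reference implementations: pick the FDCT
 * (encoders only) and the IDCT according to bit depth and the requested
 * dct_algo/idct_algo, install the comparison and pixel helpers, then let the
 * per-architecture init functions override whatever they can accelerate and
 * finally derive the IDCT coefficient permutation. */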
  2079. av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2080. {
  2081. const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
  2082. #if CONFIG_ENCODERS
  2083. if (avctx->bits_per_raw_sample == 10) {
  2084. c->fdct = ff_jpeg_fdct_islow_10;
  2085. c->fdct248 = ff_fdct248_islow_10;
  2086. } else {
  2087. if (avctx->dct_algo == FF_DCT_FASTINT) {
  2088. c->fdct = ff_fdct_ifast;
  2089. c->fdct248 = ff_fdct_ifast248;
  2090. } else if (avctx->dct_algo == FF_DCT_FAAN) {
  2091. c->fdct = ff_faandct;
  2092. c->fdct248 = ff_faandct248;
  2093. } else {
  2094. c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
  2095. c->fdct248 = ff_fdct248_islow_8;
  2096. }
  2097. }
  2098. #endif /* CONFIG_ENCODERS */
  2099. if (avctx->bits_per_raw_sample == 10) {
  2100. c->idct_put = ff_simple_idct_put_10;
  2101. c->idct_add = ff_simple_idct_add_10;
  2102. c->idct = ff_simple_idct_10;
  2103. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2104. } else {
  2105. if (avctx->idct_algo == FF_IDCT_INT) {
  2106. c->idct_put = jref_idct_put;
  2107. c->idct_add = jref_idct_add;
  2108. c->idct = ff_j_rev_dct;
  2109. c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
  2110. } else if (avctx->idct_algo == FF_IDCT_FAAN) {
  2111. c->idct_put = ff_faanidct_put;
  2112. c->idct_add = ff_faanidct_add;
  2113. c->idct = ff_faanidct;
  2114. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2115. } else { // accurate/default
  2116. c->idct_put = ff_simple_idct_put_8;
  2117. c->idct_add = ff_simple_idct_add_8;
  2118. c->idct = ff_simple_idct_8;
  2119. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2120. }
  2121. }
  2122. c->diff_pixels = diff_pixels_c;
  2123. c->put_pixels_clamped = put_pixels_clamped_c;
  2124. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  2125. c->add_pixels_clamped = add_pixels_clamped_c;
  2126. c->sum_abs_dctelem = sum_abs_dctelem_c;
  2127. c->gmc1 = gmc1_c;
  2128. c->gmc = ff_gmc_c;
  2129. c->pix_sum = pix_sum_c;
  2130. c->pix_norm1 = pix_norm1_c;
  2131. c->fill_block_tab[0] = fill_block16_c;
  2132. c->fill_block_tab[1] = fill_block8_c;
2133. /* TODO: index [0] is for 16x16 blocks, [1] for 8x8 blocks */
  2134. c->pix_abs[0][0] = pix_abs16_c;
  2135. c->pix_abs[0][1] = pix_abs16_x2_c;
  2136. c->pix_abs[0][2] = pix_abs16_y2_c;
  2137. c->pix_abs[0][3] = pix_abs16_xy2_c;
  2138. c->pix_abs[1][0] = pix_abs8_c;
  2139. c->pix_abs[1][1] = pix_abs8_x2_c;
  2140. c->pix_abs[1][2] = pix_abs8_y2_c;
  2141. c->pix_abs[1][3] = pix_abs8_xy2_c;
  2142. #define dspfunc(PFX, IDX, NUM) \
  2143. c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \
  2144. c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \
  2145. c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \
  2146. c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \
  2147. c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \
  2148. c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \
  2149. c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \
  2150. c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \
  2151. c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \
  2152. c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \
  2153. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  2154. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  2155. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  2156. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  2157. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  2158. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  2159. dspfunc(put_qpel, 0, 16);
  2160. dspfunc(put_qpel, 1, 8);
  2161. dspfunc(put_no_rnd_qpel, 0, 16);
  2162. dspfunc(put_no_rnd_qpel, 1, 8);
  2163. dspfunc(avg_qpel, 0, 16);
  2164. dspfunc(avg_qpel, 1, 8);
  2165. #undef dspfunc
  2166. c->put_mspel_pixels_tab[0] = ff_put_pixels8x8_c;
  2167. c->put_mspel_pixels_tab[1] = put_mspel8_mc10_c;
  2168. c->put_mspel_pixels_tab[2] = put_mspel8_mc20_c;
  2169. c->put_mspel_pixels_tab[3] = put_mspel8_mc30_c;
  2170. c->put_mspel_pixels_tab[4] = put_mspel8_mc02_c;
  2171. c->put_mspel_pixels_tab[5] = put_mspel8_mc12_c;
  2172. c->put_mspel_pixels_tab[6] = put_mspel8_mc22_c;
  2173. c->put_mspel_pixels_tab[7] = put_mspel8_mc32_c;
  2174. #define SET_CMP_FUNC(name) \
  2175. c->name[0] = name ## 16_c; \
  2176. c->name[1] = name ## 8x8_c;
  2177. SET_CMP_FUNC(hadamard8_diff)
  2178. c->hadamard8_diff[4] = hadamard8_intra16_c;
  2179. c->hadamard8_diff[5] = hadamard8_intra8x8_c;
  2180. SET_CMP_FUNC(dct_sad)
  2181. SET_CMP_FUNC(dct_max)
  2182. #if CONFIG_GPL
  2183. SET_CMP_FUNC(dct264_sad)
  2184. #endif
  2185. c->sad[0] = pix_abs16_c;
  2186. c->sad[1] = pix_abs8_c;
  2187. c->sse[0] = sse16_c;
  2188. c->sse[1] = sse8_c;
  2189. c->sse[2] = sse4_c;
  2190. SET_CMP_FUNC(quant_psnr)
  2191. SET_CMP_FUNC(rd)
  2192. SET_CMP_FUNC(bit)
  2193. c->vsad[0] = vsad16_c;
  2194. c->vsad[4] = vsad_intra16_c;
  2195. c->vsad[5] = vsad_intra8_c;
  2196. c->vsse[0] = vsse16_c;
  2197. c->vsse[4] = vsse_intra16_c;
  2198. c->vsse[5] = vsse_intra8_c;
  2199. c->nsse[0] = nsse16_c;
  2200. c->nsse[1] = nsse8_c;
  2201. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  2202. c->bswap_buf = bswap_buf;
  2203. c->bswap16_buf = bswap16_buf;
  2204. c->try_8x8basis = try_8x8basis_c;
  2205. c->add_8x8basis = add_8x8basis_c;
  2206. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  2207. c->scalarproduct_int16 = scalarproduct_int16_c;
  2208. c->vector_clip_int32 = vector_clip_int32_c;
  2209. c->vector_clipf = vector_clipf_c;
  2210. c->shrink[0] = av_image_copy_plane;
  2211. c->shrink[1] = ff_shrink22;
  2212. c->shrink[2] = ff_shrink44;
  2213. c->shrink[3] = ff_shrink88;
  2214. c->add_pixels8 = add_pixels8_c;
  2215. c->draw_edges = draw_edges_8_c;
  2216. c->clear_block = clear_block_8_c;
  2217. c->clear_blocks = clear_blocks_8_c;
  2218. switch (avctx->bits_per_raw_sample) {
  2219. case 9:
  2220. case 10:
  2221. c->get_pixels = get_pixels_16_c;
  2222. break;
  2223. default:
  2224. c->get_pixels = get_pixels_8_c;
  2225. break;
  2226. }
  2227. if (ARCH_ARM)
  2228. ff_dsputil_init_arm(c, avctx, high_bit_depth);
  2229. if (ARCH_BFIN)
  2230. ff_dsputil_init_bfin(c, avctx, high_bit_depth);
  2231. if (ARCH_PPC)
  2232. ff_dsputil_init_ppc(c, avctx, high_bit_depth);
  2233. if (ARCH_X86)
  2234. ff_dsputil_init_x86(c, avctx, high_bit_depth);
  2235. ff_init_scantable_permutation(c->idct_permutation,
  2236. c->idct_permutation_type);
  2237. }