  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "libavutil/attributes.h"
  29. #include "libavutil/imgutils.h"
  30. #include "avcodec.h"
  31. #include "copy_block.h"
  32. #include "dct.h"
  33. #include "dsputil.h"
  34. #include "simple_idct.h"
  35. #include "faandct.h"
  36. #include "faanidct.h"
  37. #include "imgconvert.h"
  38. #include "mathops.h"
  39. #include "mpegvideo.h"
  40. #include "config.h"
  41. uint32_t ff_square_tab[512] = { 0, };
  42. #define BIT_DEPTH 16
  43. #include "dsputil_template.c"
  44. #undef BIT_DEPTH
  45. #define BIT_DEPTH 8
  46. #include "dsputil_template.c"
  47. // 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
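// e.g. on a 64-bit build ~0UL / 255 == 0x0101010101010101, so
// pb_7f == 0x7f7f7f7f7f7f7f7f and pb_80 == 0x8080808080808080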
  48. #define pb_7f (~0UL / 255 * 0x7f)
  49. #define pb_80 (~0UL / 255 * 0x80)
  50. /* Specific zigzag scan for 248 idct. NOTE that unlike the
  51. * specification, we interleave the fields */
  52. const uint8_t ff_zigzag248_direct[64] = {
  53. 0, 8, 1, 9, 16, 24, 2, 10,
  54. 17, 25, 32, 40, 48, 56, 33, 41,
  55. 18, 26, 3, 11, 4, 12, 19, 27,
  56. 34, 42, 49, 57, 50, 58, 35, 43,
  57. 20, 28, 5, 13, 6, 14, 21, 29,
  58. 36, 44, 51, 59, 52, 60, 37, 45,
  59. 22, 30, 7, 15, 23, 31, 38, 46,
  60. 53, 61, 54, 62, 39, 47, 55, 63,
  61. };
  62. const uint8_t ff_alternate_horizontal_scan[64] = {
  63. 0, 1, 2, 3, 8, 9, 16, 17,
  64. 10, 11, 4, 5, 6, 7, 15, 14,
  65. 13, 12, 19, 18, 24, 25, 32, 33,
  66. 26, 27, 20, 21, 22, 23, 28, 29,
  67. 30, 31, 34, 35, 40, 41, 48, 49,
  68. 42, 43, 36, 37, 38, 39, 44, 45,
  69. 46, 47, 50, 51, 56, 57, 58, 59,
  70. 52, 53, 54, 55, 60, 61, 62, 63,
  71. };
  72. const uint8_t ff_alternate_vertical_scan[64] = {
  73. 0, 8, 16, 24, 1, 9, 2, 10,
  74. 17, 25, 32, 40, 48, 56, 57, 49,
  75. 41, 33, 26, 18, 3, 11, 4, 12,
  76. 19, 27, 34, 42, 50, 58, 35, 43,
  77. 51, 59, 20, 28, 5, 13, 6, 14,
  78. 21, 29, 36, 44, 52, 60, 37, 45,
  79. 53, 61, 22, 30, 7, 15, 23, 31,
  80. 38, 46, 54, 62, 39, 47, 55, 63,
  81. };
  82. /* Input permutation for the simple_idct_mmx */
  83. static const uint8_t simple_mmx_permutation[64] = {
  84. 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
  85. 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
  86. 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
  87. 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
  88. 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
  89. 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
  90. 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
  91. 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
  92. };
  93. static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
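/* Build the permuted scan table for the given IDCT permutation:
 * permutated[i] is the permuted position of the i-th coefficient in scan
 * order, and raster_end[i] is the largest permuted index among the first
 * i+1 scan positions. */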
  94. av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
  95. const uint8_t *src_scantable)
  96. {
  97. int i, end;
  98. st->scantable = src_scantable;
  99. for (i = 0; i < 64; i++) {
  100. int j = src_scantable[i];
  101. st->permutated[i] = permutation[j];
  102. }
  103. end = -1;
  104. for (i = 0; i < 64; i++) {
  105. int j = st->permutated[i];
  106. if (j > end)
  107. end = j;
  108. st->raster_end[i] = end;
  109. }
  110. }
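/* Fill idct_permutation[] with the coefficient reordering required by the
 * selected IDCT implementation (e.g. FF_TRANSPOSE_IDCT_PERM swaps row and
 * column: ((i & 7) << 3) | (i >> 3)). */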
  111. av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
  112. int idct_permutation_type)
  113. {
  114. int i;
  115. switch (idct_permutation_type) {
  116. case FF_NO_IDCT_PERM:
  117. for (i = 0; i < 64; i++)
  118. idct_permutation[i] = i;
  119. break;
  120. case FF_LIBMPEG2_IDCT_PERM:
  121. for (i = 0; i < 64; i++)
  122. idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
  123. break;
  124. case FF_SIMPLE_IDCT_PERM:
  125. for (i = 0; i < 64; i++)
  126. idct_permutation[i] = simple_mmx_permutation[i];
  127. break;
  128. case FF_TRANSPOSE_IDCT_PERM:
  129. for (i = 0; i < 64; i++)
  130. idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
  131. break;
  132. case FF_PARTTRANS_IDCT_PERM:
  133. for (i = 0; i < 64; i++)
  134. idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
  135. break;
  136. case FF_SSE2_IDCT_PERM:
  137. for (i = 0; i < 64; i++)
  138. idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
  139. break;
  140. default:
  141. av_log(NULL, AV_LOG_ERROR,
  142. "Internal error, IDCT permutation not set\n");
  143. }
  144. }
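/* Sum of all pixels of a 16x16 block. */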
  145. static int pix_sum_c(uint8_t *pix, int line_size)
  146. {
  147. int s = 0, i, j;
  148. for (i = 0; i < 16; i++) {
  149. for (j = 0; j < 16; j += 8) {
  150. s += pix[0];
  151. s += pix[1];
  152. s += pix[2];
  153. s += pix[3];
  154. s += pix[4];
  155. s += pix[5];
  156. s += pix[6];
  157. s += pix[7];
  158. pix += 8;
  159. }
  160. pix += line_size - 16;
  161. }
  162. return s;
  163. }
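/* Sum of squared pixel values of a 16x16 block, via the ff_square_tab
 * lookup table. */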
  164. static int pix_norm1_c(uint8_t *pix, int line_size)
  165. {
  166. int s = 0, i, j;
  167. uint32_t *sq = ff_square_tab + 256;
  168. for (i = 0; i < 16; i++) {
  169. for (j = 0; j < 16; j += 8) {
  170. #if 0
  171. s += sq[pix[0]];
  172. s += sq[pix[1]];
  173. s += sq[pix[2]];
  174. s += sq[pix[3]];
  175. s += sq[pix[4]];
  176. s += sq[pix[5]];
  177. s += sq[pix[6]];
  178. s += sq[pix[7]];
  179. #else
  180. #if HAVE_FAST_64BIT
  181. register uint64_t x = *(uint64_t *) pix;
  182. s += sq[x & 0xff];
  183. s += sq[(x >> 8) & 0xff];
  184. s += sq[(x >> 16) & 0xff];
  185. s += sq[(x >> 24) & 0xff];
  186. s += sq[(x >> 32) & 0xff];
  187. s += sq[(x >> 40) & 0xff];
  188. s += sq[(x >> 48) & 0xff];
  189. s += sq[(x >> 56) & 0xff];
  190. #else
  191. register uint32_t x = *(uint32_t *) pix;
  192. s += sq[x & 0xff];
  193. s += sq[(x >> 8) & 0xff];
  194. s += sq[(x >> 16) & 0xff];
  195. s += sq[(x >> 24) & 0xff];
  196. x = *(uint32_t *) (pix + 4);
  197. s += sq[x & 0xff];
  198. s += sq[(x >> 8) & 0xff];
  199. s += sq[(x >> 16) & 0xff];
  200. s += sq[(x >> 24) & 0xff];
  201. #endif
  202. #endif
  203. pix += 8;
  204. }
  205. pix += line_size - 16;
  206. }
  207. return s;
  208. }
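/* Byte-swap w 32-bit words from src into dst (unrolled by 8). */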
  209. static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
  210. {
  211. int i;
  212. for (i = 0; i + 8 <= w; i += 8) {
  213. dst[i + 0] = av_bswap32(src[i + 0]);
  214. dst[i + 1] = av_bswap32(src[i + 1]);
  215. dst[i + 2] = av_bswap32(src[i + 2]);
  216. dst[i + 3] = av_bswap32(src[i + 3]);
  217. dst[i + 4] = av_bswap32(src[i + 4]);
  218. dst[i + 5] = av_bswap32(src[i + 5]);
  219. dst[i + 6] = av_bswap32(src[i + 6]);
  220. dst[i + 7] = av_bswap32(src[i + 7]);
  221. }
  222. for (; i < w; i++)
  223. dst[i + 0] = av_bswap32(src[i + 0]);
  224. }
  225. static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
  226. {
  227. while (len--)
  228. *dst++ = av_bswap16(*src++);
  229. }
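/* sse4/sse8/sse16: sum of squared differences between two blocks of
 * width 4/8/16 over h lines. */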
  230. static int sse4_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  231. {
  232. int s = 0, i;
  233. uint32_t *sq = ff_square_tab + 256;
  234. for (i = 0; i < h; i++) {
  235. s += sq[pix1[0] - pix2[0]];
  236. s += sq[pix1[1] - pix2[1]];
  237. s += sq[pix1[2] - pix2[2]];
  238. s += sq[pix1[3] - pix2[3]];
  239. pix1 += line_size;
  240. pix2 += line_size;
  241. }
  242. return s;
  243. }
  244. static int sse8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  245. {
  246. int s = 0, i;
  247. uint32_t *sq = ff_square_tab + 256;
  248. for (i = 0; i < h; i++) {
  249. s += sq[pix1[0] - pix2[0]];
  250. s += sq[pix1[1] - pix2[1]];
  251. s += sq[pix1[2] - pix2[2]];
  252. s += sq[pix1[3] - pix2[3]];
  253. s += sq[pix1[4] - pix2[4]];
  254. s += sq[pix1[5] - pix2[5]];
  255. s += sq[pix1[6] - pix2[6]];
  256. s += sq[pix1[7] - pix2[7]];
  257. pix1 += line_size;
  258. pix2 += line_size;
  259. }
  260. return s;
  261. }
  262. static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  263. {
  264. int s = 0, i;
  265. uint32_t *sq = ff_square_tab + 256;
  266. for (i = 0; i < h; i++) {
  267. s += sq[pix1[0] - pix2[0]];
  268. s += sq[pix1[1] - pix2[1]];
  269. s += sq[pix1[2] - pix2[2]];
  270. s += sq[pix1[3] - pix2[3]];
  271. s += sq[pix1[4] - pix2[4]];
  272. s += sq[pix1[5] - pix2[5]];
  273. s += sq[pix1[6] - pix2[6]];
  274. s += sq[pix1[7] - pix2[7]];
  275. s += sq[pix1[8] - pix2[8]];
  276. s += sq[pix1[9] - pix2[9]];
  277. s += sq[pix1[10] - pix2[10]];
  278. s += sq[pix1[11] - pix2[11]];
  279. s += sq[pix1[12] - pix2[12]];
  280. s += sq[pix1[13] - pix2[13]];
  281. s += sq[pix1[14] - pix2[14]];
  282. s += sq[pix1[15] - pix2[15]];
  283. pix1 += line_size;
  284. pix2 += line_size;
  285. }
  286. return s;
  287. }
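/* Store the difference of two 8x8 pixel blocks as a 64-entry coefficient
 * block (the input to the forward DCT). */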
  288. static void diff_pixels_c(int16_t *restrict block, const uint8_t *s1,
  289. const uint8_t *s2, int stride)
  290. {
  291. int i;
  292. /* read the pixels */
  293. for (i = 0; i < 8; i++) {
  294. block[0] = s1[0] - s2[0];
  295. block[1] = s1[1] - s2[1];
  296. block[2] = s1[2] - s2[2];
  297. block[3] = s1[3] - s2[3];
  298. block[4] = s1[4] - s2[4];
  299. block[5] = s1[5] - s2[5];
  300. block[6] = s1[6] - s2[6];
  301. block[7] = s1[7] - s2[7];
  302. s1 += stride;
  303. s2 += stride;
  304. block += 8;
  305. }
  306. }
  307. static void put_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
  308. int line_size)
  309. {
  310. int i;
  311. /* read the pixels */
  312. for (i = 0; i < 8; i++) {
  313. pixels[0] = av_clip_uint8(block[0]);
  314. pixels[1] = av_clip_uint8(block[1]);
  315. pixels[2] = av_clip_uint8(block[2]);
  316. pixels[3] = av_clip_uint8(block[3]);
  317. pixels[4] = av_clip_uint8(block[4]);
  318. pixels[5] = av_clip_uint8(block[5]);
  319. pixels[6] = av_clip_uint8(block[6]);
  320. pixels[7] = av_clip_uint8(block[7]);
  321. pixels += line_size;
  322. block += 8;
  323. }
  324. }
  325. static void put_signed_pixels_clamped_c(const int16_t *block,
  326. uint8_t *restrict pixels,
  327. int line_size)
  328. {
  329. int i, j;
  330. for (i = 0; i < 8; i++) {
  331. for (j = 0; j < 8; j++) {
  332. if (*block < -128)
  333. *pixels = 0;
  334. else if (*block > 127)
  335. *pixels = 255;
  336. else
  337. *pixels = (uint8_t) (*block + 128);
  338. block++;
  339. pixels++;
  340. }
  341. pixels += (line_size - 8);
  342. }
  343. }
  344. static void add_pixels8_c(uint8_t *restrict pixels, int16_t *block,
  345. int line_size)
  346. {
  347. int i;
  348. for (i = 0; i < 8; i++) {
  349. pixels[0] += block[0];
  350. pixels[1] += block[1];
  351. pixels[2] += block[2];
  352. pixels[3] += block[3];
  353. pixels[4] += block[4];
  354. pixels[5] += block[5];
  355. pixels[6] += block[6];
  356. pixels[7] += block[7];
  357. pixels += line_size;
  358. block += 8;
  359. }
  360. }
  361. static void add_pixels_clamped_c(const int16_t *block, uint8_t *restrict pixels,
  362. int line_size)
  363. {
  364. int i;
  365. /* read the pixels */
  366. for (i = 0; i < 8; i++) {
  367. pixels[0] = av_clip_uint8(pixels[0] + block[0]);
  368. pixels[1] = av_clip_uint8(pixels[1] + block[1]);
  369. pixels[2] = av_clip_uint8(pixels[2] + block[2]);
  370. pixels[3] = av_clip_uint8(pixels[3] + block[3]);
  371. pixels[4] = av_clip_uint8(pixels[4] + block[4]);
  372. pixels[5] = av_clip_uint8(pixels[5] + block[5]);
  373. pixels[6] = av_clip_uint8(pixels[6] + block[6]);
  374. pixels[7] = av_clip_uint8(pixels[7] + block[7]);
  375. pixels += line_size;
  376. block += 8;
  377. }
  378. }
  379. static int sum_abs_dctelem_c(int16_t *block)
  380. {
  381. int sum = 0, i;
  382. for (i = 0; i < 64; i++)
  383. sum += FFABS(block[i]);
  384. return sum;
  385. }
  386. static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
  387. {
  388. int i;
  389. for (i = 0; i < h; i++) {
  390. memset(block, value, 16);
  391. block += line_size;
  392. }
  393. }
  394. static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
  395. {
  396. int i;
  397. for (i = 0; i < h; i++) {
  398. memset(block, value, 8);
  399. block += line_size;
  400. }
  401. }
  402. #define avg2(a, b) ((a + b + 1) >> 1)
  403. #define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
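/* Global motion compensation with a single motion vector: bilinear
 * interpolation with a 1/16-pel fractional position (x16, y16). */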
  404. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
  405. int x16, int y16, int rounder)
  406. {
  407. const int A = (16 - x16) * (16 - y16);
  408. const int B = (x16) * (16 - y16);
  409. const int C = (16 - x16) * (y16);
  410. const int D = (x16) * (y16);
  411. int i;
  412. for (i = 0; i < h; i++) {
  413. dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + rounder) >> 8;
  414. dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + rounder) >> 8;
  415. dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + rounder) >> 8;
  416. dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + rounder) >> 8;
  417. dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + rounder) >> 8;
  418. dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + rounder) >> 8;
  419. dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + rounder) >> 8;
  420. dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + rounder) >> 8;
  421. dst += stride;
  422. src += stride;
  423. }
  424. }
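/* General (affine) global motion compensation: the sampling position
 * starts at (ox, oy), advances by (dxx, dyx) per pixel and by (dxy, dyy)
 * per line, and is bilinearly interpolated; positions outside the picture
 * are clamped to the border. */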
  425. void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  426. int dxx, int dxy, int dyx, int dyy, int shift, int r,
  427. int width, int height)
  428. {
  429. int y, vx, vy;
  430. const int s = 1 << shift;
  431. width--;
  432. height--;
  433. for (y = 0; y < h; y++) {
  434. int x;
  435. vx = ox;
  436. vy = oy;
  437. for (x = 0; x < 8; x++) { // FIXME: optimize
  438. int index;
  439. int src_x = vx >> 16;
  440. int src_y = vy >> 16;
  441. int frac_x = src_x & (s - 1);
  442. int frac_y = src_y & (s - 1);
  443. src_x >>= shift;
  444. src_y >>= shift;
  445. if ((unsigned) src_x < width) {
  446. if ((unsigned) src_y < height) {
  447. index = src_x + src_y * stride;
  448. dst[y * stride + x] =
  449. ((src[index] * (s - frac_x) +
  450. src[index + 1] * frac_x) * (s - frac_y) +
  451. (src[index + stride] * (s - frac_x) +
  452. src[index + stride + 1] * frac_x) * frac_y +
  453. r) >> (shift * 2);
  454. } else {
  455. index = src_x + av_clip(src_y, 0, height) * stride;
  456. dst[y * stride + x] =
  457. ((src[index] * (s - frac_x) +
  458. src[index + 1] * frac_x) * s +
  459. r) >> (shift * 2);
  460. }
  461. } else {
  462. if ((unsigned) src_y < height) {
  463. index = av_clip(src_x, 0, width) + src_y * stride;
  464. dst[y * stride + x] =
  465. ((src[index] * (s - frac_y) +
  466. src[index + stride] * frac_y) * s +
  467. r) >> (shift * 2);
  468. } else {
  469. index = av_clip(src_x, 0, width) +
  470. av_clip(src_y, 0, height) * stride;
  471. dst[y * stride + x] = src[index];
  472. }
  473. }
  474. vx += dxx;
  475. vy += dyx;
  476. }
  477. ox += dxy;
  478. oy += dyy;
  479. }
  480. }
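/* put/avg_tpel_pixels_mc??_c: third-pel ("tpel") interpolation as used by
 * the SVQ3 decoder; the factors 683/2048 and 2731/32768 approximate
 * 1/3 and 1/12. */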
  481. static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src,
  482. int stride, int width, int height)
  483. {
  484. switch (width) {
  485. case 2:
  486. put_pixels2_8_c(dst, src, stride, height);
  487. break;
  488. case 4:
  489. put_pixels4_8_c(dst, src, stride, height);
  490. break;
  491. case 8:
  492. put_pixels8_8_c(dst, src, stride, height);
  493. break;
  494. case 16:
  495. put_pixels16_8_c(dst, src, stride, height);
  496. break;
  497. }
  498. }
  499. static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src,
  500. int stride, int width, int height)
  501. {
  502. int i, j;
  503. for (i = 0; i < height; i++) {
  504. for (j = 0; j < width; j++)
  505. dst[j] = ((2 * src[j] + src[j + 1] + 1) *
  506. 683) >> 11;
  507. src += stride;
  508. dst += stride;
  509. }
  510. }
  511. static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src,
  512. int stride, int width, int height)
  513. {
  514. int i, j;
  515. for (i = 0; i < height; i++) {
  516. for (j = 0; j < width; j++)
  517. dst[j] = ((src[j] + 2 * src[j + 1] + 1) *
  518. 683) >> 11;
  519. src += stride;
  520. dst += stride;
  521. }
  522. }
  523. static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src,
  524. int stride, int width, int height)
  525. {
  526. int i, j;
  527. for (i = 0; i < height; i++) {
  528. for (j = 0; j < width; j++)
  529. dst[j] = ((2 * src[j] + src[j + stride] + 1) *
  530. 683) >> 11;
  531. src += stride;
  532. dst += stride;
  533. }
  534. }
  535. static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src,
  536. int stride, int width, int height)
  537. {
  538. int i, j;
  539. for (i = 0; i < height; i++) {
  540. for (j = 0; j < width; j++)
  541. dst[j] = ((4 * src[j] + 3 * src[j + 1] +
  542. 3 * src[j + stride] + 2 * src[j + stride + 1] + 6) *
  543. 2731) >> 15;
  544. src += stride;
  545. dst += stride;
  546. }
  547. }
  548. static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src,
  549. int stride, int width, int height)
  550. {
  551. int i, j;
  552. for (i = 0; i < height; i++) {
  553. for (j = 0; j < width; j++)
  554. dst[j] = ((3 * src[j] + 2 * src[j + 1] +
  555. 4 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
  556. 2731) >> 15;
  557. src += stride;
  558. dst += stride;
  559. }
  560. }
  561. static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src,
  562. int stride, int width, int height)
  563. {
  564. int i, j;
  565. for (i = 0; i < height; i++) {
  566. for (j = 0; j < width; j++)
  567. dst[j] = ((src[j] + 2 * src[j + stride] + 1) *
  568. 683) >> 11;
  569. src += stride;
  570. dst += stride;
  571. }
  572. }
  573. static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src,
  574. int stride, int width, int height)
  575. {
  576. int i, j;
  577. for (i = 0; i < height; i++) {
  578. for (j = 0; j < width; j++)
  579. dst[j] = ((3 * src[j] + 4 * src[j + 1] +
  580. 2 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
  581. 2731) >> 15;
  582. src += stride;
  583. dst += stride;
  584. }
  585. }
  586. static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src,
  587. int stride, int width, int height)
  588. {
  589. int i, j;
  590. for (i = 0; i < height; i++) {
  591. for (j = 0; j < width; j++)
  592. dst[j] = ((2 * src[j] + 3 * src[j + 1] +
  593. 3 * src[j + stride] + 4 * src[j + stride + 1] + 6) *
  594. 2731) >> 15;
  595. src += stride;
  596. dst += stride;
  597. }
  598. }
  599. static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src,
  600. int stride, int width, int height)
  601. {
  602. switch (width) {
  603. case 2:
  604. avg_pixels2_8_c(dst, src, stride, height);
  605. break;
  606. case 4:
  607. avg_pixels4_8_c(dst, src, stride, height);
  608. break;
  609. case 8:
  610. avg_pixels8_8_c(dst, src, stride, height);
  611. break;
  612. case 16:
  613. avg_pixels16_8_c(dst, src, stride, height);
  614. break;
  615. }
  616. }
  617. static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src,
  618. int stride, int width, int height)
  619. {
  620. int i, j;
  621. for (i = 0; i < height; i++) {
  622. for (j = 0; j < width; j++)
  623. dst[j] = (dst[j] +
  624. (((2 * src[j] + src[j + 1] + 1) *
  625. 683) >> 11) + 1) >> 1;
  626. src += stride;
  627. dst += stride;
  628. }
  629. }
  630. static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src,
  631. int stride, int width, int height)
  632. {
  633. int i, j;
  634. for (i = 0; i < height; i++) {
  635. for (j = 0; j < width; j++)
  636. dst[j] = (dst[j] +
  637. (((src[j] + 2 * src[j + 1] + 1) *
  638. 683) >> 11) + 1) >> 1;
  639. src += stride;
  640. dst += stride;
  641. }
  642. }
  643. static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src,
  644. int stride, int width, int height)
  645. {
  646. int i, j;
  647. for (i = 0; i < height; i++) {
  648. for (j = 0; j < width; j++)
  649. dst[j] = (dst[j] +
  650. (((2 * src[j] + src[j + stride] + 1) *
  651. 683) >> 11) + 1) >> 1;
  652. src += stride;
  653. dst += stride;
  654. }
  655. }
  656. static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src,
  657. int stride, int width, int height)
  658. {
  659. int i, j;
  660. for (i = 0; i < height; i++) {
  661. for (j = 0; j < width; j++)
  662. dst[j] = (dst[j] +
  663. (((4 * src[j] + 3 * src[j + 1] +
  664. 3 * src[j + stride] + 2 * src[j + stride + 1] + 6) *
  665. 2731) >> 15) + 1) >> 1;
  666. src += stride;
  667. dst += stride;
  668. }
  669. }
  670. static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src,
  671. int stride, int width, int height)
  672. {
  673. int i, j;
  674. for (i = 0; i < height; i++) {
  675. for (j = 0; j < width; j++)
  676. dst[j] = (dst[j] +
  677. (((3 * src[j] + 2 * src[j + 1] +
  678. 4 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
  679. 2731) >> 15) + 1) >> 1;
  680. src += stride;
  681. dst += stride;
  682. }
  683. }
  684. static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src,
  685. int stride, int width, int height)
  686. {
  687. int i, j;
  688. for (i = 0; i < height; i++) {
  689. for (j = 0; j < width; j++)
  690. dst[j] = (dst[j] +
  691. (((src[j] + 2 * src[j + stride] + 1) *
  692. 683) >> 11) + 1) >> 1;
  693. src += stride;
  694. dst += stride;
  695. }
  696. }
  697. static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src,
  698. int stride, int width, int height)
  699. {
  700. int i, j;
  701. for (i = 0; i < height; i++) {
  702. for (j = 0; j < width; j++)
  703. dst[j] = (dst[j] +
  704. (((3 * src[j] + 4 * src[j + 1] +
  705. 2 * src[j + stride] + 3 * src[j + stride + 1] + 6) *
  706. 2731) >> 15) + 1) >> 1;
  707. src += stride;
  708. dst += stride;
  709. }
  710. }
  711. static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src,
  712. int stride, int width, int height)
  713. {
  714. int i, j;
  715. for (i = 0; i < height; i++) {
  716. for (j = 0; j < width; j++)
  717. dst[j] = (dst[j] +
  718. (((2 * src[j] + 3 * src[j + 1] +
  719. 3 * src[j + stride] + 4 * src[j + stride + 1] + 6) *
  720. 2731) >> 15) + 1) >> 1;
  721. src += stride;
  722. dst += stride;
  723. }
  724. }
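/* QPEL_MC generates the MPEG-4 style quarter-pel motion compensation
 * functions for 8x8 and 16x16 blocks. The half-pel lowpass filter has
 * coefficients (-1, 3, -6, 20, 20, -6, 3, -1) / 32, with the source
 * mirrored at the block edges. */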
  725. #define QPEL_MC(r, OPNAME, RND, OP) \
  726. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \
  727. int dstStride, int srcStride, \
  728. int h) \
  729. { \
  730. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  731. int i; \
  732. \
  733. for (i = 0; i < h; i++) { \
  734. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  735. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  736. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  737. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  738. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  739. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \
  740. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \
  741. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \
  742. dst += dstStride; \
  743. src += srcStride; \
  744. } \
  745. } \
  746. \
  747. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \
  748. int dstStride, int srcStride) \
  749. { \
  750. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  751. const int w = 8; \
  752. int i; \
  753. \
  754. for (i = 0; i < w; i++) { \
  755. const int src0 = src[0 * srcStride]; \
  756. const int src1 = src[1 * srcStride]; \
  757. const int src2 = src[2 * srcStride]; \
  758. const int src3 = src[3 * srcStride]; \
  759. const int src4 = src[4 * srcStride]; \
  760. const int src5 = src[5 * srcStride]; \
  761. const int src6 = src[6 * srcStride]; \
  762. const int src7 = src[7 * srcStride]; \
  763. const int src8 = src[8 * srcStride]; \
  764. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  765. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  766. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  767. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  768. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  769. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \
  770. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \
  771. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \
  772. dst++; \
  773. src++; \
  774. } \
  775. } \
  776. \
  777. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \
  778. int dstStride, int srcStride, \
  779. int h) \
  780. { \
  781. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  782. int i; \
  783. \
  784. for (i = 0; i < h; i++) { \
  785. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  786. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  787. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  788. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  789. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  790. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \
  791. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \
  792. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \
  793. OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \
  794. OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \
  795. OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \
  796. OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \
  797. OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \
  798. OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \
  799. OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \
  800. OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \
  801. dst += dstStride; \
  802. src += srcStride; \
  803. } \
  804. } \
  805. \
  806. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \
  807. int dstStride, int srcStride) \
  808. { \
  809. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  810. const int w = 16; \
  811. int i; \
  812. \
  813. for (i = 0; i < w; i++) { \
  814. const int src0 = src[0 * srcStride]; \
  815. const int src1 = src[1 * srcStride]; \
  816. const int src2 = src[2 * srcStride]; \
  817. const int src3 = src[3 * srcStride]; \
  818. const int src4 = src[4 * srcStride]; \
  819. const int src5 = src[5 * srcStride]; \
  820. const int src6 = src[6 * srcStride]; \
  821. const int src7 = src[7 * srcStride]; \
  822. const int src8 = src[8 * srcStride]; \
  823. const int src9 = src[9 * srcStride]; \
  824. const int src10 = src[10 * srcStride]; \
  825. const int src11 = src[11 * srcStride]; \
  826. const int src12 = src[12 * srcStride]; \
  827. const int src13 = src[13 * srcStride]; \
  828. const int src14 = src[14 * srcStride]; \
  829. const int src15 = src[15 * srcStride]; \
  830. const int src16 = src[16 * srcStride]; \
  831. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  832. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  833. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  834. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  835. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  836. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \
  837. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \
  838. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \
  839. OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \
  840. OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \
  841. OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \
  842. OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \
  843. OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \
  844. OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \
  845. OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \
  846. OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \
  847. dst++; \
  848. src++; \
  849. } \
  850. } \
  851. \
  852. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \
  853. ptrdiff_t stride) \
  854. { \
  855. uint8_t half[64]; \
  856. \
  857. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  858. OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \
  859. } \
  860. \
  861. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \
  862. ptrdiff_t stride) \
  863. { \
  864. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \
  865. } \
  866. \
  867. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \
  868. ptrdiff_t stride) \
  869. { \
  870. uint8_t half[64]; \
  871. \
  872. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  873. OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \
  874. } \
  875. \
  876. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \
  877. ptrdiff_t stride) \
  878. { \
  879. uint8_t full[16 * 9]; \
  880. uint8_t half[64]; \
  881. \
  882. copy_block9(full, src, 16, stride, 9); \
  883. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  884. OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \
  885. } \
  886. \
  887. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \
  888. ptrdiff_t stride) \
  889. { \
  890. uint8_t full[16 * 9]; \
  891. \
  892. copy_block9(full, src, 16, stride, 9); \
  893. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \
  894. } \
  895. \
  896. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \
  897. ptrdiff_t stride) \
  898. { \
  899. uint8_t full[16 * 9]; \
  900. uint8_t half[64]; \
  901. \
  902. copy_block9(full, src, 16, stride, 9); \
  903. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  904. OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \
  905. } \
  906. \
  907. void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \
  908. ptrdiff_t stride) \
  909. { \
  910. uint8_t full[16 * 9]; \
  911. uint8_t halfH[72]; \
  912. uint8_t halfV[64]; \
  913. uint8_t halfHV[64]; \
  914. \
  915. copy_block9(full, src, 16, stride, 9); \
  916. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  917. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  918. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  919. OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \
  920. stride, 16, 8, 8, 8, 8); \
  921. } \
  922. \
  923. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \
  924. ptrdiff_t stride) \
  925. { \
  926. uint8_t full[16 * 9]; \
  927. uint8_t halfH[72]; \
  928. uint8_t halfHV[64]; \
  929. \
  930. copy_block9(full, src, 16, stride, 9); \
  931. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  932. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  933. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  934. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  935. } \
  936. \
  937. void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \
  938. ptrdiff_t stride) \
  939. { \
  940. uint8_t full[16 * 9]; \
  941. uint8_t halfH[72]; \
  942. uint8_t halfV[64]; \
  943. uint8_t halfHV[64]; \
  944. \
  945. copy_block9(full, src, 16, stride, 9); \
  946. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  947. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  948. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  949. OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  950. stride, 16, 8, 8, 8, 8); \
  951. } \
  952. \
  953. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \
  954. ptrdiff_t stride) \
  955. { \
  956. uint8_t full[16 * 9]; \
  957. uint8_t halfH[72]; \
  958. uint8_t halfHV[64]; \
  959. \
  960. copy_block9(full, src, 16, stride, 9); \
  961. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  962. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  963. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  964. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  965. } \
  966. \
  967. void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \
  968. ptrdiff_t stride) \
  969. { \
  970. uint8_t full[16 * 9]; \
  971. uint8_t halfH[72]; \
  972. uint8_t halfV[64]; \
  973. uint8_t halfHV[64]; \
  974. \
  975. copy_block9(full, src, 16, stride, 9); \
  976. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  977. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  978. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  979. OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \
  980. stride, 16, 8, 8, 8, 8); \
  981. } \
  982. \
  983. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \
  984. ptrdiff_t stride) \
  985. { \
  986. uint8_t full[16 * 9]; \
  987. uint8_t halfH[72]; \
  988. uint8_t halfHV[64]; \
  989. \
  990. copy_block9(full, src, 16, stride, 9); \
  991. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  992. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  993. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  994. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  995. } \
  996. \
  997. void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \
  998. ptrdiff_t stride) \
  999. { \
  1000. uint8_t full[16 * 9]; \
  1001. uint8_t halfH[72]; \
  1002. uint8_t halfV[64]; \
  1003. uint8_t halfHV[64]; \
  1004. \
  1005. copy_block9(full, src, 16, stride, 9); \
  1006. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1007. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  1008. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1009. OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \
  1010. stride, 16, 8, 8, 8, 8); \
  1011. } \
  1012. \
  1013. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \
  1014. ptrdiff_t stride) \
  1015. { \
  1016. uint8_t full[16 * 9]; \
  1017. uint8_t halfH[72]; \
  1018. uint8_t halfHV[64]; \
  1019. \
  1020. copy_block9(full, src, 16, stride, 9); \
  1021. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1022. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  1023. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1024. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  1025. } \
  1026. \
  1027. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \
  1028. ptrdiff_t stride) \
  1029. { \
  1030. uint8_t halfH[72]; \
  1031. uint8_t halfHV[64]; \
  1032. \
  1033. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  1034. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1035. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  1036. } \
  1037. \
  1038. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \
  1039. ptrdiff_t stride) \
  1040. { \
  1041. uint8_t halfH[72]; \
  1042. uint8_t halfHV[64]; \
  1043. \
  1044. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  1045. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1046. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  1047. } \
  1048. \
  1049. void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1050. ptrdiff_t stride) \
  1051. { \
  1052. uint8_t full[16 * 9]; \
  1053. uint8_t halfH[72]; \
  1054. uint8_t halfV[64]; \
  1055. uint8_t halfHV[64]; \
  1056. \
  1057. copy_block9(full, src, 16, stride, 9); \
  1058. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1059. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  1060. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1061. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  1062. } \
  1063. \
  1064. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \
  1065. ptrdiff_t stride) \
  1066. { \
  1067. uint8_t full[16 * 9]; \
  1068. uint8_t halfH[72]; \
  1069. \
  1070. copy_block9(full, src, 16, stride, 9); \
  1071. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1072. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  1073. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  1074. } \
  1075. \
  1076. void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1077. ptrdiff_t stride) \
  1078. { \
  1079. uint8_t full[16 * 9]; \
  1080. uint8_t halfH[72]; \
  1081. uint8_t halfV[64]; \
  1082. uint8_t halfHV[64]; \
  1083. \
  1084. copy_block9(full, src, 16, stride, 9); \
  1085. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1086. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  1087. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  1088. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  1089. } \
  1090. \
  1091. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \
  1092. ptrdiff_t stride) \
  1093. { \
  1094. uint8_t full[16 * 9]; \
  1095. uint8_t halfH[72]; \
  1096. \
  1097. copy_block9(full, src, 16, stride, 9); \
  1098. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  1099. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  1100. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  1101. } \
  1102. \
  1103. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \
  1104. ptrdiff_t stride) \
  1105. { \
  1106. uint8_t halfH[72]; \
  1107. \
  1108. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  1109. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  1110. } \
  1111. \
  1112. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \
  1113. ptrdiff_t stride) \
  1114. { \
  1115. uint8_t half[256]; \
  1116. \
  1117. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  1118. OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \
  1119. } \
  1120. \
  1121. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \
  1122. ptrdiff_t stride) \
  1123. { \
  1124. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \
  1125. } \
  1126. \
  1127. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \
  1128. ptrdiff_t stride) \
  1129. { \
  1130. uint8_t half[256]; \
  1131. \
  1132. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  1133. OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \
  1134. } \
  1135. \
  1136. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \
  1137. ptrdiff_t stride) \
  1138. { \
  1139. uint8_t full[24 * 17]; \
  1140. uint8_t half[256]; \
  1141. \
  1142. copy_block17(full, src, 24, stride, 17); \
  1143. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  1144. OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \
  1145. } \
  1146. \
  1147. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \
  1148. ptrdiff_t stride) \
  1149. { \
  1150. uint8_t full[24 * 17]; \
  1151. \
  1152. copy_block17(full, src, 24, stride, 17); \
  1153. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \
  1154. } \
  1155. \
  1156. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \
  1157. ptrdiff_t stride) \
  1158. { \
  1159. uint8_t full[24 * 17]; \
  1160. uint8_t half[256]; \
  1161. \
  1162. copy_block17(full, src, 24, stride, 17); \
  1163. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  1164. OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \
  1165. } \
  1166. \
  1167. void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \
  1168. ptrdiff_t stride) \
  1169. { \
  1170. uint8_t full[24 * 17]; \
  1171. uint8_t halfH[272]; \
  1172. uint8_t halfV[256]; \
  1173. uint8_t halfHV[256]; \
  1174. \
  1175. copy_block17(full, src, 24, stride, 17); \
  1176. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1177. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1178. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1179. OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \
  1180. stride, 24, 16, 16, 16, 16); \
  1181. } \
  1182. \
  1183. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \
  1184. ptrdiff_t stride) \
  1185. { \
  1186. uint8_t full[24 * 17]; \
  1187. uint8_t halfH[272]; \
  1188. uint8_t halfHV[256]; \
  1189. \
  1190. copy_block17(full, src, 24, stride, 17); \
  1191. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1192. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1193. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1194. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1195. } \
  1196. \
  1197. void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \
  1198. ptrdiff_t stride) \
  1199. { \
  1200. uint8_t full[24 * 17]; \
  1201. uint8_t halfH[272]; \
  1202. uint8_t halfV[256]; \
  1203. uint8_t halfHV[256]; \
  1204. \
  1205. copy_block17(full, src, 24, stride, 17); \
  1206. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1207. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1208. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1209. OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  1210. stride, 24, 16, 16, 16, 16); \
  1211. } \
  1212. \
  1213. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \
  1214. ptrdiff_t stride) \
  1215. { \
  1216. uint8_t full[24 * 17]; \
  1217. uint8_t halfH[272]; \
  1218. uint8_t halfHV[256]; \
  1219. \
  1220. copy_block17(full, src, 24, stride, 17); \
  1221. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1222. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1223. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1224. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1225. } \
  1226. \
  1227. void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \
  1228. ptrdiff_t stride) \
  1229. { \
  1230. uint8_t full[24 * 17]; \
  1231. uint8_t halfH[272]; \
  1232. uint8_t halfV[256]; \
  1233. uint8_t halfHV[256]; \
  1234. \
  1235. copy_block17(full, src, 24, stride, 17); \
  1236. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1237. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1238. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1239. OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \
  1240. stride, 24, 16, 16, 16, 16); \
  1241. } \
  1242. \
  1243. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \
  1244. ptrdiff_t stride) \
  1245. { \
  1246. uint8_t full[24 * 17]; \
  1247. uint8_t halfH[272]; \
  1248. uint8_t halfHV[256]; \
  1249. \
  1250. copy_block17(full, src, 24, stride, 17); \
  1251. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1252. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1253. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1254. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1255. } \
  1256. \
  1257. void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \
  1258. ptrdiff_t stride) \
  1259. { \
  1260. uint8_t full[24 * 17]; \
  1261. uint8_t halfH[272]; \
  1262. uint8_t halfV[256]; \
  1263. uint8_t halfHV[256]; \
  1264. \
  1265. copy_block17(full, src, 24, stride, 17); \
  1266. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1267. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1268. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1269. OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \
  1270. stride, 24, 16, 16, 16, 16); \
  1271. } \
  1272. \
  1273. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \
  1274. ptrdiff_t stride) \
  1275. { \
  1276. uint8_t full[24 * 17]; \
  1277. uint8_t halfH[272]; \
  1278. uint8_t halfHV[256]; \
  1279. \
  1280. copy_block17(full, src, 24, stride, 17); \
  1281. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1282. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1283. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1284. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1285. } \
  1286. \
  1287. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \
  1288. ptrdiff_t stride) \
  1289. { \
  1290. uint8_t halfH[272]; \
  1291. uint8_t halfHV[256]; \
  1292. \
  1293. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1294. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1295. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1296. } \
  1297. \
  1298. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \
  1299. ptrdiff_t stride) \
  1300. { \
  1301. uint8_t halfH[272]; \
  1302. uint8_t halfHV[256]; \
  1303. \
  1304. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1305. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1306. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1307. } \
  1308. \
  1309. void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1310. ptrdiff_t stride) \
  1311. { \
  1312. uint8_t full[24 * 17]; \
  1313. uint8_t halfH[272]; \
  1314. uint8_t halfV[256]; \
  1315. uint8_t halfHV[256]; \
  1316. \
  1317. copy_block17(full, src, 24, stride, 17); \
  1318. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1319. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1320. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1321. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1322. } \
  1323. \
  1324. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \
  1325. ptrdiff_t stride) \
  1326. { \
  1327. uint8_t full[24 * 17]; \
  1328. uint8_t halfH[272]; \
  1329. \
  1330. copy_block17(full, src, 24, stride, 17); \
  1331. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1332. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1333. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1334. } \
  1335. \
  1336. void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1337. ptrdiff_t stride) \
  1338. { \
  1339. uint8_t full[24 * 17]; \
  1340. uint8_t halfH[272]; \
  1341. uint8_t halfV[256]; \
  1342. uint8_t halfHV[256]; \
  1343. \
  1344. copy_block17(full, src, 24, stride, 17); \
  1345. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1346. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1347. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1348. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1349. } \
  1350. \
  1351. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \
  1352. ptrdiff_t stride) \
  1353. { \
  1354. uint8_t full[24 * 17]; \
  1355. uint8_t halfH[272]; \
  1356. \
  1357. copy_block17(full, src, 24, stride, 17); \
  1358. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1359. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1360. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1361. } \
  1362. \
  1363. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \
  1364. ptrdiff_t stride) \
  1365. { \
  1366. uint8_t halfH[272]; \
  1367. \
  1368. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1369. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1370. }
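/* The OP macros scale the filter output back to 8 bits: (b + 16) >> 5 is
 * a rounding division by 32 (the filter gain), clamped through the crop
 * table; the _no_rnd variants use a smaller rounding constant. */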
  1371. #define op_avg(a, b) a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1)
  1372. #define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1)
  1373. #define op_put(a, b) a = cm[((b) + 16) >> 5]
  1374. #define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]
  1375. QPEL_MC(0, put_, _, op_put)
  1376. QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
  1377. QPEL_MC(0, avg_, _, op_avg)
  1378. #undef op_avg
  1379. #undef op_put
  1380. #undef op_put_no_rnd
  1381. void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1382. {
  1383. put_pixels8_8_c(dst, src, stride, 8);
  1384. }
  1385. void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1386. {
  1387. avg_pixels8_8_c(dst, src, stride, 8);
  1388. }
  1389. void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1390. {
  1391. put_pixels16_8_c(dst, src, stride, 16);
  1392. }
  1393. void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1394. {
  1395. avg_pixels16_8_c(dst, src, stride, 16);
  1396. }
  1397. #define put_qpel8_mc00_c ff_put_pixels8x8_c
  1398. #define avg_qpel8_mc00_c ff_avg_pixels8x8_c
  1399. #define put_qpel16_mc00_c ff_put_pixels16x16_c
  1400. #define avg_qpel16_mc00_c ff_avg_pixels16x16_c
  1401. #define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c
  1402. #define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
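/* WMV2 "mspel" half-pel lowpass filter with coefficients
 * (-1, 9, 9, -1) / 16. */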
  1403. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src,
  1404. int dstStride, int srcStride, int h)
  1405. {
  1406. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1407. int i;
  1408. for (i = 0; i < h; i++) {
  1409. dst[0] = cm[(9 * (src[0] + src[1]) - (src[-1] + src[2]) + 8) >> 4];
  1410. dst[1] = cm[(9 * (src[1] + src[2]) - (src[0] + src[3]) + 8) >> 4];
  1411. dst[2] = cm[(9 * (src[2] + src[3]) - (src[1] + src[4]) + 8) >> 4];
  1412. dst[3] = cm[(9 * (src[3] + src[4]) - (src[2] + src[5]) + 8) >> 4];
  1413. dst[4] = cm[(9 * (src[4] + src[5]) - (src[3] + src[6]) + 8) >> 4];
  1414. dst[5] = cm[(9 * (src[5] + src[6]) - (src[4] + src[7]) + 8) >> 4];
  1415. dst[6] = cm[(9 * (src[6] + src[7]) - (src[5] + src[8]) + 8) >> 4];
  1416. dst[7] = cm[(9 * (src[7] + src[8]) - (src[6] + src[9]) + 8) >> 4];
  1417. dst += dstStride;
  1418. src += srcStride;
  1419. }
  1420. }
  1421. #if CONFIG_RV40_DECODER
  1422. void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1423. {
  1424. put_pixels16_xy2_8_c(dst, src, stride, 16);
  1425. }
  1426. void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1427. {
  1428. avg_pixels16_xy2_8_c(dst, src, stride, 16);
  1429. }
  1430. void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1431. {
  1432. put_pixels8_xy2_8_c(dst, src, stride, 8);
  1433. }
  1434. void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1435. {
  1436. avg_pixels8_xy2_8_c(dst, src, stride, 8);
  1437. }
  1438. #endif /* CONFIG_RV40_DECODER */
  1439. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src,
  1440. int dstStride, int srcStride, int w)
  1441. {
  1442. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1443. int i;
  1444. for (i = 0; i < w; i++) {
  1445. const int src_1 = src[-srcStride];
  1446. const int src0 = src[0];
  1447. const int src1 = src[srcStride];
  1448. const int src2 = src[2 * srcStride];
  1449. const int src3 = src[3 * srcStride];
  1450. const int src4 = src[4 * srcStride];
  1451. const int src5 = src[5 * srcStride];
  1452. const int src6 = src[6 * srcStride];
  1453. const int src7 = src[7 * srcStride];
  1454. const int src8 = src[8 * srcStride];
  1455. const int src9 = src[9 * srcStride];
  1456. dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
  1457. dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0 + src3) + 8) >> 4];
  1458. dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1 + src4) + 8) >> 4];
  1459. dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2 + src5) + 8) >> 4];
  1460. dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3 + src6) + 8) >> 4];
  1461. dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4 + src7) + 8) >> 4];
  1462. dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5 + src8) + 8) >> 4];
  1463. dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6 + src9) + 8) >> 4];
  1464. src++;
  1465. dst++;
  1466. }
  1467. }
  1468. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1469. {
  1470. uint8_t half[64];
  1471. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1472. put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
  1473. }
  1474. static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1475. {
  1476. wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
  1477. }
  1478. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1479. {
  1480. uint8_t half[64];
  1481. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1482. put_pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8);
  1483. }
  1484. static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1485. {
  1486. wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
  1487. }
  1488. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1489. {
  1490. uint8_t halfH[88];
  1491. uint8_t halfV[64];
  1492. uint8_t halfHV[64];
  1493. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1494. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1495. wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
  1496. put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
  1497. }
  1498. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1499. {
  1500. uint8_t halfH[88];
  1501. uint8_t halfV[64];
  1502. uint8_t halfHV[64];
  1503. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1504. wmv2_mspel8_v_lowpass(halfV, src + 1, 8, stride, 8);
  1505. wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
  1506. put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
  1507. }
  1508. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1509. {
  1510. uint8_t halfH[88];
  1511. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1512. wmv2_mspel8_v_lowpass(dst, halfH + 8, stride, 8, 8);
  1513. }
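/* Sum-of-absolute-differences (SAD) functions used by motion
 * estimation. pix_abs16_c / pix_abs8_c compare the blocks directly; the
 * _x2, _y2 and _xy2 variants compare against a half-pel interpolated
 * reference, averaging two or four neighbouring reference pixels on the
 * fly with avg2()/avg4(). Each unrolled row of pix_abs16_c is
 * equivalent to:
 *     for (j = 0; j < 16; j++)
 *         s += abs(pix1[j] - pix2[j]);
 */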
  1514. static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1515. int line_size, int h)
  1516. {
  1517. int s = 0, i;
  1518. for (i = 0; i < h; i++) {
  1519. s += abs(pix1[0] - pix2[0]);
  1520. s += abs(pix1[1] - pix2[1]);
  1521. s += abs(pix1[2] - pix2[2]);
  1522. s += abs(pix1[3] - pix2[3]);
  1523. s += abs(pix1[4] - pix2[4]);
  1524. s += abs(pix1[5] - pix2[5]);
  1525. s += abs(pix1[6] - pix2[6]);
  1526. s += abs(pix1[7] - pix2[7]);
  1527. s += abs(pix1[8] - pix2[8]);
  1528. s += abs(pix1[9] - pix2[9]);
  1529. s += abs(pix1[10] - pix2[10]);
  1530. s += abs(pix1[11] - pix2[11]);
  1531. s += abs(pix1[12] - pix2[12]);
  1532. s += abs(pix1[13] - pix2[13]);
  1533. s += abs(pix1[14] - pix2[14]);
  1534. s += abs(pix1[15] - pix2[15]);
  1535. pix1 += line_size;
  1536. pix2 += line_size;
  1537. }
  1538. return s;
  1539. }
  1540. static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1541. int line_size, int h)
  1542. {
  1543. int s = 0, i;
  1544. for (i = 0; i < h; i++) {
  1545. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1546. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1547. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1548. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1549. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1550. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1551. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1552. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1553. s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
  1554. s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
  1555. s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
  1556. s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
  1557. s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
  1558. s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
  1559. s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
  1560. s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
  1561. pix1 += line_size;
  1562. pix2 += line_size;
  1563. }
  1564. return s;
  1565. }
  1566. static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1567. int line_size, int h)
  1568. {
  1569. int s = 0, i;
  1570. uint8_t *pix3 = pix2 + line_size;
  1571. for (i = 0; i < h; i++) {
  1572. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1573. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1574. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1575. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1576. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1577. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1578. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1579. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1580. s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
  1581. s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
  1582. s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
  1583. s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
  1584. s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
  1585. s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
  1586. s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
  1587. s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
  1588. pix1 += line_size;
  1589. pix2 += line_size;
  1590. pix3 += line_size;
  1591. }
  1592. return s;
  1593. }
  1594. static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1595. int line_size, int h)
  1596. {
  1597. int s = 0, i;
  1598. uint8_t *pix3 = pix2 + line_size;
  1599. for (i = 0; i < h; i++) {
  1600. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1601. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1602. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1603. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1604. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1605. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1606. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1607. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1608. s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
  1609. s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
  1610. s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
  1611. s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
  1612. s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
  1613. s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
  1614. s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
  1615. s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
  1616. pix1 += line_size;
  1617. pix2 += line_size;
  1618. pix3 += line_size;
  1619. }
  1620. return s;
  1621. }
  1622. static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1623. int line_size, int h)
  1624. {
  1625. int s = 0, i;
  1626. for (i = 0; i < h; i++) {
  1627. s += abs(pix1[0] - pix2[0]);
  1628. s += abs(pix1[1] - pix2[1]);
  1629. s += abs(pix1[2] - pix2[2]);
  1630. s += abs(pix1[3] - pix2[3]);
  1631. s += abs(pix1[4] - pix2[4]);
  1632. s += abs(pix1[5] - pix2[5]);
  1633. s += abs(pix1[6] - pix2[6]);
  1634. s += abs(pix1[7] - pix2[7]);
  1635. pix1 += line_size;
  1636. pix2 += line_size;
  1637. }
  1638. return s;
  1639. }
  1640. static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1641. int line_size, int h)
  1642. {
  1643. int s = 0, i;
  1644. for (i = 0; i < h; i++) {
  1645. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1646. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1647. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1648. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1649. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1650. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1651. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1652. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1653. pix1 += line_size;
  1654. pix2 += line_size;
  1655. }
  1656. return s;
  1657. }
  1658. static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1659. int line_size, int h)
  1660. {
  1661. int s = 0, i;
  1662. uint8_t *pix3 = pix2 + line_size;
  1663. for (i = 0; i < h; i++) {
  1664. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1665. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1666. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1667. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1668. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1669. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1670. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1671. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1672. pix1 += line_size;
  1673. pix2 += line_size;
  1674. pix3 += line_size;
  1675. }
  1676. return s;
  1677. }
  1678. static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2,
  1679. int line_size, int h)
  1680. {
  1681. int s = 0, i;
  1682. uint8_t *pix3 = pix2 + line_size;
  1683. for (i = 0; i < h; i++) {
  1684. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1685. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1686. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1687. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1688. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1689. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1690. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1691. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1692. pix1 += line_size;
  1693. pix2 += line_size;
  1694. pix3 += line_size;
  1695. }
  1696. return s;
  1697. }
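/* Noise-preserving SSE (FF_CMP_NSSE): the plain squared error is
 * combined with a term comparing the local 2x2 second-order differences
 * of s1 and s2, so that smoothing texture away is penalised as well as
 * adding error. The gradient term is scaled by avctx->nsse_weight, or
 * by 8 when no context is available. */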
  1698. static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h)
  1699. {
  1700. MpegEncContext *c = v;
  1701. int score1 = 0, score2 = 0, x, y;
  1702. for (y = 0; y < h; y++) {
  1703. for (x = 0; x < 16; x++)
  1704. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1705. if (y + 1 < h) {
  1706. for (x = 0; x < 15; x++)
  1707. score2 += FFABS(s1[x] - s1[x + stride] -
  1708. s1[x + 1] + s1[x + stride + 1]) -
  1709. FFABS(s2[x] - s2[x + stride] -
  1710. s2[x + 1] + s2[x + stride + 1]);
  1711. }
  1712. s1 += stride;
  1713. s2 += stride;
  1714. }
  1715. if (c)
  1716. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1717. else
  1718. return score1 + FFABS(score2) * 8;
  1719. }
  1720. static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h)
  1721. {
  1722. MpegEncContext *c = v;
  1723. int score1 = 0, score2 = 0, x, y;
  1724. for (y = 0; y < h; y++) {
  1725. for (x = 0; x < 8; x++)
  1726. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1727. if (y + 1 < h) {
  1728. for (x = 0; x < 7; x++)
  1729. score2 += FFABS(s1[x] - s1[x + stride] -
  1730. s1[x + 1] + s1[x + stride + 1]) -
  1731. FFABS(s2[x] - s2[x + stride] -
  1732. s2[x + 1] + s2[x + stride + 1]);
  1733. }
  1734. s1 += stride;
  1735. s2 += stride;
  1736. }
  1737. if (c)
  1738. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1739. else
  1740. return score1 + FFABS(score2) * 8;
  1741. }
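/* Helpers for the encoder's quantizer noise shaping pass (see
 * mpegvideo_enc): add_8x8basis_c adds a scaled basis function to the
 * residual, and try_8x8basis_c returns the weighted squared error that
 * doing so would produce, using the BASIS_SHIFT/RECON_SHIFT fixed-point
 * conventions. */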
  1742. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64],
  1743. int16_t basis[64], int scale)
  1744. {
  1745. int i;
  1746. unsigned int sum = 0;
  1747. for (i = 0; i < 8 * 8; i++) {
  1748. int b = rem[i] + ((basis[i] * scale +
  1749. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1750. (BASIS_SHIFT - RECON_SHIFT));
  1751. int w = weight[i];
  1752. b >>= RECON_SHIFT;
  1753. assert(-512 < b && b < 512);
  1754. sum += (w * b) * (w * b) >> 4;
  1755. }
  1756. return sum >> 2;
  1757. }
  1758. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale)
  1759. {
  1760. int i;
  1761. for (i = 0; i < 8 * 8; i++)
  1762. rem[i] += (basis[i] * scale +
  1763. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1764. (BASIS_SHIFT - RECON_SHIFT);
  1765. }
  1766. static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h)
  1767. {
  1768. return 0;
  1769. }
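/* Select the comparison functions for the FF_CMP_* id in the low byte
 * of 'type', filling all six per-block-size slots from the
 * corresponding DSPContext tables. Unknown ids are logged and leave the
 * slot at the NULL set by the initial memset. Motion estimation
 * typically calls this as, e.g.,
 *     ff_set_cmp(&s->dsp, s->dsp.me_cmp, s->avctx->me_cmp); */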
  1770. void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
  1771. {
  1772. int i;
  1773. memset(cmp, 0, sizeof(void *) * 6);
  1774. for (i = 0; i < 6; i++) {
  1775. switch (type & 0xFF) {
  1776. case FF_CMP_SAD:
  1777. cmp[i] = c->sad[i];
  1778. break;
  1779. case FF_CMP_SATD:
  1780. cmp[i] = c->hadamard8_diff[i];
  1781. break;
  1782. case FF_CMP_SSE:
  1783. cmp[i] = c->sse[i];
  1784. break;
  1785. case FF_CMP_DCT:
  1786. cmp[i] = c->dct_sad[i];
  1787. break;
  1788. case FF_CMP_DCT264:
  1789. cmp[i] = c->dct264_sad[i];
  1790. break;
  1791. case FF_CMP_DCTMAX:
  1792. cmp[i] = c->dct_max[i];
  1793. break;
  1794. case FF_CMP_PSNR:
  1795. cmp[i] = c->quant_psnr[i];
  1796. break;
  1797. case FF_CMP_BIT:
  1798. cmp[i] = c->bit[i];
  1799. break;
  1800. case FF_CMP_RD:
  1801. cmp[i] = c->rd[i];
  1802. break;
  1803. case FF_CMP_VSAD:
  1804. cmp[i] = c->vsad[i];
  1805. break;
  1806. case FF_CMP_VSSE:
  1807. cmp[i] = c->vsse[i];
  1808. break;
  1809. case FF_CMP_ZERO:
  1810. cmp[i] = zero_cmp;
  1811. break;
  1812. case FF_CMP_NSSE:
  1813. cmp[i] = c->nsse[i];
  1814. break;
  1815. default:
  1816. av_log(NULL, AV_LOG_ERROR,
  1817. "internal error in cmp function selection\n");
  1818. }
  1819. }
  1820. }
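/* Byte-wise add/subtract across whole machine words, as used by
 * lossless codecs such as HuffYUV. pb_7f masks off the per-byte top
 * bits so that carries out of the low 7 bits cannot cross into the
 * neighbouring byte, and the top bit of each byte is then patched back
 * in with an XOR, yielding sizeof(long) independent modulo-256
 * additions (or subtractions, in diff_bytes_c) per word. */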
  1821. static void add_bytes_c(uint8_t *dst, uint8_t *src, int w)
  1822. {
  1823. long i;
  1824. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1825. long a = *(long *) (src + i);
  1826. long b = *(long *) (dst + i);
  1827. *(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
  1828. }
  1829. for (; i < w; i++)
  1830. dst[i + 0] += src[i + 0];
  1831. }
  1832. static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
  1833. {
  1834. long i;
  1835. #if !HAVE_FAST_UNALIGNED
  1836. if ((long) src2 & (sizeof(long) - 1)) {
  1837. for (i = 0; i + 7 < w; i += 8) {
  1838. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1839. dst[i + 1] = src1[i + 1] - src2[i + 1];
  1840. dst[i + 2] = src1[i + 2] - src2[i + 2];
  1841. dst[i + 3] = src1[i + 3] - src2[i + 3];
  1842. dst[i + 4] = src1[i + 4] - src2[i + 4];
  1843. dst[i + 5] = src1[i + 5] - src2[i + 5];
  1844. dst[i + 6] = src1[i + 6] - src2[i + 6];
  1845. dst[i + 7] = src1[i + 7] - src2[i + 7];
  1846. }
  1847. } else
  1848. #endif
  1849. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1850. long a = *(long *) (src1 + i);
  1851. long b = *(long *) (src2 + i);
  1852. *(long *) (dst + i) = ((a | pb_80) - (b & pb_7f)) ^
  1853. ((a ^ b ^ pb_80) & pb_80);
  1854. }
  1855. for (; i < w; i++)
  1856. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1857. }
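/* HuffYUV median prediction: each pixel is predicted as
 * mid_pred(left, top, left + top - topleft), with src1 holding the
 * previous line. add_hfyu_median_prediction_c reconstructs pixels from
 * prediction + residual, sub_hfyu_median_prediction_c produces the
 * residual on the encoder side; both return the updated left/left_top
 * state through the pointers. */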
  1858. static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1859. const uint8_t *diff, int w,
  1860. int *left, int *left_top)
  1861. {
  1862. int i;
  1863. uint8_t l, lt;
  1864. l = *left;
  1865. lt = *left_top;
  1866. for (i = 0; i < w; i++) {
  1867. l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
  1868. lt = src1[i];
  1869. dst[i] = l;
  1870. }
  1871. *left = l;
  1872. *left_top = lt;
  1873. }
  1874. static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1875. const uint8_t *src2, int w,
  1876. int *left, int *left_top)
  1877. {
  1878. int i;
  1879. uint8_t l, lt;
  1880. l = *left;
  1881. lt = *left_top;
  1882. for (i = 0; i < w; i++) {
  1883. const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
  1884. lt = src1[i];
  1885. l = src2[i];
  1886. dst[i] = l - pred;
  1887. }
  1888. *left = l;
  1889. *left_top = lt;
  1890. }
  1891. static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src,
  1892. int w, int acc)
  1893. {
  1894. int i;
  1895. for (i = 0; i < w - 1; i++) {
  1896. acc += src[i];
  1897. dst[i] = acc;
  1898. i++;
  1899. acc += src[i];
  1900. dst[i] = acc;
  1901. }
  1902. for (; i < w; i++) {
  1903. acc += src[i];
  1904. dst[i] = acc;
  1905. }
  1906. return acc;
  1907. }
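/* Plain left prediction: dst[i] is the running sum of the residuals in
 * src, seeded with 'acc'. The bgr32 variant below does the same on
 * packed 32-bit pixels; the B/G/R/A offsets select the byte order
 * depending on endianness. */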
  1908. #if HAVE_BIGENDIAN
  1909. #define B 3
  1910. #define G 2
  1911. #define R 1
  1912. #define A 0
  1913. #else
  1914. #define B 0
  1915. #define G 1
  1916. #define R 2
  1917. #define A 3
  1918. #endif
  1919. static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src,
  1920. int w, int *red, int *green,
  1921. int *blue, int *alpha)
  1922. {
  1923. int i, r = *red, g = *green, b = *blue, a = *alpha;
  1924. for (i = 0; i < w; i++) {
  1925. b += src[4 * i + B];
  1926. g += src[4 * i + G];
  1927. r += src[4 * i + R];
  1928. a += src[4 * i + A];
  1929. dst[4 * i + B] = b;
  1930. dst[4 * i + G] = g;
  1931. dst[4 * i + R] = r;
  1932. dst[4 * i + A] = a;
  1933. }
  1934. *red = r;
  1935. *green = g;
  1936. *blue = b;
  1937. *alpha = a;
  1938. }
  1939. #undef B
  1940. #undef G
  1941. #undef R
  1942. #undef A
  1943. #define BUTTERFLY2(o1, o2, i1, i2) \
  1944. o1 = (i1) + (i2); \
  1945. o2 = (i1) - (i2);
  1946. #define BUTTERFLY1(x, y) \
  1947. { \
  1948. int a, b; \
  1949. a = x; \
  1950. b = y; \
  1951. x = a + b; \
  1952. y = a - b; \
  1953. }
  1954. #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
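/* 8x8 Hadamard metrics (FF_CMP_SATD): the BUTTERFLY macros implement
 * the separable 8-point Hadamard transform, applied to the difference
 * between src and dst (hadamard8_diff8x8_c) or to the source block
 * itself (hadamard8_intra8x8_c, which subtracts the DC term afterwards
 * so the block mean does not dominate). The score is the sum of the
 * absolute transformed coefficients. */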
  1955. static int hadamard8_diff8x8_c(/* MpegEncContext */ void *s, uint8_t *dst,
  1956. uint8_t *src, int stride, int h)
  1957. {
  1958. int i, temp[64], sum = 0;
  1959. assert(h == 8);
  1960. for (i = 0; i < 8; i++) {
  1961. // FIXME: try pointer walks
  1962. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1963. src[stride * i + 0] - dst[stride * i + 0],
  1964. src[stride * i + 1] - dst[stride * i + 1]);
  1965. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1966. src[stride * i + 2] - dst[stride * i + 2],
  1967. src[stride * i + 3] - dst[stride * i + 3]);
  1968. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1969. src[stride * i + 4] - dst[stride * i + 4],
  1970. src[stride * i + 5] - dst[stride * i + 5]);
  1971. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1972. src[stride * i + 6] - dst[stride * i + 6],
  1973. src[stride * i + 7] - dst[stride * i + 7]);
  1974. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1975. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1976. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1977. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1978. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1979. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1980. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1981. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1982. }
  1983. for (i = 0; i < 8; i++) {
  1984. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1985. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1986. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1987. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1988. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1989. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1990. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1991. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1992. sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
  1993. BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
  1994. BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
  1995. BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1996. }
  1997. return sum;
  1998. }
  1999. static int hadamard8_intra8x8_c(/* MpegEncContext */ void *s, uint8_t *src,
  2000. uint8_t *dummy, int stride, int h)
  2001. {
  2002. int i, temp[64], sum = 0;
  2003. assert(h == 8);
  2004. for (i = 0; i < 8; i++) {
  2005. // FIXME: try pointer walks
  2006. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  2007. src[stride * i + 0], src[stride * i + 1]);
  2008. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  2009. src[stride * i + 2], src[stride * i + 3]);
  2010. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  2011. src[stride * i + 4], src[stride * i + 5]);
  2012. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  2013. src[stride * i + 6], src[stride * i + 7]);
  2014. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  2015. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  2016. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  2017. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  2018. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  2019. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  2020. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  2021. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  2022. }
  2023. for (i = 0; i < 8; i++) {
  2024. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  2025. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  2026. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  2027. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  2028. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  2029. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  2030. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  2031. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  2032. sum +=
  2033. BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
  2034. + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
  2035. + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
  2036. + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  2037. }
2038. sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // remove the DC term (block mean)
  2039. return sum;
  2040. }
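/* FF_CMP_DCT: forward-transform the difference block and sum the
 * absolute DCT coefficients, a rough stand-in for the bit cost of
 * coding the residual. dct_max8x8_c below (FF_CMP_DCTMAX) returns the
 * largest absolute coefficient instead. */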
  2041. static int dct_sad8x8_c(/* MpegEncContext */ void *c, uint8_t *src1,
  2042. uint8_t *src2, int stride, int h)
  2043. {
  2044. MpegEncContext *const s = (MpegEncContext *) c;
  2045. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2046. assert(h == 8);
  2047. s->dsp.diff_pixels(temp, src1, src2, stride);
  2048. s->dsp.fdct(temp);
  2049. return s->dsp.sum_abs_dctelem(temp);
  2050. }
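/* FF_CMP_DCT264 (GPL only): the same idea as dct_sad8x8_c, but using
 * the 8x8 integer transform from H.264. DCT8_1D is applied first along
 * the rows and then along the columns; the DST macro of the second pass
 * accumulates the absolute value of every coefficient. */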
  2051. #if CONFIG_GPL
  2052. #define DCT8_1D \
  2053. { \
  2054. const int s07 = SRC(0) + SRC(7); \
  2055. const int s16 = SRC(1) + SRC(6); \
  2056. const int s25 = SRC(2) + SRC(5); \
  2057. const int s34 = SRC(3) + SRC(4); \
  2058. const int a0 = s07 + s34; \
  2059. const int a1 = s16 + s25; \
  2060. const int a2 = s07 - s34; \
  2061. const int a3 = s16 - s25; \
  2062. const int d07 = SRC(0) - SRC(7); \
  2063. const int d16 = SRC(1) - SRC(6); \
  2064. const int d25 = SRC(2) - SRC(5); \
  2065. const int d34 = SRC(3) - SRC(4); \
  2066. const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
  2067. const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
  2068. const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
  2069. const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
  2070. DST(0, a0 + a1); \
  2071. DST(1, a4 + (a7 >> 2)); \
  2072. DST(2, a2 + (a3 >> 1)); \
  2073. DST(3, a5 + (a6 >> 2)); \
  2074. DST(4, a0 - a1); \
  2075. DST(5, a6 - (a5 >> 2)); \
  2076. DST(6, (a2 >> 1) - a3); \
  2077. DST(7, (a4 >> 2) - a7); \
  2078. }
  2079. static int dct264_sad8x8_c(/* MpegEncContext */ void *c, uint8_t *src1,
  2080. uint8_t *src2, int stride, int h)
  2081. {
  2082. MpegEncContext *const s = (MpegEncContext *) c;
  2083. int16_t dct[8][8];
  2084. int i, sum = 0;
  2085. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  2086. #define SRC(x) dct[i][x]
  2087. #define DST(x, v) dct[i][x] = v
  2088. for (i = 0; i < 8; i++)
  2089. DCT8_1D
  2090. #undef SRC
  2091. #undef DST
  2092. #define SRC(x) dct[x][i]
  2093. #define DST(x, v) sum += FFABS(v)
  2094. for (i = 0; i < 8; i++)
  2095. DCT8_1D
  2096. #undef SRC
  2097. #undef DST
  2098. return sum;
  2099. }
  2100. #endif
  2101. static int dct_max8x8_c(/* MpegEncContext */ void *c, uint8_t *src1,
  2102. uint8_t *src2, int stride, int h)
  2103. {
  2104. MpegEncContext *const s = (MpegEncContext *) c;
  2105. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2106. int sum = 0, i;
  2107. assert(h == 8);
  2108. s->dsp.diff_pixels(temp, src1, src2, stride);
  2109. s->dsp.fdct(temp);
  2110. for (i = 0; i < 64; i++)
  2111. sum = FFMAX(sum, FFABS(temp[i]));
  2112. return sum;
  2113. }
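/* FF_CMP_PSNR: push the difference block through the encoder's
 * quantize -> dequantize -> IDCT round trip and return the squared
 * error against the original difference, i.e. the distortion the
 * current qscale would actually introduce. */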
  2114. static int quant_psnr8x8_c(/* MpegEncContext */ void *c, uint8_t *src1,
  2115. uint8_t *src2, int stride, int h)
  2116. {
  2117. MpegEncContext *const s = c;
  2118. LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
  2119. int16_t *const bak = temp + 64;
  2120. int sum = 0, i;
  2121. assert(h == 8);
  2122. s->mb_intra = 0;
  2123. s->dsp.diff_pixels(temp, src1, src2, stride);
  2124. memcpy(bak, temp, 64 * sizeof(int16_t));
  2125. s->block_last_index[0 /* FIXME */] =
  2126. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2127. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  2128. ff_simple_idct_8(temp); // FIXME
  2129. for (i = 0; i < 64; i++)
  2130. sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
  2131. return sum;
  2132. }
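/* FF_CMP_RD and FF_CMP_BIT: quantize the difference block and count the
 * bits its run/level pairs would cost with the current intra/inter AC
 * VLC tables (escapes cost ac_esc_length). rd8x8_c also reconstructs
 * the block and returns SSE distortion plus a qscale^2-weighted rate
 * term, giving a rate-distortion score; bit8x8_c returns the rate
 * alone. */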
  2133. static int rd8x8_c(/* MpegEncContext */ void *c, uint8_t *src1, uint8_t *src2,
  2134. int stride, int h)
  2135. {
  2136. MpegEncContext *const s = (MpegEncContext *) c;
  2137. const uint8_t *scantable = s->intra_scantable.permutated;
  2138. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2139. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  2140. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  2141. int i, last, run, bits, level, distortion, start_i;
  2142. const int esc_length = s->ac_esc_length;
  2143. uint8_t *length, *last_length;
  2144. assert(h == 8);
  2145. copy_block8(lsrc1, src1, 8, stride, 8);
  2146. copy_block8(lsrc2, src2, 8, stride, 8);
  2147. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  2148. s->block_last_index[0 /* FIXME */] =
  2149. last =
  2150. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2151. bits = 0;
  2152. if (s->mb_intra) {
  2153. start_i = 1;
  2154. length = s->intra_ac_vlc_length;
  2155. last_length = s->intra_ac_vlc_last_length;
  2156. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2157. } else {
  2158. start_i = 0;
  2159. length = s->inter_ac_vlc_length;
  2160. last_length = s->inter_ac_vlc_last_length;
  2161. }
  2162. if (last >= start_i) {
  2163. run = 0;
  2164. for (i = start_i; i < last; i++) {
  2165. int j = scantable[i];
  2166. level = temp[j];
  2167. if (level) {
  2168. level += 64;
  2169. if ((level & (~127)) == 0)
  2170. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2171. else
  2172. bits += esc_length;
  2173. run = 0;
  2174. } else
  2175. run++;
  2176. }
  2177. i = scantable[last];
  2178. level = temp[i] + 64;
  2179. assert(level - 64);
  2180. if ((level & (~127)) == 0) {
  2181. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2182. } else
  2183. bits += esc_length;
  2184. }
  2185. if (last >= 0) {
  2186. if (s->mb_intra)
  2187. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  2188. else
  2189. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  2190. }
  2191. s->dsp.idct_add(lsrc2, 8, temp);
  2192. distortion = s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  2193. return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
  2194. }
  2195. static int bit8x8_c(/* MpegEncContext */ void *c, uint8_t *src1, uint8_t *src2,
  2196. int stride, int h)
  2197. {
  2198. MpegEncContext *const s = (MpegEncContext *) c;
  2199. const uint8_t *scantable = s->intra_scantable.permutated;
  2200. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2201. int i, last, run, bits, level, start_i;
  2202. const int esc_length = s->ac_esc_length;
  2203. uint8_t *length, *last_length;
  2204. assert(h == 8);
  2205. s->dsp.diff_pixels(temp, src1, src2, stride);
  2206. s->block_last_index[0 /* FIXME */] =
  2207. last =
  2208. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2209. bits = 0;
  2210. if (s->mb_intra) {
  2211. start_i = 1;
  2212. length = s->intra_ac_vlc_length;
  2213. last_length = s->intra_ac_vlc_last_length;
  2214. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2215. } else {
  2216. start_i = 0;
  2217. length = s->inter_ac_vlc_length;
  2218. last_length = s->inter_ac_vlc_last_length;
  2219. }
  2220. if (last >= start_i) {
  2221. run = 0;
  2222. for (i = start_i; i < last; i++) {
  2223. int j = scantable[i];
  2224. level = temp[j];
  2225. if (level) {
  2226. level += 64;
  2227. if ((level & (~127)) == 0)
  2228. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2229. else
  2230. bits += esc_length;
  2231. run = 0;
  2232. } else
  2233. run++;
  2234. }
  2235. i = scantable[last];
  2236. level = temp[i] + 64;
  2237. assert(level - 64);
  2238. if ((level & (~127)) == 0)
  2239. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2240. else
  2241. bits += esc_length;
  2242. }
  2243. return bits;
  2244. }
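/* Vertical-gradient metrics (FF_CMP_VSAD / FF_CMP_VSSE): sum the
 * absolute or squared differences between vertically adjacent pixels,
 * either of one block (the intra variants) or of the source/reference
 * difference signal (vsad16_c / vsse16_c). High scores indicate strong
 * vertical detail, e.g. combing in interlaced material, which is why
 * these are commonly used for interlaced DCT decisions. */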
  2245. #define VSAD_INTRA(size) \
  2246. static int vsad_intra ## size ## _c(/* MpegEncContext */ void *c, \
  2247. uint8_t *s, uint8_t *dummy, \
  2248. int stride, int h) \
  2249. { \
  2250. int score = 0, x, y; \
  2251. \
  2252. for (y = 1; y < h; y++) { \
  2253. for (x = 0; x < size; x += 4) { \
  2254. score += FFABS(s[x] - s[x + stride]) + \
  2255. FFABS(s[x + 1] - s[x + stride + 1]) + \
  2256. FFABS(s[x + 2] - s[x + 2 + stride]) + \
  2257. FFABS(s[x + 3] - s[x + 3 + stride]); \
  2258. } \
  2259. s += stride; \
  2260. } \
  2261. \
  2262. return score; \
  2263. }
  2264. VSAD_INTRA(8)
  2265. VSAD_INTRA(16)
  2266. static int vsad16_c(/* MpegEncContext */ void *c, uint8_t *s1, uint8_t *s2,
  2267. int stride, int h)
  2268. {
  2269. int score = 0, x, y;
  2270. for (y = 1; y < h; y++) {
  2271. for (x = 0; x < 16; x++)
  2272. score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2273. s1 += stride;
  2274. s2 += stride;
  2275. }
  2276. return score;
  2277. }
  2278. #define SQ(a) ((a) * (a))
  2279. #define VSSE_INTRA(size) \
  2280. static int vsse_intra ## size ## _c(/* MpegEncContext */ void *c, \
  2281. uint8_t *s, uint8_t *dummy, \
  2282. int stride, int h) \
  2283. { \
  2284. int score = 0, x, y; \
  2285. \
  2286. for (y = 1; y < h; y++) { \
  2287. for (x = 0; x < size; x += 4) { \
  2288. score += SQ(s[x] - s[x + stride]) + \
  2289. SQ(s[x + 1] - s[x + stride + 1]) + \
  2290. SQ(s[x + 2] - s[x + stride + 2]) + \
  2291. SQ(s[x + 3] - s[x + stride + 3]); \
  2292. } \
  2293. s += stride; \
  2294. } \
  2295. \
  2296. return score; \
  2297. }
  2298. VSSE_INTRA(8)
  2299. VSSE_INTRA(16)
  2300. static int vsse16_c(/* MpegEncContext */ void *c, uint8_t *s1, uint8_t *s2,
  2301. int stride, int h)
  2302. {
  2303. int score = 0, x, y;
  2304. for (y = 1; y < h; y++) {
  2305. for (x = 0; x < 16; x++)
  2306. score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2307. s1 += stride;
  2308. s2 += stride;
  2309. }
  2310. return score;
  2311. }
  2312. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  2313. int size)
  2314. {
  2315. int score = 0, i;
  2316. for (i = 0; i < size; i++)
  2317. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  2318. return score;
  2319. }
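/* Build a 16x16 comparison function from an 8x8 one: score the two
 * horizontally adjacent 8x8 blocks of the top half and, if h == 16, the
 * two blocks of the bottom half as well. */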
  2320. #define WRAPPER8_16_SQ(name8, name16) \
  2321. static int name16(void /*MpegEncContext*/ *s, \
  2322. uint8_t *dst, uint8_t *src, \
  2323. int stride, int h) \
  2324. { \
  2325. int score = 0; \
  2326. \
  2327. score += name8(s, dst, src, stride, 8); \
  2328. score += name8(s, dst + 8, src + 8, stride, 8); \
  2329. if (h == 16) { \
  2330. dst += 8 * stride; \
  2331. src += 8 * stride; \
  2332. score += name8(s, dst, src, stride, 8); \
  2333. score += name8(s, dst + 8, src + 8, stride, 8); \
  2334. } \
  2335. return score; \
  2336. }
  2337. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  2338. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  2339. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  2340. #if CONFIG_GPL
  2341. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  2342. #endif
  2343. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  2344. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  2345. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  2346. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
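/* Float clipping. When min < 0 < max the clip can be done directly on
 * the IEEE-754 bit patterns: non-negative floats compare like unsigned
 * integers and negative floats compare in reverse order, so
 * clipf_c_one() clamps to min when the pattern exceeds min's pattern,
 * and to max when the sign-flipped pattern exceeds max's sign-flipped
 * pattern. vector_clipf_c() falls back to av_clipf() when min and max
 * have the same sign. */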
  2347. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  2348. uint32_t maxi, uint32_t maxisign)
  2349. {
  2350. if (a > mini)
  2351. return mini;
  2352. else if ((a ^ (1U << 31)) > maxisign)
  2353. return maxi;
  2354. else
  2355. return a;
  2356. }
  2357. static void vector_clipf_c_opposite_sign(float *dst, const float *src,
  2358. float *min, float *max, int len)
  2359. {
  2360. int i;
  2361. uint32_t mini = *(uint32_t *) min;
  2362. uint32_t maxi = *(uint32_t *) max;
  2363. uint32_t maxisign = maxi ^ (1U << 31);
  2364. uint32_t *dsti = (uint32_t *) dst;
  2365. const uint32_t *srci = (const uint32_t *) src;
  2366. for (i = 0; i < len; i += 8) {
  2367. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  2368. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  2369. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  2370. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  2371. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  2372. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  2373. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  2374. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  2375. }
  2376. }
  2377. static void vector_clipf_c(float *dst, const float *src,
  2378. float min, float max, int len)
  2379. {
  2380. int i;
  2381. if (min < 0 && max > 0) {
  2382. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  2383. } else {
  2384. for (i = 0; i < len; i += 8) {
  2385. dst[i] = av_clipf(src[i], min, max);
  2386. dst[i + 1] = av_clipf(src[i + 1], min, max);
  2387. dst[i + 2] = av_clipf(src[i + 2], min, max);
  2388. dst[i + 3] = av_clipf(src[i + 3], min, max);
  2389. dst[i + 4] = av_clipf(src[i + 4], min, max);
  2390. dst[i + 5] = av_clipf(src[i + 5], min, max);
  2391. dst[i + 6] = av_clipf(src[i + 6], min, max);
  2392. dst[i + 7] = av_clipf(src[i + 7], min, max);
  2393. }
  2394. }
  2395. }
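/* Integer dot products: scalarproduct_int16_c() returns the sum of
 * v1[i] * v2[i]; scalarproduct_and_madd_int16_c() computes the same dot
 * product while also updating v1[i] += mul * v3[i] in a single pass,
 * the kind of inner loop used by adaptive FIR predictors in some
 * lossless audio decoders. */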
  2396. static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
  2397. int order)
  2398. {
  2399. int res = 0;
  2400. while (order--)
2401. res += *v1++ * *v2++;
  2402. return res;
  2403. }
  2404. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
  2405. const int16_t *v3,
  2406. int order, int mul)
  2407. {
  2408. int res = 0;
  2409. while (order--) {
  2410. res += *v1 * *v2++;
  2411. *v1++ += mul * *v3++;
  2412. }
  2413. return res;
  2414. }
  2415. static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
  2416. int32_t max, unsigned int len)
  2417. {
  2418. do {
  2419. *dst++ = av_clip(*src++, min, max);
  2420. *dst++ = av_clip(*src++, min, max);
  2421. *dst++ = av_clip(*src++, min, max);
  2422. *dst++ = av_clip(*src++, min, max);
  2423. *dst++ = av_clip(*src++, min, max);
  2424. *dst++ = av_clip(*src++, min, max);
  2425. *dst++ = av_clip(*src++, min, max);
  2426. *dst++ = av_clip(*src++, min, max);
  2427. len -= 8;
  2428. } while (len > 0);
  2429. }
  2430. static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
  2431. {
  2432. ff_j_rev_dct(block);
  2433. put_pixels_clamped_c(block, dest, line_size);
  2434. }
  2435. static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
  2436. {
  2437. ff_j_rev_dct(block);
  2438. add_pixels_clamped_c(block, dest, line_size);
  2439. }
  2440. /* init static data */
  2441. av_cold void ff_dsputil_static_init(void)
  2442. {
  2443. int i;
  2444. for (i = 0; i < 512; i++)
  2445. ff_square_tab[i] = (i - 256) * (i - 256);
  2446. }
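/* Fill a DSPContext with the C reference implementations: pick the
 * FDCT (encoders only) and IDCT according to avctx->dct_algo /
 * idct_algo and bits_per_raw_sample, set up the pixel, comparison and
 * utility function tables, and finally let the per-architecture init
 * functions override individual entries with optimized versions.
 * Typically called as ff_dsputil_init(&s->dsp, s->avctx) from the
 * mpegvideo common init code, for example. */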
  2447. av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2448. {
  2449. #if CONFIG_ENCODERS
  2450. if (avctx->bits_per_raw_sample == 10) {
  2451. c->fdct = ff_jpeg_fdct_islow_10;
  2452. c->fdct248 = ff_fdct248_islow_10;
  2453. } else {
  2454. if (avctx->dct_algo == FF_DCT_FASTINT) {
  2455. c->fdct = ff_fdct_ifast;
  2456. c->fdct248 = ff_fdct_ifast248;
  2457. } else if (avctx->dct_algo == FF_DCT_FAAN) {
  2458. c->fdct = ff_faandct;
  2459. c->fdct248 = ff_faandct248;
  2460. } else {
  2461. c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
  2462. c->fdct248 = ff_fdct248_islow_8;
  2463. }
  2464. }
  2465. #endif /* CONFIG_ENCODERS */
  2466. if (avctx->bits_per_raw_sample == 10) {
  2467. c->idct_put = ff_simple_idct_put_10;
  2468. c->idct_add = ff_simple_idct_add_10;
  2469. c->idct = ff_simple_idct_10;
  2470. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2471. } else {
  2472. if (avctx->idct_algo == FF_IDCT_INT) {
  2473. c->idct_put = jref_idct_put;
  2474. c->idct_add = jref_idct_add;
  2475. c->idct = ff_j_rev_dct;
  2476. c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
  2477. } else if (avctx->idct_algo == FF_IDCT_FAAN) {
  2478. c->idct_put = ff_faanidct_put;
  2479. c->idct_add = ff_faanidct_add;
  2480. c->idct = ff_faanidct;
  2481. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2482. } else { // accurate/default
  2483. c->idct_put = ff_simple_idct_put_8;
  2484. c->idct_add = ff_simple_idct_add_8;
  2485. c->idct = ff_simple_idct_8;
  2486. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2487. }
  2488. }
  2489. c->diff_pixels = diff_pixels_c;
  2490. c->put_pixels_clamped = put_pixels_clamped_c;
  2491. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  2492. c->add_pixels_clamped = add_pixels_clamped_c;
  2493. c->sum_abs_dctelem = sum_abs_dctelem_c;
  2494. c->gmc1 = gmc1_c;
  2495. c->gmc = ff_gmc_c;
  2496. c->pix_sum = pix_sum_c;
  2497. c->pix_norm1 = pix_norm1_c;
  2498. c->fill_block_tab[0] = fill_block16_c;
  2499. c->fill_block_tab[1] = fill_block8_c;
2500. /* TODO: [0] = 16x16 blocks, [1] = 8x8 blocks */
  2501. c->pix_abs[0][0] = pix_abs16_c;
  2502. c->pix_abs[0][1] = pix_abs16_x2_c;
  2503. c->pix_abs[0][2] = pix_abs16_y2_c;
  2504. c->pix_abs[0][3] = pix_abs16_xy2_c;
  2505. c->pix_abs[1][0] = pix_abs8_c;
  2506. c->pix_abs[1][1] = pix_abs8_x2_c;
  2507. c->pix_abs[1][2] = pix_abs8_y2_c;
  2508. c->pix_abs[1][3] = pix_abs8_xy2_c;
  2509. c->put_tpel_pixels_tab[0] = put_tpel_pixels_mc00_c;
  2510. c->put_tpel_pixels_tab[1] = put_tpel_pixels_mc10_c;
  2511. c->put_tpel_pixels_tab[2] = put_tpel_pixels_mc20_c;
  2512. c->put_tpel_pixels_tab[4] = put_tpel_pixels_mc01_c;
  2513. c->put_tpel_pixels_tab[5] = put_tpel_pixels_mc11_c;
  2514. c->put_tpel_pixels_tab[6] = put_tpel_pixels_mc21_c;
  2515. c->put_tpel_pixels_tab[8] = put_tpel_pixels_mc02_c;
  2516. c->put_tpel_pixels_tab[9] = put_tpel_pixels_mc12_c;
  2517. c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c;
  2518. c->avg_tpel_pixels_tab[0] = avg_tpel_pixels_mc00_c;
  2519. c->avg_tpel_pixels_tab[1] = avg_tpel_pixels_mc10_c;
  2520. c->avg_tpel_pixels_tab[2] = avg_tpel_pixels_mc20_c;
  2521. c->avg_tpel_pixels_tab[4] = avg_tpel_pixels_mc01_c;
  2522. c->avg_tpel_pixels_tab[5] = avg_tpel_pixels_mc11_c;
  2523. c->avg_tpel_pixels_tab[6] = avg_tpel_pixels_mc21_c;
  2524. c->avg_tpel_pixels_tab[8] = avg_tpel_pixels_mc02_c;
  2525. c->avg_tpel_pixels_tab[9] = avg_tpel_pixels_mc12_c;
  2526. c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c;
  2527. #define dspfunc(PFX, IDX, NUM) \
  2528. c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \
  2529. c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \
  2530. c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \
  2531. c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \
  2532. c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \
  2533. c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \
  2534. c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \
  2535. c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \
  2536. c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \
  2537. c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \
  2538. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  2539. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  2540. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  2541. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  2542. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  2543. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  2544. dspfunc(put_qpel, 0, 16);
  2545. dspfunc(put_qpel, 1, 8);
  2546. dspfunc(put_no_rnd_qpel, 0, 16);
  2547. dspfunc(put_no_rnd_qpel, 1, 8);
  2548. dspfunc(avg_qpel, 0, 16);
  2549. dspfunc(avg_qpel, 1, 8);
  2550. #undef dspfunc
  2551. c->put_mspel_pixels_tab[0] = ff_put_pixels8x8_c;
  2552. c->put_mspel_pixels_tab[1] = put_mspel8_mc10_c;
  2553. c->put_mspel_pixels_tab[2] = put_mspel8_mc20_c;
  2554. c->put_mspel_pixels_tab[3] = put_mspel8_mc30_c;
  2555. c->put_mspel_pixels_tab[4] = put_mspel8_mc02_c;
  2556. c->put_mspel_pixels_tab[5] = put_mspel8_mc12_c;
  2557. c->put_mspel_pixels_tab[6] = put_mspel8_mc22_c;
  2558. c->put_mspel_pixels_tab[7] = put_mspel8_mc32_c;
  2559. #define SET_CMP_FUNC(name) \
  2560. c->name[0] = name ## 16_c; \
  2561. c->name[1] = name ## 8x8_c;
  2562. SET_CMP_FUNC(hadamard8_diff)
  2563. c->hadamard8_diff[4] = hadamard8_intra16_c;
  2564. c->hadamard8_diff[5] = hadamard8_intra8x8_c;
  2565. SET_CMP_FUNC(dct_sad)
  2566. SET_CMP_FUNC(dct_max)
  2567. #if CONFIG_GPL
  2568. SET_CMP_FUNC(dct264_sad)
  2569. #endif
  2570. c->sad[0] = pix_abs16_c;
  2571. c->sad[1] = pix_abs8_c;
  2572. c->sse[0] = sse16_c;
  2573. c->sse[1] = sse8_c;
  2574. c->sse[2] = sse4_c;
  2575. SET_CMP_FUNC(quant_psnr)
  2576. SET_CMP_FUNC(rd)
  2577. SET_CMP_FUNC(bit)
  2578. c->vsad[0] = vsad16_c;
  2579. c->vsad[4] = vsad_intra16_c;
  2580. c->vsad[5] = vsad_intra8_c;
  2581. c->vsse[0] = vsse16_c;
  2582. c->vsse[4] = vsse_intra16_c;
  2583. c->vsse[5] = vsse_intra8_c;
  2584. c->nsse[0] = nsse16_c;
  2585. c->nsse[1] = nsse8_c;
  2586. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  2587. c->add_bytes = add_bytes_c;
  2588. c->add_hfyu_median_prediction = add_hfyu_median_prediction_c;
  2589. c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
  2590. c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
  2591. c->diff_bytes = diff_bytes_c;
  2592. c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_c;
  2593. c->bswap_buf = bswap_buf;
  2594. c->bswap16_buf = bswap16_buf;
  2595. c->try_8x8basis = try_8x8basis_c;
  2596. c->add_8x8basis = add_8x8basis_c;
  2597. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  2598. c->scalarproduct_int16 = scalarproduct_int16_c;
  2599. c->vector_clip_int32 = vector_clip_int32_c;
  2600. c->vector_clipf = vector_clipf_c;
  2601. c->shrink[0] = av_image_copy_plane;
  2602. c->shrink[1] = ff_shrink22;
  2603. c->shrink[2] = ff_shrink44;
  2604. c->shrink[3] = ff_shrink88;
  2605. c->add_pixels8 = add_pixels8_c;
  2606. #undef FUNC
  2607. #undef FUNCC
  2608. #define FUNC(f, depth) f ## _ ## depth
  2609. #define FUNCC(f, depth) f ## _ ## depth ## _c
  2610. c->draw_edges = FUNCC(draw_edges, 8);
  2611. c->clear_block = FUNCC(clear_block, 8);
  2612. c->clear_blocks = FUNCC(clear_blocks, 8);
  2613. #define BIT_DEPTH_FUNCS(depth) \
  2614. c->get_pixels = FUNCC(get_pixels, depth);
  2615. switch (avctx->bits_per_raw_sample) {
  2616. case 9:
  2617. case 10:
  2618. BIT_DEPTH_FUNCS(16);
  2619. break;
  2620. default:
  2621. BIT_DEPTH_FUNCS(8);
  2622. break;
  2623. }
  2624. if (ARCH_ARM)
  2625. ff_dsputil_init_arm(c, avctx);
  2626. if (ARCH_BFIN)
  2627. ff_dsputil_init_bfin(c, avctx);
  2628. if (ARCH_PPC)
  2629. ff_dsputil_init_ppc(c, avctx);
  2630. if (ARCH_X86)
  2631. ff_dsputil_init_x86(c, avctx);
  2632. ff_init_scantable_permutation(c->idct_permutation,
  2633. c->idct_permutation_type);
  2634. }