  1. /*
  2. * DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * DSP utils
  27. */
  28. #include "libavutil/attributes.h"
  29. #include "libavutil/imgutils.h"
  30. #include "libavutil/internal.h"
  31. #include "avcodec.h"
  32. #include "copy_block.h"
  33. #include "dct.h"
  34. #include "dsputil.h"
  35. #include "simple_idct.h"
  36. #include "faandct.h"
  37. #include "faanidct.h"
  38. #include "imgconvert.h"
  39. #include "mathops.h"
  40. #include "mpegvideo.h"
  41. #include "config.h"
  42. #include "diracdsp.h"
  43. uint32_t ff_square_tab[512] = { 0, };
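/* ff_square_tab is filled at static init (later in this file) with
 * ff_square_tab[i] = (i - 256) * (i - 256), so callers index it through a
 * +256 offset to square signed byte differences. */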
  44. #define BIT_DEPTH 16
  45. #include "dsputil_template.c"
  46. #undef BIT_DEPTH
  47. #define BIT_DEPTH 8
  48. #include "tpel_template.c"
  49. #include "dsputil_template.c"
50. // 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f, depending on the CPU's native word size
  51. #define pb_7f (~0UL / 255 * 0x7f)
  52. #define pb_80 (~0UL / 255 * 0x80)
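/* Example, assuming a 64-bit unsigned long: ~0UL / 255 == 0x0101010101010101,
 * so pb_7f == 0x7f7f7f7f7f7f7f7f and pb_80 == 0x8080808080808080: the byte
 * constant replicated into every byte lane of a native word. */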
53. /* Special zigzag scan for the 2-4-8 IDCT. NOTE that unlike the
54. * specification, we interleave the two fields */
  55. const uint8_t ff_zigzag248_direct[64] = {
  56. 0, 8, 1, 9, 16, 24, 2, 10,
  57. 17, 25, 32, 40, 48, 56, 33, 41,
  58. 18, 26, 3, 11, 4, 12, 19, 27,
  59. 34, 42, 49, 57, 50, 58, 35, 43,
  60. 20, 28, 5, 13, 6, 14, 21, 29,
  61. 36, 44, 51, 59, 52, 60, 37, 45,
  62. 22, 30, 7, 15, 23, 31, 38, 46,
  63. 53, 61, 54, 62, 39, 47, 55, 63,
  64. };
  65. const uint8_t ff_alternate_horizontal_scan[64] = {
  66. 0, 1, 2, 3, 8, 9, 16, 17,
  67. 10, 11, 4, 5, 6, 7, 15, 14,
  68. 13, 12, 19, 18, 24, 25, 32, 33,
  69. 26, 27, 20, 21, 22, 23, 28, 29,
  70. 30, 31, 34, 35, 40, 41, 48, 49,
  71. 42, 43, 36, 37, 38, 39, 44, 45,
  72. 46, 47, 50, 51, 56, 57, 58, 59,
  73. 52, 53, 54, 55, 60, 61, 62, 63,
  74. };
  75. const uint8_t ff_alternate_vertical_scan[64] = {
  76. 0, 8, 16, 24, 1, 9, 2, 10,
  77. 17, 25, 32, 40, 48, 56, 57, 49,
  78. 41, 33, 26, 18, 3, 11, 4, 12,
  79. 19, 27, 34, 42, 50, 58, 35, 43,
  80. 51, 59, 20, 28, 5, 13, 6, 14,
  81. 21, 29, 36, 44, 52, 60, 37, 45,
  82. 53, 61, 22, 30, 7, 15, 23, 31,
  83. 38, 46, 54, 62, 39, 47, 55, 63,
  84. };
  85. /* Input permutation for the simple_idct_mmx */
  86. static const uint8_t simple_mmx_permutation[64] = {
  87. 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
  88. 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
  89. 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
  90. 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
  91. 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
  92. 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
  93. 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
  94. 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
  95. };
  96. static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
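/* ff_init_scantable() builds st->permutated (the scan order with the IDCT's
 * input permutation applied) and st->raster_end (for each scan position, the
 * largest permutated index encountered so far). */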
  97. av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
  98. const uint8_t *src_scantable)
  99. {
  100. int i, end;
  101. st->scantable = src_scantable;
  102. for (i = 0; i < 64; i++) {
  103. int j = src_scantable[i];
  104. st->permutated[i] = permutation[j];
  105. }
  106. end = -1;
  107. for (i = 0; i < 64; i++) {
  108. int j = st->permutated[i];
  109. if (j > end)
  110. end = j;
  111. st->raster_end[i] = end;
  112. }
  113. }
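/* ff_init_scantable_permutation() fills idct_permutation[] with the
 * coefficient reordering expected by the selected IDCT implementation, so
 * that scan tables and quantization can work directly in that layout. */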
  114. av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
  115. int idct_permutation_type)
  116. {
  117. int i;
  118. switch (idct_permutation_type) {
  119. case FF_NO_IDCT_PERM:
  120. for (i = 0; i < 64; i++)
  121. idct_permutation[i] = i;
  122. break;
  123. case FF_LIBMPEG2_IDCT_PERM:
  124. for (i = 0; i < 64; i++)
  125. idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
  126. break;
  127. case FF_SIMPLE_IDCT_PERM:
  128. for (i = 0; i < 64; i++)
  129. idct_permutation[i] = simple_mmx_permutation[i];
  130. break;
  131. case FF_TRANSPOSE_IDCT_PERM:
  132. for (i = 0; i < 64; i++)
  133. idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
  134. break;
  135. case FF_PARTTRANS_IDCT_PERM:
  136. for (i = 0; i < 64; i++)
  137. idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
  138. break;
  139. case FF_SSE2_IDCT_PERM:
  140. for (i = 0; i < 64; i++)
  141. idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
  142. break;
  143. default:
  144. av_log(NULL, AV_LOG_ERROR,
  145. "Internal error, IDCT permutation not set\n");
  146. }
  147. }
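/* pix_sum_c() and pix_norm1_c() operate on a fixed 16x16 block: the former
 * returns the plain sum of the samples, the latter the sum of their squares,
 * reading 8 bytes per iteration (as one 64-bit word where that is cheap). */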
  148. static int pix_sum_c(uint8_t *pix, int line_size)
  149. {
  150. int s = 0, i, j;
  151. for (i = 0; i < 16; i++) {
  152. for (j = 0; j < 16; j += 8) {
  153. s += pix[0];
  154. s += pix[1];
  155. s += pix[2];
  156. s += pix[3];
  157. s += pix[4];
  158. s += pix[5];
  159. s += pix[6];
  160. s += pix[7];
  161. pix += 8;
  162. }
  163. pix += line_size - 16;
  164. }
  165. return s;
  166. }
  167. static int pix_norm1_c(uint8_t *pix, int line_size)
  168. {
  169. int s = 0, i, j;
  170. uint32_t *sq = ff_square_tab + 256;
  171. for (i = 0; i < 16; i++) {
  172. for (j = 0; j < 16; j += 8) {
  173. #if 0
  174. s += sq[pix[0]];
  175. s += sq[pix[1]];
  176. s += sq[pix[2]];
  177. s += sq[pix[3]];
  178. s += sq[pix[4]];
  179. s += sq[pix[5]];
  180. s += sq[pix[6]];
  181. s += sq[pix[7]];
  182. #else
  183. #if HAVE_FAST_64BIT
  184. register uint64_t x = *(uint64_t *) pix;
  185. s += sq[x & 0xff];
  186. s += sq[(x >> 8) & 0xff];
  187. s += sq[(x >> 16) & 0xff];
  188. s += sq[(x >> 24) & 0xff];
  189. s += sq[(x >> 32) & 0xff];
  190. s += sq[(x >> 40) & 0xff];
  191. s += sq[(x >> 48) & 0xff];
  192. s += sq[(x >> 56) & 0xff];
  193. #else
  194. register uint32_t x = *(uint32_t *) pix;
  195. s += sq[x & 0xff];
  196. s += sq[(x >> 8) & 0xff];
  197. s += sq[(x >> 16) & 0xff];
  198. s += sq[(x >> 24) & 0xff];
  199. x = *(uint32_t *) (pix + 4);
  200. s += sq[x & 0xff];
  201. s += sq[(x >> 8) & 0xff];
  202. s += sq[(x >> 16) & 0xff];
  203. s += sq[(x >> 24) & 0xff];
  204. #endif
  205. #endif
  206. pix += 8;
  207. }
  208. pix += line_size - 16;
  209. }
  210. return s;
  211. }
  212. static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
  213. {
  214. int i;
  215. for (i = 0; i + 8 <= w; i += 8) {
  216. dst[i + 0] = av_bswap32(src[i + 0]);
  217. dst[i + 1] = av_bswap32(src[i + 1]);
  218. dst[i + 2] = av_bswap32(src[i + 2]);
  219. dst[i + 3] = av_bswap32(src[i + 3]);
  220. dst[i + 4] = av_bswap32(src[i + 4]);
  221. dst[i + 5] = av_bswap32(src[i + 5]);
  222. dst[i + 6] = av_bswap32(src[i + 6]);
  223. dst[i + 7] = av_bswap32(src[i + 7]);
  224. }
  225. for (; i < w; i++)
  226. dst[i + 0] = av_bswap32(src[i + 0]);
  227. }
  228. static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
  229. {
  230. while (len--)
  231. *dst++ = av_bswap16(*src++);
  232. }
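/* sse4/sse8/sse16 return the sum of squared errors for blocks 4, 8 and 16
 * pixels wide over h rows; the +256 offset into ff_square_tab lets the signed
 * differences (-255..255) index the table directly. */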
  233. static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  234. int line_size, int h)
  235. {
  236. int s = 0, i;
  237. uint32_t *sq = ff_square_tab + 256;
  238. for (i = 0; i < h; i++) {
  239. s += sq[pix1[0] - pix2[0]];
  240. s += sq[pix1[1] - pix2[1]];
  241. s += sq[pix1[2] - pix2[2]];
  242. s += sq[pix1[3] - pix2[3]];
  243. pix1 += line_size;
  244. pix2 += line_size;
  245. }
  246. return s;
  247. }
  248. static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  249. int line_size, int h)
  250. {
  251. int s = 0, i;
  252. uint32_t *sq = ff_square_tab + 256;
  253. for (i = 0; i < h; i++) {
  254. s += sq[pix1[0] - pix2[0]];
  255. s += sq[pix1[1] - pix2[1]];
  256. s += sq[pix1[2] - pix2[2]];
  257. s += sq[pix1[3] - pix2[3]];
  258. s += sq[pix1[4] - pix2[4]];
  259. s += sq[pix1[5] - pix2[5]];
  260. s += sq[pix1[6] - pix2[6]];
  261. s += sq[pix1[7] - pix2[7]];
  262. pix1 += line_size;
  263. pix2 += line_size;
  264. }
  265. return s;
  266. }
  267. static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  268. int line_size, int h)
  269. {
  270. int s = 0, i;
  271. uint32_t *sq = ff_square_tab + 256;
  272. for (i = 0; i < h; i++) {
  273. s += sq[pix1[0] - pix2[0]];
  274. s += sq[pix1[1] - pix2[1]];
  275. s += sq[pix1[2] - pix2[2]];
  276. s += sq[pix1[3] - pix2[3]];
  277. s += sq[pix1[4] - pix2[4]];
  278. s += sq[pix1[5] - pix2[5]];
  279. s += sq[pix1[6] - pix2[6]];
  280. s += sq[pix1[7] - pix2[7]];
  281. s += sq[pix1[8] - pix2[8]];
  282. s += sq[pix1[9] - pix2[9]];
  283. s += sq[pix1[10] - pix2[10]];
  284. s += sq[pix1[11] - pix2[11]];
  285. s += sq[pix1[12] - pix2[12]];
  286. s += sq[pix1[13] - pix2[13]];
  287. s += sq[pix1[14] - pix2[14]];
  288. s += sq[pix1[15] - pix2[15]];
  289. pix1 += line_size;
  290. pix2 += line_size;
  291. }
  292. return s;
  293. }
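/* diff_pixels_c() writes the 8x8 difference s1 - s2 as 16-bit values in the
 * row-major layout expected by the forward DCT. */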
  294. static void diff_pixels_c(int16_t *av_restrict block, const uint8_t *s1,
  295. const uint8_t *s2, int stride)
  296. {
  297. int i;
  298. /* read the pixels */
  299. for (i = 0; i < 8; i++) {
  300. block[0] = s1[0] - s2[0];
  301. block[1] = s1[1] - s2[1];
  302. block[2] = s1[2] - s2[2];
  303. block[3] = s1[3] - s2[3];
  304. block[4] = s1[4] - s2[4];
  305. block[5] = s1[5] - s2[5];
  306. block[6] = s1[6] - s2[6];
  307. block[7] = s1[7] - s2[7];
  308. s1 += stride;
  309. s2 += stride;
  310. block += 8;
  311. }
  312. }
  313. static void put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
  314. int line_size)
  315. {
  316. int i;
317. /* write the clamped pixels */
  318. for (i = 0; i < 8; i++) {
  319. pixels[0] = av_clip_uint8(block[0]);
  320. pixels[1] = av_clip_uint8(block[1]);
  321. pixels[2] = av_clip_uint8(block[2]);
  322. pixels[3] = av_clip_uint8(block[3]);
  323. pixels[4] = av_clip_uint8(block[4]);
  324. pixels[5] = av_clip_uint8(block[5]);
  325. pixels[6] = av_clip_uint8(block[6]);
  326. pixels[7] = av_clip_uint8(block[7]);
  327. pixels += line_size;
  328. block += 8;
  329. }
  330. }
  331. static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
  332. int line_size)
  333. {
  334. int i;
335. /* write the clamped pixels */
336. for (i = 0; i < 4; i++) {
  337. pixels[0] = av_clip_uint8(block[0]);
  338. pixels[1] = av_clip_uint8(block[1]);
  339. pixels[2] = av_clip_uint8(block[2]);
  340. pixels[3] = av_clip_uint8(block[3]);
  341. pixels += line_size;
  342. block += 8;
  343. }
  344. }
  345. static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
  346. int line_size)
  347. {
  348. int i;
349. /* write the clamped pixels */
350. for (i = 0; i < 2; i++) {
  351. pixels[0] = av_clip_uint8(block[0]);
  352. pixels[1] = av_clip_uint8(block[1]);
  353. pixels += line_size;
  354. block += 8;
  355. }
  356. }
  357. static void put_signed_pixels_clamped_c(const int16_t *block,
  358. uint8_t *av_restrict pixels,
  359. int line_size)
  360. {
  361. int i, j;
  362. for (i = 0; i < 8; i++) {
  363. for (j = 0; j < 8; j++) {
  364. if (*block < -128)
  365. *pixels = 0;
  366. else if (*block > 127)
  367. *pixels = 255;
  368. else
  369. *pixels = (uint8_t) (*block + 128);
  370. block++;
  371. pixels++;
  372. }
  373. pixels += (line_size - 8);
  374. }
  375. }
  376. static void add_pixels8_c(uint8_t *av_restrict pixels, int16_t *block,
  377. int line_size)
  378. {
  379. int i;
  380. for (i = 0; i < 8; i++) {
  381. pixels[0] += block[0];
  382. pixels[1] += block[1];
  383. pixels[2] += block[2];
  384. pixels[3] += block[3];
  385. pixels[4] += block[4];
  386. pixels[5] += block[5];
  387. pixels[6] += block[6];
  388. pixels[7] += block[7];
  389. pixels += line_size;
  390. block += 8;
  391. }
  392. }
  393. static void add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
  394. int line_size)
  395. {
  396. int i;
  397. /* read the pixels */
  398. for (i = 0; i < 8; i++) {
  399. pixels[0] = av_clip_uint8(pixels[0] + block[0]);
  400. pixels[1] = av_clip_uint8(pixels[1] + block[1]);
  401. pixels[2] = av_clip_uint8(pixels[2] + block[2]);
  402. pixels[3] = av_clip_uint8(pixels[3] + block[3]);
  403. pixels[4] = av_clip_uint8(pixels[4] + block[4]);
  404. pixels[5] = av_clip_uint8(pixels[5] + block[5]);
  405. pixels[6] = av_clip_uint8(pixels[6] + block[6]);
  406. pixels[7] = av_clip_uint8(pixels[7] + block[7]);
  407. pixels += line_size;
  408. block += 8;
  409. }
  410. }
  411. static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
  412. int line_size)
  413. {
  414. int i;
  415. /* read the pixels */
416. for (i = 0; i < 4; i++) {
  417. pixels[0] = av_clip_uint8(pixels[0] + block[0]);
  418. pixels[1] = av_clip_uint8(pixels[1] + block[1]);
  419. pixels[2] = av_clip_uint8(pixels[2] + block[2]);
  420. pixels[3] = av_clip_uint8(pixels[3] + block[3]);
  421. pixels += line_size;
  422. block += 8;
  423. }
  424. }
  425. static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
  426. int line_size)
  427. {
  428. int i;
  429. /* read the pixels */
430. for (i = 0; i < 2; i++) {
  431. pixels[0] = av_clip_uint8(pixels[0] + block[0]);
  432. pixels[1] = av_clip_uint8(pixels[1] + block[1]);
  433. pixels += line_size;
  434. block += 8;
  435. }
  436. }
  437. static int sum_abs_dctelem_c(int16_t *block)
  438. {
  439. int sum = 0, i;
  440. for (i = 0; i < 64; i++)
  441. sum += FFABS(block[i]);
  442. return sum;
  443. }
  444. static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
  445. {
  446. int i;
  447. for (i = 0; i < h; i++) {
  448. memset(block, value, 16);
  449. block += line_size;
  450. }
  451. }
  452. static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
  453. {
  454. int i;
  455. for (i = 0; i < h; i++) {
  456. memset(block, value, 8);
  457. block += line_size;
  458. }
  459. }
  460. #define avg2(a, b) ((a + b + 1) >> 1)
  461. #define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
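/* avg2()/avg4() are rounding averages: the +1/+2 bias makes the truncating
 * shift round halves upward. gmc1_c() below handles the simple (single motion
 * vector) MPEG-4 GMC case: A, B, C and D are bilinear weights derived from the
 * 1/16-pel fractional offsets x16/y16; they sum to 256, hence the >> 8. */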
  462. static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
  463. int x16, int y16, int rounder)
  464. {
  465. const int A = (16 - x16) * (16 - y16);
  466. const int B = (x16) * (16 - y16);
  467. const int C = (16 - x16) * (y16);
  468. const int D = (x16) * (y16);
  469. int i;
  470. for (i = 0; i < h; i++) {
  471. dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + rounder) >> 8;
  472. dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + rounder) >> 8;
  473. dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + rounder) >> 8;
  474. dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + rounder) >> 8;
  475. dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + rounder) >> 8;
  476. dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + rounder) >> 8;
  477. dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + rounder) >> 8;
  478. dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + rounder) >> 8;
  479. dst += stride;
  480. src += stride;
  481. }
  482. }
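/* ff_gmc_c() is the general affine global motion compensation: (ox, oy) is the
 * sub-pel source position of the first destination sample (fixed point with
 * 16 + shift fractional bits), (dxx, dyx) are its per-column increments and
 * (dxy, dyy) its per-row increments. Each destination pixel is bilinearly
 * interpolated, with the source position clamped at the picture border. */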
  483. void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  484. int dxx, int dxy, int dyx, int dyy, int shift, int r,
  485. int width, int height)
  486. {
  487. int y, vx, vy;
  488. const int s = 1 << shift;
  489. width--;
  490. height--;
  491. for (y = 0; y < h; y++) {
  492. int x;
  493. vx = ox;
  494. vy = oy;
  495. for (x = 0; x < 8; x++) { // FIXME: optimize
  496. int index;
  497. int src_x = vx >> 16;
  498. int src_y = vy >> 16;
  499. int frac_x = src_x & (s - 1);
  500. int frac_y = src_y & (s - 1);
  501. src_x >>= shift;
  502. src_y >>= shift;
  503. if ((unsigned) src_x < width) {
  504. if ((unsigned) src_y < height) {
  505. index = src_x + src_y * stride;
  506. dst[y * stride + x] =
  507. ((src[index] * (s - frac_x) +
  508. src[index + 1] * frac_x) * (s - frac_y) +
  509. (src[index + stride] * (s - frac_x) +
  510. src[index + stride + 1] * frac_x) * frac_y +
  511. r) >> (shift * 2);
  512. } else {
  513. index = src_x + av_clip(src_y, 0, height) * stride;
  514. dst[y * stride + x] =
  515. ((src[index] * (s - frac_x) +
  516. src[index + 1] * frac_x) * s +
  517. r) >> (shift * 2);
  518. }
  519. } else {
  520. if ((unsigned) src_y < height) {
  521. index = av_clip(src_x, 0, width) + src_y * stride;
  522. dst[y * stride + x] =
  523. ((src[index] * (s - frac_y) +
  524. src[index + stride] * frac_y) * s +
  525. r) >> (shift * 2);
  526. } else {
  527. index = av_clip(src_x, 0, width) +
  528. av_clip(src_y, 0, height) * stride;
  529. dst[y * stride + x] = src[index];
  530. }
  531. }
  532. vx += dxx;
  533. vy += dyx;
  534. }
  535. ox += dxy;
  536. oy += dyy;
  537. }
  538. }
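/* QPEL_MC() expands to the full set of MPEG-4 quarter-pel motion compensation
 * functions for one rounding/output mode. The h/v lowpass helpers apply the
 * 8-tap half-pel filter (-1, 3, -6, 20, 20, -6, 3, -1) / 32, mirroring samples
 * at the block edges; the mcXY functions then combine full-, half- and
 * quarter-pel positions by averaging the intermediate planes. */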
  539. #define QPEL_MC(r, OPNAME, RND, OP) \
  540. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \
  541. int dstStride, int srcStride, \
  542. int h) \
  543. { \
  544. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  545. int i; \
  546. \
  547. for (i = 0; i < h; i++) { \
  548. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  549. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  550. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  551. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  552. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  553. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \
  554. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \
  555. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \
  556. dst += dstStride; \
  557. src += srcStride; \
  558. } \
  559. } \
  560. \
  561. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \
  562. int dstStride, int srcStride) \
  563. { \
  564. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  565. const int w = 8; \
  566. int i; \
  567. \
  568. for (i = 0; i < w; i++) { \
  569. const int src0 = src[0 * srcStride]; \
  570. const int src1 = src[1 * srcStride]; \
  571. const int src2 = src[2 * srcStride]; \
  572. const int src3 = src[3 * srcStride]; \
  573. const int src4 = src[4 * srcStride]; \
  574. const int src5 = src[5 * srcStride]; \
  575. const int src6 = src[6 * srcStride]; \
  576. const int src7 = src[7 * srcStride]; \
  577. const int src8 = src[8 * srcStride]; \
  578. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  579. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  580. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  581. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  582. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  583. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \
  584. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \
  585. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \
  586. dst++; \
  587. src++; \
  588. } \
  589. } \
  590. \
  591. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \
  592. int dstStride, int srcStride, \
  593. int h) \
  594. { \
  595. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  596. int i; \
  597. \
  598. for (i = 0; i < h; i++) { \
  599. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  600. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  601. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  602. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  603. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  604. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \
  605. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \
  606. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \
  607. OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \
  608. OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \
  609. OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \
  610. OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \
  611. OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \
  612. OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \
  613. OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \
  614. OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \
  615. dst += dstStride; \
  616. src += srcStride; \
  617. } \
  618. } \
  619. \
  620. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \
  621. int dstStride, int srcStride) \
  622. { \
  623. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  624. const int w = 16; \
  625. int i; \
  626. \
  627. for (i = 0; i < w; i++) { \
  628. const int src0 = src[0 * srcStride]; \
  629. const int src1 = src[1 * srcStride]; \
  630. const int src2 = src[2 * srcStride]; \
  631. const int src3 = src[3 * srcStride]; \
  632. const int src4 = src[4 * srcStride]; \
  633. const int src5 = src[5 * srcStride]; \
  634. const int src6 = src[6 * srcStride]; \
  635. const int src7 = src[7 * srcStride]; \
  636. const int src8 = src[8 * srcStride]; \
  637. const int src9 = src[9 * srcStride]; \
  638. const int src10 = src[10 * srcStride]; \
  639. const int src11 = src[11 * srcStride]; \
  640. const int src12 = src[12 * srcStride]; \
  641. const int src13 = src[13 * srcStride]; \
  642. const int src14 = src[14 * srcStride]; \
  643. const int src15 = src[15 * srcStride]; \
  644. const int src16 = src[16 * srcStride]; \
  645. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  646. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  647. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  648. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  649. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  650. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \
  651. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \
  652. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \
  653. OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \
  654. OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \
  655. OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \
  656. OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \
  657. OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \
  658. OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \
  659. OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \
  660. OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \
  661. dst++; \
  662. src++; \
  663. } \
  664. } \
  665. \
  666. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \
  667. ptrdiff_t stride) \
  668. { \
  669. uint8_t half[64]; \
  670. \
  671. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  672. OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \
  673. } \
  674. \
  675. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \
  676. ptrdiff_t stride) \
  677. { \
  678. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \
  679. } \
  680. \
  681. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \
  682. ptrdiff_t stride) \
  683. { \
  684. uint8_t half[64]; \
  685. \
  686. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  687. OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \
  688. } \
  689. \
  690. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \
  691. ptrdiff_t stride) \
  692. { \
  693. uint8_t full[16 * 9]; \
  694. uint8_t half[64]; \
  695. \
  696. copy_block9(full, src, 16, stride, 9); \
  697. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  698. OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \
  699. } \
  700. \
  701. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \
  702. ptrdiff_t stride) \
  703. { \
  704. uint8_t full[16 * 9]; \
  705. \
  706. copy_block9(full, src, 16, stride, 9); \
  707. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \
  708. } \
  709. \
  710. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \
  711. ptrdiff_t stride) \
  712. { \
  713. uint8_t full[16 * 9]; \
  714. uint8_t half[64]; \
  715. \
  716. copy_block9(full, src, 16, stride, 9); \
  717. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  718. OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \
  719. } \
  720. \
  721. void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \
  722. ptrdiff_t stride) \
  723. { \
  724. uint8_t full[16 * 9]; \
  725. uint8_t halfH[72]; \
  726. uint8_t halfV[64]; \
  727. uint8_t halfHV[64]; \
  728. \
  729. copy_block9(full, src, 16, stride, 9); \
  730. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  731. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  732. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  733. OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \
  734. stride, 16, 8, 8, 8, 8); \
  735. } \
  736. \
  737. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \
  738. ptrdiff_t stride) \
  739. { \
  740. uint8_t full[16 * 9]; \
  741. uint8_t halfH[72]; \
  742. uint8_t halfHV[64]; \
  743. \
  744. copy_block9(full, src, 16, stride, 9); \
  745. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  746. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  747. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  748. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  749. } \
  750. \
  751. void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \
  752. ptrdiff_t stride) \
  753. { \
  754. uint8_t full[16 * 9]; \
  755. uint8_t halfH[72]; \
  756. uint8_t halfV[64]; \
  757. uint8_t halfHV[64]; \
  758. \
  759. copy_block9(full, src, 16, stride, 9); \
  760. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  761. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  762. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  763. OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  764. stride, 16, 8, 8, 8, 8); \
  765. } \
  766. \
  767. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \
  768. ptrdiff_t stride) \
  769. { \
  770. uint8_t full[16 * 9]; \
  771. uint8_t halfH[72]; \
  772. uint8_t halfHV[64]; \
  773. \
  774. copy_block9(full, src, 16, stride, 9); \
  775. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  776. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  777. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  778. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  779. } \
  780. \
  781. void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \
  782. ptrdiff_t stride) \
  783. { \
  784. uint8_t full[16 * 9]; \
  785. uint8_t halfH[72]; \
  786. uint8_t halfV[64]; \
  787. uint8_t halfHV[64]; \
  788. \
  789. copy_block9(full, src, 16, stride, 9); \
  790. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  791. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  792. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  793. OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \
  794. stride, 16, 8, 8, 8, 8); \
  795. } \
  796. \
  797. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \
  798. ptrdiff_t stride) \
  799. { \
  800. uint8_t full[16 * 9]; \
  801. uint8_t halfH[72]; \
  802. uint8_t halfHV[64]; \
  803. \
  804. copy_block9(full, src, 16, stride, 9); \
  805. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  806. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  807. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  808. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  809. } \
  810. \
  811. void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \
  812. ptrdiff_t stride) \
  813. { \
  814. uint8_t full[16 * 9]; \
  815. uint8_t halfH[72]; \
  816. uint8_t halfV[64]; \
  817. uint8_t halfHV[64]; \
  818. \
  819. copy_block9(full, src, 16, stride, 9); \
  820. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  821. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  822. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  823. OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \
  824. stride, 16, 8, 8, 8, 8); \
  825. } \
  826. \
  827. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \
  828. ptrdiff_t stride) \
  829. { \
  830. uint8_t full[16 * 9]; \
  831. uint8_t halfH[72]; \
  832. uint8_t halfHV[64]; \
  833. \
  834. copy_block9(full, src, 16, stride, 9); \
  835. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  836. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  837. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  838. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  839. } \
  840. \
  841. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \
  842. ptrdiff_t stride) \
  843. { \
  844. uint8_t halfH[72]; \
  845. uint8_t halfHV[64]; \
  846. \
  847. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  848. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  849. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  850. } \
  851. \
  852. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \
  853. ptrdiff_t stride) \
  854. { \
  855. uint8_t halfH[72]; \
  856. uint8_t halfHV[64]; \
  857. \
  858. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  859. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  860. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  861. } \
  862. \
  863. void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \
  864. ptrdiff_t stride) \
  865. { \
  866. uint8_t full[16 * 9]; \
  867. uint8_t halfH[72]; \
  868. uint8_t halfV[64]; \
  869. uint8_t halfHV[64]; \
  870. \
  871. copy_block9(full, src, 16, stride, 9); \
  872. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  873. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  874. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  875. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  876. } \
  877. \
  878. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \
  879. ptrdiff_t stride) \
  880. { \
  881. uint8_t full[16 * 9]; \
  882. uint8_t halfH[72]; \
  883. \
  884. copy_block9(full, src, 16, stride, 9); \
  885. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  886. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  887. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  888. } \
  889. \
  890. void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \
  891. ptrdiff_t stride) \
  892. { \
  893. uint8_t full[16 * 9]; \
  894. uint8_t halfH[72]; \
  895. uint8_t halfV[64]; \
  896. uint8_t halfHV[64]; \
  897. \
  898. copy_block9(full, src, 16, stride, 9); \
  899. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  900. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  901. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  902. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  903. } \
  904. \
  905. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \
  906. ptrdiff_t stride) \
  907. { \
  908. uint8_t full[16 * 9]; \
  909. uint8_t halfH[72]; \
  910. \
  911. copy_block9(full, src, 16, stride, 9); \
  912. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  913. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  914. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  915. } \
  916. \
  917. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \
  918. ptrdiff_t stride) \
  919. { \
  920. uint8_t halfH[72]; \
  921. \
  922. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  923. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  924. } \
  925. \
  926. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \
  927. ptrdiff_t stride) \
  928. { \
  929. uint8_t half[256]; \
  930. \
  931. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  932. OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \
  933. } \
  934. \
  935. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \
  936. ptrdiff_t stride) \
  937. { \
  938. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \
  939. } \
  940. \
  941. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \
  942. ptrdiff_t stride) \
  943. { \
  944. uint8_t half[256]; \
  945. \
  946. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  947. OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \
  948. } \
  949. \
  950. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \
  951. ptrdiff_t stride) \
  952. { \
  953. uint8_t full[24 * 17]; \
  954. uint8_t half[256]; \
  955. \
  956. copy_block17(full, src, 24, stride, 17); \
  957. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  958. OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \
  959. } \
  960. \
  961. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \
  962. ptrdiff_t stride) \
  963. { \
  964. uint8_t full[24 * 17]; \
  965. \
  966. copy_block17(full, src, 24, stride, 17); \
  967. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \
  968. } \
  969. \
  970. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \
  971. ptrdiff_t stride) \
  972. { \
  973. uint8_t full[24 * 17]; \
  974. uint8_t half[256]; \
  975. \
  976. copy_block17(full, src, 24, stride, 17); \
  977. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  978. OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \
  979. } \
  980. \
  981. void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \
  982. ptrdiff_t stride) \
  983. { \
  984. uint8_t full[24 * 17]; \
  985. uint8_t halfH[272]; \
  986. uint8_t halfV[256]; \
  987. uint8_t halfHV[256]; \
  988. \
  989. copy_block17(full, src, 24, stride, 17); \
  990. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  991. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  992. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  993. OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \
  994. stride, 24, 16, 16, 16, 16); \
  995. } \
  996. \
  997. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \
  998. ptrdiff_t stride) \
  999. { \
  1000. uint8_t full[24 * 17]; \
  1001. uint8_t halfH[272]; \
  1002. uint8_t halfHV[256]; \
  1003. \
  1004. copy_block17(full, src, 24, stride, 17); \
  1005. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1006. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1007. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1008. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1009. } \
  1010. \
  1011. void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \
  1012. ptrdiff_t stride) \
  1013. { \
  1014. uint8_t full[24 * 17]; \
  1015. uint8_t halfH[272]; \
  1016. uint8_t halfV[256]; \
  1017. uint8_t halfHV[256]; \
  1018. \
  1019. copy_block17(full, src, 24, stride, 17); \
  1020. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1021. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1022. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1023. OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  1024. stride, 24, 16, 16, 16, 16); \
  1025. } \
  1026. \
  1027. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \
  1028. ptrdiff_t stride) \
  1029. { \
  1030. uint8_t full[24 * 17]; \
  1031. uint8_t halfH[272]; \
  1032. uint8_t halfHV[256]; \
  1033. \
  1034. copy_block17(full, src, 24, stride, 17); \
  1035. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1036. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1037. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1038. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1039. } \
  1040. \
  1041. void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \
  1042. ptrdiff_t stride) \
  1043. { \
  1044. uint8_t full[24 * 17]; \
  1045. uint8_t halfH[272]; \
  1046. uint8_t halfV[256]; \
  1047. uint8_t halfHV[256]; \
  1048. \
  1049. copy_block17(full, src, 24, stride, 17); \
  1050. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1051. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1052. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1053. OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \
  1054. stride, 24, 16, 16, 16, 16); \
  1055. } \
  1056. \
  1057. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \
  1058. ptrdiff_t stride) \
  1059. { \
  1060. uint8_t full[24 * 17]; \
  1061. uint8_t halfH[272]; \
  1062. uint8_t halfHV[256]; \
  1063. \
  1064. copy_block17(full, src, 24, stride, 17); \
  1065. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1066. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1067. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1068. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1069. } \
  1070. \
  1071. void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \
  1072. ptrdiff_t stride) \
  1073. { \
  1074. uint8_t full[24 * 17]; \
  1075. uint8_t halfH[272]; \
  1076. uint8_t halfV[256]; \
  1077. uint8_t halfHV[256]; \
  1078. \
  1079. copy_block17(full, src, 24, stride, 17); \
  1080. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1081. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1082. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1083. OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \
  1084. stride, 24, 16, 16, 16, 16); \
  1085. } \
  1086. \
  1087. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \
  1088. ptrdiff_t stride) \
  1089. { \
  1090. uint8_t full[24 * 17]; \
  1091. uint8_t halfH[272]; \
  1092. uint8_t halfHV[256]; \
  1093. \
  1094. copy_block17(full, src, 24, stride, 17); \
  1095. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1096. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1097. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1098. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1099. } \
  1100. \
  1101. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \
  1102. ptrdiff_t stride) \
  1103. { \
  1104. uint8_t halfH[272]; \
  1105. uint8_t halfHV[256]; \
  1106. \
  1107. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1108. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1109. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1110. } \
  1111. \
  1112. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \
  1113. ptrdiff_t stride) \
  1114. { \
  1115. uint8_t halfH[272]; \
  1116. uint8_t halfHV[256]; \
  1117. \
  1118. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1119. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1120. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1121. } \
  1122. \
  1123. void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1124. ptrdiff_t stride) \
  1125. { \
  1126. uint8_t full[24 * 17]; \
  1127. uint8_t halfH[272]; \
  1128. uint8_t halfV[256]; \
  1129. uint8_t halfHV[256]; \
  1130. \
  1131. copy_block17(full, src, 24, stride, 17); \
  1132. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1133. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1134. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1135. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1136. } \
  1137. \
  1138. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \
  1139. ptrdiff_t stride) \
  1140. { \
  1141. uint8_t full[24 * 17]; \
  1142. uint8_t halfH[272]; \
  1143. \
  1144. copy_block17(full, src, 24, stride, 17); \
  1145. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1146. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1147. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1148. } \
  1149. \
  1150. void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1151. ptrdiff_t stride) \
  1152. { \
  1153. uint8_t full[24 * 17]; \
  1154. uint8_t halfH[272]; \
  1155. uint8_t halfV[256]; \
  1156. uint8_t halfHV[256]; \
  1157. \
  1158. copy_block17(full, src, 24, stride, 17); \
  1159. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1160. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1161. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1162. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1163. } \
  1164. \
  1165. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \
  1166. ptrdiff_t stride) \
  1167. { \
  1168. uint8_t full[24 * 17]; \
  1169. uint8_t halfH[272]; \
  1170. \
  1171. copy_block17(full, src, 24, stride, 17); \
  1172. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1173. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1174. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1175. } \
  1176. \
  1177. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \
  1178. ptrdiff_t stride) \
  1179. { \
  1180. uint8_t halfH[272]; \
  1181. \
  1182. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1183. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1184. }
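/* The OP macros receive the unscaled filter sum: adding 16 (or 15 for the
 * no-rounding variants) and shifting right by 5 divides by 32, and cm[] clips
 * the result to the 0..255 range. */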
  1185. #define op_avg(a, b) a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1)
  1186. #define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1)
  1187. #define op_put(a, b) a = cm[((b) + 16) >> 5]
  1188. #define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]
  1189. QPEL_MC(0, put_, _, op_put)
  1190. QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
  1191. QPEL_MC(0, avg_, _, op_avg)
  1192. #undef op_avg
  1193. #undef op_put
  1194. #undef op_put_no_rnd
  1195. void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1196. {
  1197. put_pixels8_8_c(dst, src, stride, 8);
  1198. }
  1199. void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1200. {
  1201. avg_pixels8_8_c(dst, src, stride, 8);
  1202. }
  1203. void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1204. {
  1205. put_pixels16_8_c(dst, src, stride, 16);
  1206. }
  1207. void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1208. {
  1209. avg_pixels16_8_c(dst, src, stride, 16);
  1210. }
  1211. #define put_qpel8_mc00_c ff_put_pixels8x8_c
  1212. #define avg_qpel8_mc00_c ff_avg_pixels8x8_c
  1213. #define put_qpel16_mc00_c ff_put_pixels16x16_c
  1214. #define avg_qpel16_mc00_c ff_avg_pixels16x16_c
  1215. #define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c
  1216. #define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
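/* The WMV2/MSPEL half-pel interpolation below uses the 4-tap filter
 * (-1, 9, 9, -1) / 16 with rounding, both horizontally and vertically. */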
  1217. static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src,
  1218. int dstStride, int srcStride, int h)
  1219. {
  1220. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1221. int i;
  1222. for (i = 0; i < h; i++) {
  1223. dst[0] = cm[(9 * (src[0] + src[1]) - (src[-1] + src[2]) + 8) >> 4];
  1224. dst[1] = cm[(9 * (src[1] + src[2]) - (src[0] + src[3]) + 8) >> 4];
  1225. dst[2] = cm[(9 * (src[2] + src[3]) - (src[1] + src[4]) + 8) >> 4];
  1226. dst[3] = cm[(9 * (src[3] + src[4]) - (src[2] + src[5]) + 8) >> 4];
  1227. dst[4] = cm[(9 * (src[4] + src[5]) - (src[3] + src[6]) + 8) >> 4];
  1228. dst[5] = cm[(9 * (src[5] + src[6]) - (src[4] + src[7]) + 8) >> 4];
  1229. dst[6] = cm[(9 * (src[6] + src[7]) - (src[5] + src[8]) + 8) >> 4];
  1230. dst[7] = cm[(9 * (src[7] + src[8]) - (src[6] + src[9]) + 8) >> 4];
  1231. dst += dstStride;
  1232. src += srcStride;
  1233. }
  1234. }
  1235. #if CONFIG_RV40_DECODER
  1236. void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1237. {
  1238. put_pixels16_xy2_8_c(dst, src, stride, 16);
  1239. }
  1240. void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1241. {
  1242. avg_pixels16_xy2_8_c(dst, src, stride, 16);
  1243. }
  1244. void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1245. {
  1246. put_pixels8_xy2_8_c(dst, src, stride, 8);
  1247. }
  1248. void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1249. {
  1250. avg_pixels8_xy2_8_c(dst, src, stride, 8);
  1251. }
  1252. #endif /* CONFIG_RV40_DECODER */
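/* DIRAC_MC() generates the Dirac motion-compensation wrappers: the plain, _l2
 * and _l4 variants copy or average 1, 2 and 4 source planes respectively,
 * reusing the generic pixels*_8 helpers from the bit-depth template. */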
  1253. #if CONFIG_DIRAC_DECODER
  1254. #define DIRAC_MC(OPNAME)\
  1255. void ff_ ## OPNAME ## _dirac_pixels8_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1256. {\
  1257. OPNAME ## _pixels8_8_c(dst, src[0], stride, h);\
  1258. }\
  1259. void ff_ ## OPNAME ## _dirac_pixels16_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1260. {\
  1261. OPNAME ## _pixels16_8_c(dst, src[0], stride, h);\
  1262. }\
  1263. void ff_ ## OPNAME ## _dirac_pixels32_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1264. {\
  1265. OPNAME ## _pixels16_8_c(dst , src[0] , stride, h);\
  1266. OPNAME ## _pixels16_8_c(dst+16, src[0]+16, stride, h);\
  1267. }\
  1268. void ff_ ## OPNAME ## _dirac_pixels8_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1269. {\
  1270. OPNAME ## _pixels8_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
  1271. }\
  1272. void ff_ ## OPNAME ## _dirac_pixels16_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1273. {\
  1274. OPNAME ## _pixels16_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
  1275. }\
  1276. void ff_ ## OPNAME ## _dirac_pixels32_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1277. {\
  1278. OPNAME ## _pixels16_l2_8(dst , src[0] , src[1] , stride, stride, stride, h);\
  1279. OPNAME ## _pixels16_l2_8(dst+16, src[0]+16, src[1]+16, stride, stride, stride, h);\
  1280. }\
  1281. void ff_ ## OPNAME ## _dirac_pixels8_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1282. {\
  1283. OPNAME ## _pixels8_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
  1284. }\
  1285. void ff_ ## OPNAME ## _dirac_pixels16_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1286. {\
  1287. OPNAME ## _pixels16_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
  1288. }\
  1289. void ff_ ## OPNAME ## _dirac_pixels32_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1290. {\
  1291. OPNAME ## _pixels16_l4_8(dst , src[0] , src[1] , src[2] , src[3] , stride, stride, stride, stride, stride, h);\
  1292. OPNAME ## _pixels16_l4_8(dst+16, src[0]+16, src[1]+16, src[2]+16, src[3]+16, stride, stride, stride, stride, stride, h);\
  1293. }
  1294. DIRAC_MC(put)
  1295. DIRAC_MC(avg)
  1296. #endif
  1297. static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src,
  1298. int dstStride, int srcStride, int w)
  1299. {
  1300. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
  1301. int i;
  1302. for (i = 0; i < w; i++) {
  1303. const int src_1 = src[-srcStride];
  1304. const int src0 = src[0];
  1305. const int src1 = src[srcStride];
  1306. const int src2 = src[2 * srcStride];
  1307. const int src3 = src[3 * srcStride];
  1308. const int src4 = src[4 * srcStride];
  1309. const int src5 = src[5 * srcStride];
  1310. const int src6 = src[6 * srcStride];
  1311. const int src7 = src[7 * srcStride];
  1312. const int src8 = src[8 * srcStride];
  1313. const int src9 = src[9 * srcStride];
  1314. dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
  1315. dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0 + src3) + 8) >> 4];
  1316. dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1 + src4) + 8) >> 4];
  1317. dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2 + src5) + 8) >> 4];
  1318. dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3 + src6) + 8) >> 4];
  1319. dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4 + src7) + 8) >> 4];
  1320. dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5 + src8) + 8) >> 4];
  1321. dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6 + src9) + 8) >> 4];
  1322. src++;
  1323. dst++;
  1324. }
  1325. }
  1326. static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1327. {
  1328. uint8_t half[64];
  1329. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1330. put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
  1331. }
  1332. static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1333. {
  1334. wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
  1335. }
  1336. static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1337. {
  1338. uint8_t half[64];
  1339. wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
  1340. put_pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8);
  1341. }
  1342. static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1343. {
  1344. wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
  1345. }
  1346. static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1347. {
  1348. uint8_t halfH[88];
  1349. uint8_t halfV[64];
  1350. uint8_t halfHV[64];
  1351. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1352. wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
  1353. wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
  1354. put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
  1355. }
  1356. static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1357. {
  1358. uint8_t halfH[88];
  1359. uint8_t halfV[64];
  1360. uint8_t halfHV[64];
  1361. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1362. wmv2_mspel8_v_lowpass(halfV, src + 1, 8, stride, 8);
  1363. wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
  1364. put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
  1365. }
  1366. static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
  1367. {
  1368. uint8_t halfH[88];
  1369. wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
  1370. wmv2_mspel8_v_lowpass(dst, halfH + 8, stride, 8, 8);
  1371. }
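/* The pix_abs* functions compute the sum of absolute differences (SAD) between
 * a 16- or 8-pixel-wide block and a reference over h rows; the x2/y2/xy2
 * variants compare against a half-pel interpolated reference. */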
  1372. static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1373. int line_size, int h)
  1374. {
  1375. int s = 0, i;
  1376. for (i = 0; i < h; i++) {
  1377. s += abs(pix1[0] - pix2[0]);
  1378. s += abs(pix1[1] - pix2[1]);
  1379. s += abs(pix1[2] - pix2[2]);
  1380. s += abs(pix1[3] - pix2[3]);
  1381. s += abs(pix1[4] - pix2[4]);
  1382. s += abs(pix1[5] - pix2[5]);
  1383. s += abs(pix1[6] - pix2[6]);
  1384. s += abs(pix1[7] - pix2[7]);
  1385. s += abs(pix1[8] - pix2[8]);
  1386. s += abs(pix1[9] - pix2[9]);
  1387. s += abs(pix1[10] - pix2[10]);
  1388. s += abs(pix1[11] - pix2[11]);
  1389. s += abs(pix1[12] - pix2[12]);
  1390. s += abs(pix1[13] - pix2[13]);
  1391. s += abs(pix1[14] - pix2[14]);
  1392. s += abs(pix1[15] - pix2[15]);
  1393. pix1 += line_size;
  1394. pix2 += line_size;
  1395. }
  1396. return s;
  1397. }
  1398. static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1399. int line_size, int h)
  1400. {
  1401. int s = 0, i;
  1402. for (i = 0; i < h; i++) {
  1403. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1404. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1405. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1406. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1407. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1408. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1409. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1410. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1411. s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
  1412. s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
  1413. s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
  1414. s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
  1415. s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
  1416. s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
  1417. s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
  1418. s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
  1419. pix1 += line_size;
  1420. pix2 += line_size;
  1421. }
  1422. return s;
  1423. }
  1424. static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1425. int line_size, int h)
  1426. {
  1427. int s = 0, i;
  1428. uint8_t *pix3 = pix2 + line_size;
  1429. for (i = 0; i < h; i++) {
  1430. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1431. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1432. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1433. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1434. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1435. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1436. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1437. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1438. s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
  1439. s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
  1440. s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
  1441. s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
  1442. s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
  1443. s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
  1444. s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
  1445. s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
  1446. pix1 += line_size;
  1447. pix2 += line_size;
  1448. pix3 += line_size;
  1449. }
  1450. return s;
  1451. }
  1452. static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1453. int line_size, int h)
  1454. {
  1455. int s = 0, i;
  1456. uint8_t *pix3 = pix2 + line_size;
  1457. for (i = 0; i < h; i++) {
  1458. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1459. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1460. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1461. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1462. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1463. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1464. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1465. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1466. s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
  1467. s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
  1468. s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
  1469. s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
  1470. s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
  1471. s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
  1472. s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
  1473. s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
  1474. pix1 += line_size;
  1475. pix2 += line_size;
  1476. pix3 += line_size;
  1477. }
  1478. return s;
  1479. }
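/* The _x2, _y2 and _xy2 SAD variants score half-pel positions by
 * interpolating the reference on the fly: avg2() of horizontal neighbours
 * (x2), avg2() of vertical neighbours via pix3 = pix2 + line_size (y2), or
 * avg4() over the 2x2 neighbourhood (xy2).  Note that x2 and xy2 read
 * pix2[16] (and pix3[16]), one column past the 16-pixel block, and y2/xy2
 * read one row below it, so the reference buffer must have that margin
 * readable. */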
  1480. static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1481. int line_size, int h)
  1482. {
  1483. int s = 0, i;
  1484. for (i = 0; i < h; i++) {
  1485. s += abs(pix1[0] - pix2[0]);
  1486. s += abs(pix1[1] - pix2[1]);
  1487. s += abs(pix1[2] - pix2[2]);
  1488. s += abs(pix1[3] - pix2[3]);
  1489. s += abs(pix1[4] - pix2[4]);
  1490. s += abs(pix1[5] - pix2[5]);
  1491. s += abs(pix1[6] - pix2[6]);
  1492. s += abs(pix1[7] - pix2[7]);
  1493. pix1 += line_size;
  1494. pix2 += line_size;
  1495. }
  1496. return s;
  1497. }
  1498. static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1499. int line_size, int h)
  1500. {
  1501. int s = 0, i;
  1502. for (i = 0; i < h; i++) {
  1503. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1504. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1505. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1506. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1507. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1508. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1509. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1510. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1511. pix1 += line_size;
  1512. pix2 += line_size;
  1513. }
  1514. return s;
  1515. }
  1516. static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1517. int line_size, int h)
  1518. {
  1519. int s = 0, i;
  1520. uint8_t *pix3 = pix2 + line_size;
  1521. for (i = 0; i < h; i++) {
  1522. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1523. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1524. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1525. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1526. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1527. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1528. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1529. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1530. pix1 += line_size;
  1531. pix2 += line_size;
  1532. pix3 += line_size;
  1533. }
  1534. return s;
  1535. }
  1536. static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1537. int line_size, int h)
  1538. {
  1539. int s = 0, i;
  1540. uint8_t *pix3 = pix2 + line_size;
  1541. for (i = 0; i < h; i++) {
  1542. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1543. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1544. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1545. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1546. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1547. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1548. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1549. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1550. pix1 += line_size;
  1551. pix2 += line_size;
  1552. pix3 += line_size;
  1553. }
  1554. return s;
  1555. }
  1556. static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1557. {
  1558. int score1 = 0, score2 = 0, x, y;
  1559. for (y = 0; y < h; y++) {
  1560. for (x = 0; x < 16; x++)
  1561. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1562. if (y + 1 < h) {
  1563. for (x = 0; x < 15; x++)
  1564. score2 += FFABS(s1[x] - s1[x + stride] -
  1565. s1[x + 1] + s1[x + stride + 1]) -
  1566. FFABS(s2[x] - s2[x + stride] -
  1567. s2[x + 1] + s2[x + stride + 1]);
  1568. }
  1569. s1 += stride;
  1570. s2 += stride;
  1571. }
  1572. if (c)
  1573. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1574. else
  1575. return score1 + FFABS(score2) * 8;
  1576. }
  1577. static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1578. {
  1579. int score1 = 0, score2 = 0, x, y;
  1580. for (y = 0; y < h; y++) {
  1581. for (x = 0; x < 8; x++)
  1582. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1583. if (y + 1 < h) {
  1584. for (x = 0; x < 7; x++)
  1585. score2 += FFABS(s1[x] - s1[x + stride] -
  1586. s1[x + 1] + s1[x + stride + 1]) -
  1587. FFABS(s2[x] - s2[x + stride] -
  1588. s2[x + 1] + s2[x + stride + 1]);
  1589. }
  1590. s1 += stride;
  1591. s2 += stride;
  1592. }
  1593. if (c)
  1594. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1595. else
  1596. return score1 + FFABS(score2) * 8;
  1597. }
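/* nsse16_c / nsse8_c implement the noise-preserving SSE comparison:
 * score1 is the ordinary sum of squared errors, while score2 accumulates
 * how much the 2x2 second-order gradient of s2 deviates from that of s1,
 * penalising blocks whose texture has been smoothed away.  The result is
 * score1 + |score2| * weight, with the weight taken from
 * avctx->nsse_weight when a context is passed and defaulting to 8. */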
  1598. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64],
  1599. int16_t basis[64], int scale)
  1600. {
  1601. int i;
  1602. unsigned int sum = 0;
  1603. for (i = 0; i < 8 * 8; i++) {
  1604. int b = rem[i] + ((basis[i] * scale +
  1605. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1606. (BASIS_SHIFT - RECON_SHIFT));
  1607. int w = weight[i];
  1608. b >>= RECON_SHIFT;
  1609. av_assert2(-512 < b && b < 512);
  1610. sum += (w * b) * (w * b) >> 4;
  1611. }
  1612. return sum >> 2;
  1613. }
  1614. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale)
  1615. {
  1616. int i;
  1617. for (i = 0; i < 8 * 8; i++)
  1618. rem[i] += (basis[i] * scale +
  1619. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1620. (BASIS_SHIFT - RECON_SHIFT);
  1621. }
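/* try_8x8basis_c estimates, without modifying rem[], the weighted squared
 * error that would remain after adding basis[] scaled by `scale'; the
 * (1 << (BASIS_SHIFT - RECON_SHIFT - 1)) term rounds the scaled basis down
 * from BASIS_SHIFT to RECON_SHIFT precision.  add_8x8basis_c applies the
 * same rounded update to rem[] for real.  (The exact meaning of the
 * fixed-point scaling is inferred from the shift constants here.) */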
  1622. static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
  1623. int stride, int h)
  1624. {
  1625. return 0;
  1626. }
  1627. void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
  1628. {
  1629. int i;
  1630. memset(cmp, 0, sizeof(void *) * 6);
  1631. for (i = 0; i < 6; i++) {
  1632. switch (type & 0xFF) {
  1633. case FF_CMP_SAD:
  1634. cmp[i] = c->sad[i];
  1635. break;
  1636. case FF_CMP_SATD:
  1637. cmp[i] = c->hadamard8_diff[i];
  1638. break;
  1639. case FF_CMP_SSE:
  1640. cmp[i] = c->sse[i];
  1641. break;
  1642. case FF_CMP_DCT:
  1643. cmp[i] = c->dct_sad[i];
  1644. break;
  1645. case FF_CMP_DCT264:
  1646. cmp[i] = c->dct264_sad[i];
  1647. break;
  1648. case FF_CMP_DCTMAX:
  1649. cmp[i] = c->dct_max[i];
  1650. break;
  1651. case FF_CMP_PSNR:
  1652. cmp[i] = c->quant_psnr[i];
  1653. break;
  1654. case FF_CMP_BIT:
  1655. cmp[i] = c->bit[i];
  1656. break;
  1657. case FF_CMP_RD:
  1658. cmp[i] = c->rd[i];
  1659. break;
  1660. case FF_CMP_VSAD:
  1661. cmp[i] = c->vsad[i];
  1662. break;
  1663. case FF_CMP_VSSE:
  1664. cmp[i] = c->vsse[i];
  1665. break;
  1666. case FF_CMP_ZERO:
  1667. cmp[i] = zero_cmp;
  1668. break;
  1669. case FF_CMP_NSSE:
  1670. cmp[i] = c->nsse[i];
  1671. break;
  1672. #if CONFIG_DWT
  1673. case FF_CMP_W53:
1674. cmp[i] = c->w53[i];
1675. break;
1676. case FF_CMP_W97:
1677. cmp[i] = c->w97[i];
  1678. break;
  1679. #endif
  1680. default:
  1681. av_log(NULL, AV_LOG_ERROR,
  1682. "internal error in cmp function selection\n");
  1683. }
  1684. }
  1685. }
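/* ff_set_cmp() fills a 6-entry table of me_cmp_func pointers according to
 * the low byte of `type' (one of the FF_CMP_* values); index 0 holds the
 * 16-wide variant and index 1 the 8-wide one, as set up in
 * ff_dsputil_init() below, and the table is zeroed first so unselected
 * slots stay NULL.  Typical (illustrative) use is the encoder passing
 * avctx->me_cmp or avctx->mb_cmp as `type'. */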
  1686. static void add_bytes_c(uint8_t *dst, uint8_t *src, int w)
  1687. {
  1688. long i;
  1689. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1690. long a = *(long *) (src + i);
  1691. long b = *(long *) (dst + i);
  1692. *(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
  1693. }
  1694. for (; i < w; i++)
  1695. dst[i + 0] += src[i + 0];
  1696. }
  1697. static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w)
  1698. {
  1699. long i;
  1700. #if !HAVE_FAST_UNALIGNED
  1701. if ((long) src2 & (sizeof(long) - 1)) {
  1702. for (i = 0; i + 7 < w; i += 8) {
  1703. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1704. dst[i + 1] = src1[i + 1] - src2[i + 1];
  1705. dst[i + 2] = src1[i + 2] - src2[i + 2];
  1706. dst[i + 3] = src1[i + 3] - src2[i + 3];
  1707. dst[i + 4] = src1[i + 4] - src2[i + 4];
  1708. dst[i + 5] = src1[i + 5] - src2[i + 5];
  1709. dst[i + 6] = src1[i + 6] - src2[i + 6];
  1710. dst[i + 7] = src1[i + 7] - src2[i + 7];
  1711. }
  1712. } else
  1713. #endif
  1714. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1715. long a = *(long *) (src1 + i);
  1716. long b = *(long *) (src2 + i);
  1717. *(long *) (dst + i) = ((a | pb_80) - (b & pb_7f)) ^
  1718. ((a ^ b ^ pb_80) & pb_80);
  1719. }
  1720. for (; i < w; i++)
  1721. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1722. }
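/* add_bytes_c and diff_bytes_c add/subtract whole machine words of bytes
 * at once with a SWAR trick that keeps carries from crossing byte
 * boundaries: for the add, ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80)
 * sums the low 7 bits of each byte and fixes up the top bit with the XOR.
 * Single-byte example: a = 0x90, b = 0x05 gives (0x10 + 0x05) ^ 0x80 =
 * 0x95, which matches (0x90 + 0x05) & 0xFF.  diff_bytes_c is the matching
 * borrow-free subtraction, with a bytewise fallback when the source is
 * misaligned and the CPU lacks fast unaligned loads. */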
  1723. static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1724. const uint8_t *diff, int w,
  1725. int *left, int *left_top)
  1726. {
  1727. int i;
  1728. uint8_t l, lt;
  1729. l = *left;
  1730. lt = *left_top;
  1731. for (i = 0; i < w; i++) {
  1732. l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
  1733. lt = src1[i];
  1734. dst[i] = l;
  1735. }
  1736. *left = l;
  1737. *left_top = lt;
  1738. }
  1739. static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1740. const uint8_t *src2, int w,
  1741. int *left, int *left_top)
  1742. {
  1743. int i;
  1744. uint8_t l, lt;
  1745. l = *left;
  1746. lt = *left_top;
  1747. for (i = 0; i < w; i++) {
  1748. const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
  1749. lt = src1[i];
  1750. l = src2[i];
  1751. dst[i] = l - pred;
  1752. }
  1753. *left = l;
  1754. *left_top = lt;
  1755. }
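/* HuffYUV median prediction: mid_pred(l, top, l + top - topleft) picks the
 * median of the left sample, the sample above and their gradient
 * prediction; add_hfyu_median_prediction_c reconstructs pixels by adding
 * the stored residual to that prediction, and sub_hfyu_median_prediction_c
 * produces the residual on the encoding side.  *left and *left_top carry
 * the running state so a row can be processed in slices. */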
  1756. static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src,
  1757. int w, int acc)
  1758. {
  1759. int i;
  1760. for (i = 0; i < w - 1; i++) {
  1761. acc += src[i];
  1762. dst[i] = acc;
  1763. i++;
  1764. acc += src[i];
  1765. dst[i] = acc;
  1766. }
  1767. for (; i < w; i++) {
  1768. acc += src[i];
  1769. dst[i] = acc;
  1770. }
  1771. return acc;
  1772. }
  1773. #if HAVE_BIGENDIAN
  1774. #define B 3
  1775. #define G 2
  1776. #define R 1
  1777. #define A 0
  1778. #else
  1779. #define B 0
  1780. #define G 1
  1781. #define R 2
  1782. #define A 3
  1783. #endif
  1784. static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src,
  1785. int w, int *red, int *green,
  1786. int *blue, int *alpha)
  1787. {
  1788. int i, r = *red, g = *green, b = *blue, a = *alpha;
  1789. for (i = 0; i < w; i++) {
  1790. b += src[4 * i + B];
  1791. g += src[4 * i + G];
  1792. r += src[4 * i + R];
  1793. a += src[4 * i + A];
  1794. dst[4 * i + B] = b;
  1795. dst[4 * i + G] = g;
  1796. dst[4 * i + R] = r;
  1797. dst[4 * i + A] = a;
  1798. }
  1799. *red = r;
  1800. *green = g;
  1801. *blue = b;
  1802. *alpha = a;
  1803. }
  1804. #undef B
  1805. #undef G
  1806. #undef R
  1807. #undef A
  1808. #define BUTTERFLY2(o1, o2, i1, i2) \
  1809. o1 = (i1) + (i2); \
  1810. o2 = (i1) - (i2);
  1811. #define BUTTERFLY1(x, y) \
  1812. { \
  1813. int a, b; \
  1814. a = x; \
  1815. b = y; \
  1816. x = a + b; \
  1817. y = a - b; \
  1818. }
  1819. #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
  1820. static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
  1821. uint8_t *src, int stride, int h)
  1822. {
  1823. int i, temp[64], sum = 0;
  1824. av_assert2(h == 8);
  1825. for (i = 0; i < 8; i++) {
  1826. // FIXME: try pointer walks
  1827. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1828. src[stride * i + 0] - dst[stride * i + 0],
  1829. src[stride * i + 1] - dst[stride * i + 1]);
  1830. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1831. src[stride * i + 2] - dst[stride * i + 2],
  1832. src[stride * i + 3] - dst[stride * i + 3]);
  1833. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1834. src[stride * i + 4] - dst[stride * i + 4],
  1835. src[stride * i + 5] - dst[stride * i + 5]);
  1836. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1837. src[stride * i + 6] - dst[stride * i + 6],
  1838. src[stride * i + 7] - dst[stride * i + 7]);
  1839. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1840. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1841. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1842. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1843. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1844. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1845. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1846. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1847. }
  1848. for (i = 0; i < 8; i++) {
  1849. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1850. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1851. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1852. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1853. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1854. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1855. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1856. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1857. sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
  1858. BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
  1859. BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
  1860. BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1861. }
  1862. return sum;
  1863. }
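/* hadamard8_diff8x8_c is the SATD metric behind FF_CMP_SATD: the BUTTERFLY
 * macros apply an 8-point Hadamard transform first to the rows and then to
 * the columns of the src - dst difference block, and the sum of absolute
 * transformed coefficients is returned.  hadamard8_intra8x8_c below does
 * the same on the source block alone and subtracts the absolute DC term
 * (the "-mean" adjustment) so flat blocks stay cheap. */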
  1864. static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
  1865. uint8_t *dummy, int stride, int h)
  1866. {
  1867. int i, temp[64], sum = 0;
  1868. av_assert2(h == 8);
  1869. for (i = 0; i < 8; i++) {
  1870. // FIXME: try pointer walks
  1871. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1872. src[stride * i + 0], src[stride * i + 1]);
  1873. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1874. src[stride * i + 2], src[stride * i + 3]);
  1875. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1876. src[stride * i + 4], src[stride * i + 5]);
  1877. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1878. src[stride * i + 6], src[stride * i + 7]);
  1879. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1880. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1881. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1882. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1883. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1884. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1885. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1886. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1887. }
  1888. for (i = 0; i < 8; i++) {
  1889. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1890. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1891. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1892. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1893. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1894. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1895. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1896. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1897. sum +=
  1898. BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
  1899. + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
  1900. + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
  1901. + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1902. }
  1903. sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean
  1904. return sum;
  1905. }
  1906. static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1907. uint8_t *src2, int stride, int h)
  1908. {
  1909. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1910. av_assert2(h == 8);
  1911. s->dsp.diff_pixels(temp, src1, src2, stride);
  1912. s->dsp.fdct(temp);
  1913. return s->dsp.sum_abs_dctelem(temp);
  1914. }
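/* dct_sad8x8_c rates a block by the sum of absolute forward-DCT
 * coefficients of the src1 - src2 residual (diff_pixels + fdct +
 * sum_abs_dctelem), a rough proxy for its coding cost; dct_max8x8_c
 * further below keeps only the largest absolute coefficient instead. */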
  1915. #if CONFIG_GPL
  1916. #define DCT8_1D \
  1917. { \
  1918. const int s07 = SRC(0) + SRC(7); \
  1919. const int s16 = SRC(1) + SRC(6); \
  1920. const int s25 = SRC(2) + SRC(5); \
  1921. const int s34 = SRC(3) + SRC(4); \
  1922. const int a0 = s07 + s34; \
  1923. const int a1 = s16 + s25; \
  1924. const int a2 = s07 - s34; \
  1925. const int a3 = s16 - s25; \
  1926. const int d07 = SRC(0) - SRC(7); \
  1927. const int d16 = SRC(1) - SRC(6); \
  1928. const int d25 = SRC(2) - SRC(5); \
  1929. const int d34 = SRC(3) - SRC(4); \
  1930. const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
  1931. const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
  1932. const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
  1933. const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
  1934. DST(0, a0 + a1); \
  1935. DST(1, a4 + (a7 >> 2)); \
  1936. DST(2, a2 + (a3 >> 1)); \
  1937. DST(3, a5 + (a6 >> 2)); \
  1938. DST(4, a0 - a1); \
  1939. DST(5, a6 - (a5 >> 2)); \
  1940. DST(6, (a2 >> 1) - a3); \
  1941. DST(7, (a4 >> 2) - a7); \
  1942. }
  1943. static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1944. uint8_t *src2, int stride, int h)
  1945. {
  1946. int16_t dct[8][8];
  1947. int i, sum = 0;
  1948. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  1949. #define SRC(x) dct[i][x]
  1950. #define DST(x, v) dct[i][x] = v
  1951. for (i = 0; i < 8; i++)
  1952. DCT8_1D
  1953. #undef SRC
  1954. #undef DST
  1955. #define SRC(x) dct[x][i]
  1956. #define DST(x, v) sum += FFABS(v)
  1957. for (i = 0; i < 8; i++)
  1958. DCT8_1D
  1959. #undef SRC
  1960. #undef DST
  1961. return sum;
  1962. }
  1963. #endif
  1964. static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
  1965. uint8_t *src2, int stride, int h)
  1966. {
  1967. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1968. int sum = 0, i;
  1969. av_assert2(h == 8);
  1970. s->dsp.diff_pixels(temp, src1, src2, stride);
  1971. s->dsp.fdct(temp);
  1972. for (i = 0; i < 64; i++)
  1973. sum = FFMAX(sum, FFABS(temp[i]));
  1974. return sum;
  1975. }
  1976. static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
  1977. uint8_t *src2, int stride, int h)
  1978. {
  1979. LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
  1980. int16_t *const bak = temp + 64;
  1981. int sum = 0, i;
  1982. av_assert2(h == 8);
  1983. s->mb_intra = 0;
  1984. s->dsp.diff_pixels(temp, src1, src2, stride);
  1985. memcpy(bak, temp, 64 * sizeof(int16_t));
  1986. s->block_last_index[0 /* FIXME */] =
  1987. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1988. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1989. ff_simple_idct_8(temp); // FIXME
  1990. for (i = 0; i < 64; i++)
  1991. sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
  1992. return sum;
  1993. }
  1994. static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1995. int stride, int h)
  1996. {
  1997. const uint8_t *scantable = s->intra_scantable.permutated;
  1998. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1999. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  2000. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  2001. int i, last, run, bits, level, distortion, start_i;
  2002. const int esc_length = s->ac_esc_length;
  2003. uint8_t *length, *last_length;
  2004. av_assert2(h == 8);
  2005. copy_block8(lsrc1, src1, 8, stride, 8);
  2006. copy_block8(lsrc2, src2, 8, stride, 8);
  2007. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  2008. s->block_last_index[0 /* FIXME */] =
  2009. last =
  2010. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2011. bits = 0;
  2012. if (s->mb_intra) {
  2013. start_i = 1;
  2014. length = s->intra_ac_vlc_length;
  2015. last_length = s->intra_ac_vlc_last_length;
  2016. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2017. } else {
  2018. start_i = 0;
  2019. length = s->inter_ac_vlc_length;
  2020. last_length = s->inter_ac_vlc_last_length;
  2021. }
  2022. if (last >= start_i) {
  2023. run = 0;
  2024. for (i = start_i; i < last; i++) {
  2025. int j = scantable[i];
  2026. level = temp[j];
  2027. if (level) {
  2028. level += 64;
  2029. if ((level & (~127)) == 0)
  2030. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2031. else
  2032. bits += esc_length;
  2033. run = 0;
  2034. } else
  2035. run++;
  2036. }
  2037. i = scantable[last];
  2038. level = temp[i] + 64;
  2039. av_assert2(level - 64);
  2040. if ((level & (~127)) == 0) {
  2041. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2042. } else
  2043. bits += esc_length;
  2044. }
  2045. if (last >= 0) {
  2046. if (s->mb_intra)
  2047. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  2048. else
  2049. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  2050. }
  2051. s->dsp.idct_add(lsrc2, 8, temp);
  2052. distortion = s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  2053. return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
  2054. }
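/* rd8x8_c approximates a rate-distortion cost for one 8x8 block: the
 * residual is quantized with fast_dct_quantize, the bit cost is summed
 * from the intra/inter AC VLC length tables (escapes cost ac_esc_length,
 * intra blocks add the luma DC length), the coefficients are dequantized
 * and idct_add'ed onto a copy of src2, and the SSE against src1 gives the
 * distortion.  The return value is distortion plus a bits term scaled by
 * qscale^2 * 109 / 128, a lambda-like weighting; bit8x8_c below returns
 * the bit count alone. */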
  2055. static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  2056. int stride, int h)
  2057. {
  2058. const uint8_t *scantable = s->intra_scantable.permutated;
  2059. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2060. int i, last, run, bits, level, start_i;
  2061. const int esc_length = s->ac_esc_length;
  2062. uint8_t *length, *last_length;
  2063. av_assert2(h == 8);
  2064. s->dsp.diff_pixels(temp, src1, src2, stride);
  2065. s->block_last_index[0 /* FIXME */] =
  2066. last =
  2067. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2068. bits = 0;
  2069. if (s->mb_intra) {
  2070. start_i = 1;
  2071. length = s->intra_ac_vlc_length;
  2072. last_length = s->intra_ac_vlc_last_length;
  2073. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2074. } else {
  2075. start_i = 0;
  2076. length = s->inter_ac_vlc_length;
  2077. last_length = s->inter_ac_vlc_last_length;
  2078. }
  2079. if (last >= start_i) {
  2080. run = 0;
  2081. for (i = start_i; i < last; i++) {
  2082. int j = scantable[i];
  2083. level = temp[j];
  2084. if (level) {
  2085. level += 64;
  2086. if ((level & (~127)) == 0)
  2087. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2088. else
  2089. bits += esc_length;
  2090. run = 0;
  2091. } else
  2092. run++;
  2093. }
  2094. i = scantable[last];
  2095. level = temp[i] + 64;
  2096. av_assert2(level - 64);
  2097. if ((level & (~127)) == 0)
  2098. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2099. else
  2100. bits += esc_length;
  2101. }
  2102. return bits;
  2103. }
  2104. #define VSAD_INTRA(size) \
  2105. static int vsad_intra ## size ## _c(MpegEncContext *c, \
  2106. uint8_t *s, uint8_t *dummy, \
  2107. int stride, int h) \
  2108. { \
  2109. int score = 0, x, y; \
  2110. \
  2111. for (y = 1; y < h; y++) { \
  2112. for (x = 0; x < size; x += 4) { \
  2113. score += FFABS(s[x] - s[x + stride]) + \
  2114. FFABS(s[x + 1] - s[x + stride + 1]) + \
  2115. FFABS(s[x + 2] - s[x + 2 + stride]) + \
  2116. FFABS(s[x + 3] - s[x + 3 + stride]); \
  2117. } \
  2118. s += stride; \
  2119. } \
  2120. \
  2121. return score; \
  2122. }
  2123. VSAD_INTRA(8)
  2124. VSAD_INTRA(16)
  2125. static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2126. int stride, int h)
  2127. {
  2128. int score = 0, x, y;
  2129. for (y = 1; y < h; y++) {
  2130. for (x = 0; x < 16; x++)
  2131. score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2132. s1 += stride;
  2133. s2 += stride;
  2134. }
  2135. return score;
  2136. }
  2137. #define SQ(a) ((a) * (a))
  2138. #define VSSE_INTRA(size) \
  2139. static int vsse_intra ## size ## _c(MpegEncContext *c, \
  2140. uint8_t *s, uint8_t *dummy, \
  2141. int stride, int h) \
  2142. { \
  2143. int score = 0, x, y; \
  2144. \
  2145. for (y = 1; y < h; y++) { \
  2146. for (x = 0; x < size; x += 4) { \
  2147. score += SQ(s[x] - s[x + stride]) + \
  2148. SQ(s[x + 1] - s[x + stride + 1]) + \
  2149. SQ(s[x + 2] - s[x + stride + 2]) + \
  2150. SQ(s[x + 3] - s[x + stride + 3]); \
  2151. } \
  2152. s += stride; \
  2153. } \
  2154. \
  2155. return score; \
  2156. }
  2157. VSSE_INTRA(8)
  2158. VSSE_INTRA(16)
  2159. static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2160. int stride, int h)
  2161. {
  2162. int score = 0, x, y;
  2163. for (y = 1; y < h; y++) {
  2164. for (x = 0; x < 16; x++)
  2165. score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2166. s1 += stride;
  2167. s2 += stride;
  2168. }
  2169. return score;
  2170. }
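/* The vsad/vsse metrics look only at vertical activity: for each pair of
 * adjacent lines they accumulate the absolute (vsad) or squared (vsse)
 * change of the inter difference s1 - s2 from one line to the next, and
 * the *_intra variants measure the vertical gradient within a single
 * block.  They are selectable through FF_CMP_VSAD / FF_CMP_VSSE in
 * ff_set_cmp(). */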
  2171. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  2172. int size)
  2173. {
  2174. int score = 0, i;
  2175. for (i = 0; i < size; i++)
  2176. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  2177. return score;
  2178. }
  2179. #define WRAPPER8_16_SQ(name8, name16) \
  2180. static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
  2181. int stride, int h) \
  2182. { \
  2183. int score = 0; \
  2184. \
  2185. score += name8(s, dst, src, stride, 8); \
  2186. score += name8(s, dst + 8, src + 8, stride, 8); \
  2187. if (h == 16) { \
  2188. dst += 8 * stride; \
  2189. src += 8 * stride; \
  2190. score += name8(s, dst, src, stride, 8); \
  2191. score += name8(s, dst + 8, src + 8, stride, 8); \
  2192. } \
  2193. return score; \
  2194. }
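/* WRAPPER8_16_SQ builds a 16-wide comparison from an 8x8 one by summing
 * the left and right halves and, when h == 16, the two bottom halves as
 * well; the instantiations below derive the 16-wide entries (hadamard,
 * dct_sad, dct_max, quant_psnr, rd, bit) registered in ff_dsputil_init(). */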
  2195. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  2196. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  2197. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  2198. #if CONFIG_GPL
  2199. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  2200. #endif
  2201. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  2202. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  2203. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  2204. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
  2205. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  2206. uint32_t maxi, uint32_t maxisign)
  2207. {
  2208. if (a > mini)
  2209. return mini;
  2210. else if ((a ^ (1U << 31)) > maxisign)
  2211. return maxi;
  2212. else
  2213. return a;
  2214. }
  2215. static void vector_clipf_c_opposite_sign(float *dst, const float *src,
  2216. float *min, float *max, int len)
  2217. {
  2218. int i;
  2219. uint32_t mini = *(uint32_t *) min;
  2220. uint32_t maxi = *(uint32_t *) max;
  2221. uint32_t maxisign = maxi ^ (1U << 31);
  2222. uint32_t *dsti = (uint32_t *) dst;
  2223. const uint32_t *srci = (const uint32_t *) src;
  2224. for (i = 0; i < len; i += 8) {
  2225. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  2226. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  2227. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  2228. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  2229. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  2230. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  2231. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  2232. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  2233. }
  2234. }
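/* vector_clipf_c_opposite_sign handles the min < 0 < max case by comparing
 * the raw IEEE-754 bit patterns as unsigned integers: negative floats
 * occupy patterns above 0x80000000 and sort in reverse, so a > mini (mini
 * being the bits of min) is true exactly for values below min, while
 * XOR-ing the sign bit into `a' turns the comparison against maxisign into
 * an ordinary a > max test for non-negative values.  No special handling
 * of NaNs is attempted. */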
  2235. static void vector_clipf_c(float *dst, const float *src,
  2236. float min, float max, int len)
  2237. {
  2238. int i;
  2239. if (min < 0 && max > 0) {
  2240. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  2241. } else {
  2242. for (i = 0; i < len; i += 8) {
  2243. dst[i] = av_clipf(src[i], min, max);
  2244. dst[i + 1] = av_clipf(src[i + 1], min, max);
  2245. dst[i + 2] = av_clipf(src[i + 2], min, max);
  2246. dst[i + 3] = av_clipf(src[i + 3], min, max);
  2247. dst[i + 4] = av_clipf(src[i + 4], min, max);
  2248. dst[i + 5] = av_clipf(src[i + 5], min, max);
  2249. dst[i + 6] = av_clipf(src[i + 6], min, max);
  2250. dst[i + 7] = av_clipf(src[i + 7], min, max);
  2251. }
  2252. }
  2253. }
  2254. static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
  2255. int order)
  2256. {
  2257. int res = 0;
  2258. while (order--)
2259. res += *v1++ * *v2++;
  2260. return res;
  2261. }
  2262. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
  2263. const int16_t *v3,
  2264. int order, int mul)
  2265. {
  2266. int res = 0;
  2267. while (order--) {
  2268. res += *v1 * *v2++;
  2269. *v1++ += mul * *v3++;
  2270. }
  2271. return res;
  2272. }
  2273. static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
  2274. int32_t max, unsigned int len)
  2275. {
  2276. do {
  2277. *dst++ = av_clip(*src++, min, max);
  2278. *dst++ = av_clip(*src++, min, max);
  2279. *dst++ = av_clip(*src++, min, max);
  2280. *dst++ = av_clip(*src++, min, max);
  2281. *dst++ = av_clip(*src++, min, max);
  2282. *dst++ = av_clip(*src++, min, max);
  2283. *dst++ = av_clip(*src++, min, max);
  2284. *dst++ = av_clip(*src++, min, max);
  2285. len -= 8;
  2286. } while (len > 0);
  2287. }
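/* vector_clip_int32_c clips eight elements per pass of a do/while loop, so
 * len is assumed to be a positive multiple of 8; shorter, unpadded buffers
 * would be over-read. */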
  2288. static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
  2289. {
  2290. ff_j_rev_dct(block);
  2291. put_pixels_clamped_c(block, dest, line_size);
  2292. }
  2293. static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
  2294. {
  2295. ff_j_rev_dct(block);
  2296. add_pixels_clamped_c(block, dest, line_size);
  2297. }
  2298. static void ff_jref_idct4_put(uint8_t *dest, int line_size, int16_t *block)
  2299. {
  2300. ff_j_rev_dct4 (block);
  2301. put_pixels_clamped4_c(block, dest, line_size);
  2302. }
  2303. static void ff_jref_idct4_add(uint8_t *dest, int line_size, int16_t *block)
  2304. {
  2305. ff_j_rev_dct4 (block);
  2306. add_pixels_clamped4_c(block, dest, line_size);
  2307. }
  2308. static void ff_jref_idct2_put(uint8_t *dest, int line_size, int16_t *block)
  2309. {
  2310. ff_j_rev_dct2 (block);
  2311. put_pixels_clamped2_c(block, dest, line_size);
  2312. }
  2313. static void ff_jref_idct2_add(uint8_t *dest, int line_size, int16_t *block)
  2314. {
  2315. ff_j_rev_dct2 (block);
  2316. add_pixels_clamped2_c(block, dest, line_size);
  2317. }
  2318. static void ff_jref_idct1_put(uint8_t *dest, int line_size, int16_t *block)
  2319. {
  2320. dest[0] = av_clip_uint8((block[0] + 4)>>3);
  2321. }
  2322. static void ff_jref_idct1_add(uint8_t *dest, int line_size, int16_t *block)
  2323. {
  2324. dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
  2325. }
  2326. /* init static data */
  2327. av_cold void ff_dsputil_static_init(void)
  2328. {
  2329. int i;
  2330. for (i = 0; i < 512; i++)
  2331. ff_square_tab[i] = (i - 256) * (i - 256);
  2332. }
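/* ff_square_tab[i] = (i - 256)^2: the table is centred on index 256 so a
 * signed pixel difference d in [-255, 255] can be squared with a single
 * lookup at d + 256 by the sum-of-squared-error comparisons. */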
  2333. int ff_check_alignment(void)
  2334. {
  2335. static int did_fail = 0;
  2336. LOCAL_ALIGNED_16(int, aligned, [4]);
  2337. if ((intptr_t)aligned & 15) {
  2338. if (!did_fail) {
  2339. #if HAVE_MMX || HAVE_ALTIVEC
  2340. av_log(NULL, AV_LOG_ERROR,
  2341. "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
  2342. "and may be very slow or crash. This is not a bug in libavcodec,\n"
  2343. "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
  2344. "Do not report crashes to FFmpeg developers.\n");
  2345. #endif
2346. did_fail = 1;
  2347. }
  2348. return -1;
  2349. }
  2350. return 0;
  2351. }
  2352. av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2353. {
  2354. const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
  2355. ff_check_alignment();
  2356. #if CONFIG_ENCODERS
  2357. if (avctx->bits_per_raw_sample == 10) {
  2358. c->fdct = ff_jpeg_fdct_islow_10;
  2359. c->fdct248 = ff_fdct248_islow_10;
  2360. } else {
  2361. if (avctx->dct_algo == FF_DCT_FASTINT) {
  2362. c->fdct = ff_fdct_ifast;
  2363. c->fdct248 = ff_fdct_ifast248;
  2364. } else if (avctx->dct_algo == FF_DCT_FAAN) {
  2365. c->fdct = ff_faandct;
  2366. c->fdct248 = ff_faandct248;
  2367. } else {
  2368. c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
  2369. c->fdct248 = ff_fdct248_islow_8;
  2370. }
  2371. }
  2372. #endif /* CONFIG_ENCODERS */
  2373. if (avctx->lowres==1) {
  2374. c->idct_put = ff_jref_idct4_put;
  2375. c->idct_add = ff_jref_idct4_add;
  2376. c->idct = ff_j_rev_dct4;
  2377. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2378. } else if (avctx->lowres==2) {
  2379. c->idct_put = ff_jref_idct2_put;
  2380. c->idct_add = ff_jref_idct2_add;
  2381. c->idct = ff_j_rev_dct2;
  2382. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2383. } else if (avctx->lowres==3) {
  2384. c->idct_put = ff_jref_idct1_put;
  2385. c->idct_add = ff_jref_idct1_add;
  2386. c->idct = ff_j_rev_dct1;
  2387. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2388. } else {
  2389. if (avctx->bits_per_raw_sample == 10) {
  2390. c->idct_put = ff_simple_idct_put_10;
  2391. c->idct_add = ff_simple_idct_add_10;
  2392. c->idct = ff_simple_idct_10;
  2393. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2394. } else if (avctx->bits_per_raw_sample == 12) {
  2395. c->idct_put = ff_simple_idct_put_12;
  2396. c->idct_add = ff_simple_idct_add_12;
  2397. c->idct = ff_simple_idct_12;
  2398. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2399. } else {
  2400. if (avctx->idct_algo == FF_IDCT_INT) {
  2401. c->idct_put = jref_idct_put;
  2402. c->idct_add = jref_idct_add;
  2403. c->idct = ff_j_rev_dct;
  2404. c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
  2405. } else if (avctx->idct_algo == FF_IDCT_FAAN) {
  2406. c->idct_put = ff_faanidct_put;
  2407. c->idct_add = ff_faanidct_add;
  2408. c->idct = ff_faanidct;
  2409. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2410. } else { // accurate/default
  2411. c->idct_put = ff_simple_idct_put_8;
  2412. c->idct_add = ff_simple_idct_add_8;
  2413. c->idct = ff_simple_idct_8;
  2414. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2415. }
  2416. }
  2417. }
  2418. c->diff_pixels = diff_pixels_c;
  2419. c->put_pixels_clamped = put_pixels_clamped_c;
  2420. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  2421. c->add_pixels_clamped = add_pixels_clamped_c;
  2422. c->sum_abs_dctelem = sum_abs_dctelem_c;
  2423. c->gmc1 = gmc1_c;
  2424. c->gmc = ff_gmc_c;
  2425. c->pix_sum = pix_sum_c;
  2426. c->pix_norm1 = pix_norm1_c;
  2427. c->fill_block_tab[0] = fill_block16_c;
  2428. c->fill_block_tab[1] = fill_block8_c;
  2429. /* TODO [0] 16 [1] 8 */
  2430. c->pix_abs[0][0] = pix_abs16_c;
  2431. c->pix_abs[0][1] = pix_abs16_x2_c;
  2432. c->pix_abs[0][2] = pix_abs16_y2_c;
  2433. c->pix_abs[0][3] = pix_abs16_xy2_c;
  2434. c->pix_abs[1][0] = pix_abs8_c;
  2435. c->pix_abs[1][1] = pix_abs8_x2_c;
  2436. c->pix_abs[1][2] = pix_abs8_y2_c;
  2437. c->pix_abs[1][3] = pix_abs8_xy2_c;
  2438. #define dspfunc(PFX, IDX, NUM) \
  2439. c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \
  2440. c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \
  2441. c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \
  2442. c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \
  2443. c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \
  2444. c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \
  2445. c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \
  2446. c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \
  2447. c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \
  2448. c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \
  2449. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  2450. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  2451. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  2452. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  2453. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  2454. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  2455. dspfunc(put_qpel, 0, 16);
  2456. dspfunc(put_qpel, 1, 8);
  2457. dspfunc(put_no_rnd_qpel, 0, 16);
  2458. dspfunc(put_no_rnd_qpel, 1, 8);
  2459. dspfunc(avg_qpel, 0, 16);
  2460. dspfunc(avg_qpel, 1, 8);
  2461. #undef dspfunc
  2462. c->put_mspel_pixels_tab[0] = ff_put_pixels8x8_c;
  2463. c->put_mspel_pixels_tab[1] = put_mspel8_mc10_c;
  2464. c->put_mspel_pixels_tab[2] = put_mspel8_mc20_c;
  2465. c->put_mspel_pixels_tab[3] = put_mspel8_mc30_c;
  2466. c->put_mspel_pixels_tab[4] = put_mspel8_mc02_c;
  2467. c->put_mspel_pixels_tab[5] = put_mspel8_mc12_c;
  2468. c->put_mspel_pixels_tab[6] = put_mspel8_mc22_c;
  2469. c->put_mspel_pixels_tab[7] = put_mspel8_mc32_c;
  2470. #define SET_CMP_FUNC(name) \
  2471. c->name[0] = name ## 16_c; \
  2472. c->name[1] = name ## 8x8_c;
  2473. SET_CMP_FUNC(hadamard8_diff)
  2474. c->hadamard8_diff[4] = hadamard8_intra16_c;
  2475. c->hadamard8_diff[5] = hadamard8_intra8x8_c;
  2476. SET_CMP_FUNC(dct_sad)
  2477. SET_CMP_FUNC(dct_max)
  2478. #if CONFIG_GPL
  2479. SET_CMP_FUNC(dct264_sad)
  2480. #endif
  2481. c->sad[0] = pix_abs16_c;
  2482. c->sad[1] = pix_abs8_c;
  2483. c->sse[0] = sse16_c;
  2484. c->sse[1] = sse8_c;
  2485. c->sse[2] = sse4_c;
  2486. SET_CMP_FUNC(quant_psnr)
  2487. SET_CMP_FUNC(rd)
  2488. SET_CMP_FUNC(bit)
  2489. c->vsad[0] = vsad16_c;
  2490. c->vsad[4] = vsad_intra16_c;
  2491. c->vsad[5] = vsad_intra8_c;
  2492. c->vsse[0] = vsse16_c;
  2493. c->vsse[4] = vsse_intra16_c;
  2494. c->vsse[5] = vsse_intra8_c;
  2495. c->nsse[0] = nsse16_c;
  2496. c->nsse[1] = nsse8_c;
  2497. #if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
  2498. ff_dsputil_init_dwt(c);
  2499. #endif
  2500. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  2501. c->add_bytes = add_bytes_c;
  2502. c->add_hfyu_median_prediction = add_hfyu_median_prediction_c;
  2503. c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
  2504. c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
  2505. c->diff_bytes = diff_bytes_c;
  2506. c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_c;
  2507. c->bswap_buf = bswap_buf;
  2508. c->bswap16_buf = bswap16_buf;
  2509. c->try_8x8basis = try_8x8basis_c;
  2510. c->add_8x8basis = add_8x8basis_c;
  2511. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  2512. c->scalarproduct_int16 = scalarproduct_int16_c;
  2513. c->vector_clip_int32 = vector_clip_int32_c;
  2514. c->vector_clipf = vector_clipf_c;
  2515. c->shrink[0] = av_image_copy_plane;
  2516. c->shrink[1] = ff_shrink22;
  2517. c->shrink[2] = ff_shrink44;
  2518. c->shrink[3] = ff_shrink88;
  2519. c->add_pixels8 = add_pixels8_c;
  2520. #undef FUNC
  2521. #undef FUNCC
  2522. #define FUNC(f, depth) f ## _ ## depth
  2523. #define FUNCC(f, depth) f ## _ ## depth ## _c
  2524. c->draw_edges = FUNCC(draw_edges, 8);
  2525. c->clear_block = FUNCC(clear_block, 8);
  2526. c->clear_blocks = FUNCC(clear_blocks, 8);
  2527. #define BIT_DEPTH_FUNCS(depth) \
  2528. c->get_pixels = FUNCC(get_pixels, depth);
  2529. switch (avctx->bits_per_raw_sample) {
  2530. case 9:
  2531. case 10:
  2532. case 12:
  2533. case 14:
  2534. BIT_DEPTH_FUNCS(16);
  2535. break;
  2536. default:
  2537. if (avctx->bits_per_raw_sample<=8 || avctx->codec_type != AVMEDIA_TYPE_VIDEO) {
  2538. BIT_DEPTH_FUNCS(8);
  2539. }
  2540. break;
  2541. }
  2542. if (ARCH_ALPHA)
  2543. ff_dsputil_init_alpha(c, avctx);
  2544. if (ARCH_ARM)
  2545. ff_dsputil_init_arm(c, avctx, high_bit_depth);
  2546. if (ARCH_BFIN)
  2547. ff_dsputil_init_bfin(c, avctx, high_bit_depth);
  2548. if (ARCH_PPC)
  2549. ff_dsputil_init_ppc(c, avctx, high_bit_depth);
  2550. if (ARCH_X86)
  2551. ff_dsputil_init_x86(c, avctx, high_bit_depth);
  2552. ff_init_scantable_permutation(c->idct_permutation,
  2553. c->idct_permutation_type);
  2554. }
  2555. av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
  2556. {
  2557. ff_dsputil_init(c, avctx);
  2558. }
  2559. av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2560. {
  2561. ff_dsputil_init(c, avctx);
  2562. }