/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP utils
 */

#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "dct.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "imgconvert.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"
#include "diracdsp.h"

uint32_t ff_square_tab[512] = { 0, };

#define BIT_DEPTH 16
#include "dsputilenc_template.c"
#undef BIT_DEPTH

#define BIT_DEPTH 8
#include "hpel_template.c"
#include "tpel_template.c"
#include "dsputil_template.c"
#include "dsputilenc_template.c"

// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the CPU's native arithmetic size
#define pb_7f (~0UL / 255 * 0x7f)
#define pb_80 (~0UL / 255 * 0x80)
/* Specific zigzag scan for 248 idct. NOTE that unlike the
 * specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
     0,  8,  1,  9, 16, 24,  2, 10,
    17, 25, 32, 40, 48, 56, 33, 41,
    18, 26,  3, 11,  4, 12, 19, 27,
    34, 42, 49, 57, 50, 58, 35, 43,
    20, 28,  5, 13,  6, 14, 21, 29,
    36, 44, 51, 59, 52, 60, 37, 45,
    22, 30,  7, 15, 23, 31, 38, 46,
    53, 61, 54, 62, 39, 47, 55, 63,
};

const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};

/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64] = {
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};

static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    int i, end;

    st->scantable = src_scantable;

    for (i = 0; i < 64; i++) {
        int j = src_scantable[i];
        st->permutated[i] = permutation[j];
    }

    end = -1;
    for (i = 0; i < 64; i++) {
        int j = st->permutated[i];
        if (j > end)
            end = j;
        st->raster_end[i] = end;
    }
}

av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
                                           int idct_permutation_type)
{
    int i;

    switch (idct_permutation_type) {
    case FF_NO_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = i;
        break;
    case FF_LIBMPEG2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
        break;
    case FF_SIMPLE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = simple_mmx_permutation[i];
        break;
    case FF_TRANSPOSE_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
        break;
    case FF_PARTTRANS_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
        break;
    case FF_SSE2_IDCT_PERM:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
        break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Internal error, IDCT permutation not set\n");
    }
}
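
/* Sum of all 256 pixel values of a 16x16 block. */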
static int pix_sum_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
            s += pix[0];
            s += pix[1];
            s += pix[2];
            s += pix[3];
            s += pix[4];
            s += pix[5];
            s += pix[6];
            s += pix[7];
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}
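
/* Sum of squared pixel values of a 16x16 block. ff_square_tab is indexed
 * with an offset of 256, so signed differences in [-255, 255] can be
 * squared through the same table (see the sse*_c functions below). */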
static int pix_norm1_c(uint8_t *pix, int line_size)
{
    int s = 0, i, j;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
#if 0
            s += sq[pix[0]];
            s += sq[pix[1]];
            s += sq[pix[2]];
            s += sq[pix[3]];
            s += sq[pix[4]];
            s += sq[pix[5]];
            s += sq[pix[6]];
            s += sq[pix[7]];
#else
#if HAVE_FAST_64BIT
            register uint64_t x = *(uint64_t *) pix;
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            s += sq[(x >> 32) & 0xff];
            s += sq[(x >> 40) & 0xff];
            s += sq[(x >> 48) & 0xff];
            s += sq[(x >> 56) & 0xff];
#else
            register uint32_t x = *(uint32_t *) pix;
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
            x = *(uint32_t *) (pix + 4);
            s += sq[x         & 0xff];
            s += sq[(x >>  8) & 0xff];
            s += sq[(x >> 16) & 0xff];
            s += sq[(x >> 24) & 0xff];
#endif
#endif
            pix += 8;
        }
        pix += line_size - 16;
    }
    return s;
}
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
{
    int i;

    for (i = 0; i + 8 <= w; i += 8) {
        dst[i + 0] = av_bswap32(src[i + 0]);
        dst[i + 1] = av_bswap32(src[i + 1]);
        dst[i + 2] = av_bswap32(src[i + 2]);
        dst[i + 3] = av_bswap32(src[i + 3]);
        dst[i + 4] = av_bswap32(src[i + 4]);
        dst[i + 5] = av_bswap32(src[i + 5]);
        dst[i + 6] = av_bswap32(src[i + 6]);
        dst[i + 7] = av_bswap32(src[i + 7]);
    }
    for (; i < w; i++)
        dst[i + 0] = av_bswap32(src[i + 0]);
}

static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
{
    while (len--)
        *dst++ = av_bswap16(*src++);
}
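
/* Sum of squared errors between two blocks of width 4, 8 or 16 and height h;
 * the signed difference of two 8-bit pixels is looked up in
 * ff_square_tab + 256. */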
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}

static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   int line_size, int h)
{
    int s = 0, i;
    uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0]  - pix2[0]];
        s += sq[pix1[1]  - pix2[1]];
        s += sq[pix1[2]  - pix2[2]];
        s += sq[pix1[3]  - pix2[3]];
        s += sq[pix1[4]  - pix2[4]];
        s += sq[pix1[5]  - pix2[5]];
        s += sq[pix1[6]  - pix2[6]];
        s += sq[pix1[7]  - pix2[7]];
        s += sq[pix1[8]  - pix2[8]];
        s += sq[pix1[9]  - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
static void diff_pixels_c(int16_t *av_restrict block, const uint8_t *s1,
                          const uint8_t *s2, int stride)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        block[0] = s1[0] - s2[0];
        block[1] = s1[1] - s2[1];
        block[2] = s1[2] - s2[2];
        block[3] = s1[3] - s2[3];
        block[4] = s1[4] - s2[4];
        block[5] = s1[5] - s2[5];
        block[6] = s1[6] - s2[6];
        block[7] = s1[7] - s2[7];
        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(block[0]);
        pixels[1] = av_clip_uint8(block[1]);
        pixels[2] = av_clip_uint8(block[2]);
        pixels[3] = av_clip_uint8(block[3]);
        pixels[4] = av_clip_uint8(block[4]);
        pixels[5] = av_clip_uint8(block[5]);
        pixels[6] = av_clip_uint8(block[6]);
        pixels[7] = av_clip_uint8(block[7]);
        pixels += line_size;
        block  += 8;
    }
}

static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 4; i++) {
        pixels[0] = av_clip_uint8(block[0]);
        pixels[1] = av_clip_uint8(block[1]);
        pixels[2] = av_clip_uint8(block[2]);
        pixels[3] = av_clip_uint8(block[3]);
        pixels += line_size;
        block  += 8;
    }
}

static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 2; i++) {
        pixels[0] = av_clip_uint8(block[0]);
        pixels[1] = av_clip_uint8(block[1]);
        pixels += line_size;
        block  += 8;
    }
}
static void put_signed_pixels_clamped_c(const int16_t *block,
                                        uint8_t *av_restrict pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            if (*block < -128)
                *pixels = 0;
            else if (*block > 127)
                *pixels = 255;
            else
                *pixels = (uint8_t) (*block + 128);
            block++;
            pixels++;
        }
        pixels += (line_size - 8);
    }
}

static void add_pixels8_c(uint8_t *av_restrict pixels, int16_t *block,
                          int line_size)
{
    int i;

    for (i = 0; i < 8; i++) {
        pixels[0] += block[0];
        pixels[1] += block[1];
        pixels[2] += block[2];
        pixels[3] += block[3];
        pixels[4] += block[4];
        pixels[5] += block[5];
        pixels[6] += block[6];
        pixels[7] += block[7];
        pixels += line_size;
        block  += 8;
    }
}

static void add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
                                 int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 8; i++) {
        pixels[0] = av_clip_uint8(pixels[0] + block[0]);
        pixels[1] = av_clip_uint8(pixels[1] + block[1]);
        pixels[2] = av_clip_uint8(pixels[2] + block[2]);
        pixels[3] = av_clip_uint8(pixels[3] + block[3]);
        pixels[4] = av_clip_uint8(pixels[4] + block[4]);
        pixels[5] = av_clip_uint8(pixels[5] + block[5]);
        pixels[6] = av_clip_uint8(pixels[6] + block[6]);
        pixels[7] = av_clip_uint8(pixels[7] + block[7]);
        pixels += line_size;
        block  += 8;
    }
}

static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 4; i++) {
        pixels[0] = av_clip_uint8(pixels[0] + block[0]);
        pixels[1] = av_clip_uint8(pixels[1] + block[1]);
        pixels[2] = av_clip_uint8(pixels[2] + block[2]);
        pixels[3] = av_clip_uint8(pixels[3] + block[3]);
        pixels += line_size;
        block  += 8;
    }
}

static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int i;

    /* read the pixels */
    for (i = 0; i < 2; i++) {
        pixels[0] = av_clip_uint8(pixels[0] + block[0]);
        pixels[1] = av_clip_uint8(pixels[1] + block[1]);
        pixels += line_size;
        block  += 8;
    }
}
static int sum_abs_dctelem_c(int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}

static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 16);
        block += line_size;
    }
}

static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        memset(block, value, 8);
        block += line_size;
    }
}
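
/* Rounding averages of two and four pixel values; used by the half-pel
 * SAD comparators (pix_abs*_x2/_y2/_xy2) further below. */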
#define avg2(a, b)       ((a + b + 1) >> 1)
#define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
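
/* One-vector global motion compensation: bilinear interpolation with
 * 1/16-pel fractional offsets; the weights A, B, C and D always sum to 256. */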
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
                   int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (x16)      * (16 - y16);
    const int C = (16 - x16) * (y16);
    const int D = (x16)      * (y16);
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + rounder) >> 8;
        dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + rounder) >> 8;
        dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + rounder) >> 8;
        dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + rounder) >> 8;
        dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + rounder) >> 8;
        dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + rounder) >> 8;
        dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + rounder) >> 8;
        dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + rounder) >> 8;
        dst += stride;
        src += stride;
    }
}
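
/* General global motion compensation: the per-pixel motion vector field is
 * affine in (dxx, dxy, dyx, dyy); each output pixel is bilinearly
 * interpolated, with source coordinates clipped at the picture border. */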
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r,
              int width, int height)
{
    int y, vx, vy;
    const int s = 1 << shift;

    width--;
    height--;

    for (y = 0; y < h; y++) {
        int x;

        vx = ox;
        vy = oy;
        for (x = 0; x < 8; x++) { // FIXME: optimize
            int index;
            int src_x  = vx >> 16;
            int src_y  = vy >> 16;
            int frac_x = src_x & (s - 1);
            int frac_y = src_y & (s - 1);
            src_x >>= shift;
            src_y >>= shift;

            if ((unsigned) src_x < width) {
                if ((unsigned) src_y < height) {
                    index = src_x + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]              * (s - frac_x) +
                          src[index + 1]          *      frac_x) * (s - frac_y) +
                         (src[index + stride]     * (s - frac_x) +
                          src[index + stride + 1] *      frac_x) *      frac_y +
                         r) >> (shift * 2);
                } else {
                    index = src_x + av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] =
                        ((src[index]     * (s - frac_x) +
                          src[index + 1] *      frac_x) * s +
                         r) >> (shift * 2);
                }
            } else {
                if ((unsigned) src_y < height) {
                    index = av_clip(src_x, 0, width) + src_y * stride;
                    dst[y * stride + x] =
                        ((src[index]          * (s - frac_y) +
                          src[index + stride] *      frac_y) * s +
                         r) >> (shift * 2);
                } else {
                    index = av_clip(src_x, 0, width) +
                            av_clip(src_y, 0, height) * stride;
                    dst[y * stride + x] = src[index];
                }
            }

            vx += dxx;
            vy += dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
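
/* QPEL_MC instantiates the MPEG-4 quarter-pel motion-compensation functions
 * for the non-integer sub-pel positions. The half-pel intermediates use the
 * 8-tap low-pass filter (-1, 3, -6, 20, 20, -6, 3, -1), normalised and
 * clipped by the op_put/op_avg macros defined after the macro body. */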
  541. #define QPEL_MC(r, OPNAME, RND, OP) \
  542. static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \
  543. int dstStride, int srcStride, \
  544. int h) \
  545. { \
  546. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  547. int i; \
  548. \
  549. for (i = 0; i < h; i++) { \
  550. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  551. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  552. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  553. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  554. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  555. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \
  556. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \
  557. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \
  558. dst += dstStride; \
  559. src += srcStride; \
  560. } \
  561. } \
  562. \
  563. static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \
  564. int dstStride, int srcStride) \
  565. { \
  566. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  567. const int w = 8; \
  568. int i; \
  569. \
  570. for (i = 0; i < w; i++) { \
  571. const int src0 = src[0 * srcStride]; \
  572. const int src1 = src[1 * srcStride]; \
  573. const int src2 = src[2 * srcStride]; \
  574. const int src3 = src[3 * srcStride]; \
  575. const int src4 = src[4 * srcStride]; \
  576. const int src5 = src[5 * srcStride]; \
  577. const int src6 = src[6 * srcStride]; \
  578. const int src7 = src[7 * srcStride]; \
  579. const int src8 = src[8 * srcStride]; \
  580. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  581. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  582. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  583. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  584. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  585. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \
  586. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \
  587. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \
  588. dst++; \
  589. src++; \
  590. } \
  591. } \
  592. \
  593. static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \
  594. int dstStride, int srcStride, \
  595. int h) \
  596. { \
  597. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  598. int i; \
  599. \
  600. for (i = 0; i < h; i++) { \
  601. OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \
  602. OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \
  603. OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \
  604. OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \
  605. OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \
  606. OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \
  607. OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \
  608. OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \
  609. OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \
  610. OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \
  611. OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \
  612. OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \
  613. OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \
  614. OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \
  615. OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \
  616. OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \
  617. dst += dstStride; \
  618. src += srcStride; \
  619. } \
  620. } \
  621. \
  622. static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \
  623. int dstStride, int srcStride) \
  624. { \
  625. const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
  626. const int w = 16; \
  627. int i; \
  628. \
  629. for (i = 0; i < w; i++) { \
  630. const int src0 = src[0 * srcStride]; \
  631. const int src1 = src[1 * srcStride]; \
  632. const int src2 = src[2 * srcStride]; \
  633. const int src3 = src[3 * srcStride]; \
  634. const int src4 = src[4 * srcStride]; \
  635. const int src5 = src[5 * srcStride]; \
  636. const int src6 = src[6 * srcStride]; \
  637. const int src7 = src[7 * srcStride]; \
  638. const int src8 = src[8 * srcStride]; \
  639. const int src9 = src[9 * srcStride]; \
  640. const int src10 = src[10 * srcStride]; \
  641. const int src11 = src[11 * srcStride]; \
  642. const int src12 = src[12 * srcStride]; \
  643. const int src13 = src[13 * srcStride]; \
  644. const int src14 = src[14 * srcStride]; \
  645. const int src15 = src[15 * srcStride]; \
  646. const int src16 = src[16 * srcStride]; \
  647. OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \
  648. OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \
  649. OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \
  650. OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \
  651. OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \
  652. OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \
  653. OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \
  654. OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \
  655. OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \
  656. OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \
  657. OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \
  658. OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \
  659. OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \
  660. OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \
  661. OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \
  662. OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \
  663. dst++; \
  664. src++; \
  665. } \
  666. } \
  667. \
  668. static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \
  669. ptrdiff_t stride) \
  670. { \
  671. uint8_t half[64]; \
  672. \
  673. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  674. OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \
  675. } \
  676. \
  677. static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \
  678. ptrdiff_t stride) \
  679. { \
  680. OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \
  681. } \
  682. \
  683. static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \
  684. ptrdiff_t stride) \
  685. { \
  686. uint8_t half[64]; \
  687. \
  688. put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \
  689. OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \
  690. } \
  691. \
  692. static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \
  693. ptrdiff_t stride) \
  694. { \
  695. uint8_t full[16 * 9]; \
  696. uint8_t half[64]; \
  697. \
  698. copy_block9(full, src, 16, stride, 9); \
  699. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  700. OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \
  701. } \
  702. \
  703. static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \
  704. ptrdiff_t stride) \
  705. { \
  706. uint8_t full[16 * 9]; \
  707. \
  708. copy_block9(full, src, 16, stride, 9); \
  709. OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \
  710. } \
  711. \
  712. static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \
  713. ptrdiff_t stride) \
  714. { \
  715. uint8_t full[16 * 9]; \
  716. uint8_t half[64]; \
  717. \
  718. copy_block9(full, src, 16, stride, 9); \
  719. put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \
  720. OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \
  721. } \
  722. \
  723. void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \
  724. ptrdiff_t stride) \
  725. { \
  726. uint8_t full[16 * 9]; \
  727. uint8_t halfH[72]; \
  728. uint8_t halfV[64]; \
  729. uint8_t halfHV[64]; \
  730. \
  731. copy_block9(full, src, 16, stride, 9); \
  732. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  733. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  734. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  735. OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \
  736. stride, 16, 8, 8, 8, 8); \
  737. } \
  738. \
  739. static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \
  740. ptrdiff_t stride) \
  741. { \
  742. uint8_t full[16 * 9]; \
  743. uint8_t halfH[72]; \
  744. uint8_t halfHV[64]; \
  745. \
  746. copy_block9(full, src, 16, stride, 9); \
  747. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  748. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  749. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  750. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  751. } \
  752. \
  753. void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \
  754. ptrdiff_t stride) \
  755. { \
  756. uint8_t full[16 * 9]; \
  757. uint8_t halfH[72]; \
  758. uint8_t halfV[64]; \
  759. uint8_t halfHV[64]; \
  760. \
  761. copy_block9(full, src, 16, stride, 9); \
  762. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  763. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  764. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  765. OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  766. stride, 16, 8, 8, 8, 8); \
  767. } \
  768. \
  769. static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \
  770. ptrdiff_t stride) \
  771. { \
  772. uint8_t full[16 * 9]; \
  773. uint8_t halfH[72]; \
  774. uint8_t halfHV[64]; \
  775. \
  776. copy_block9(full, src, 16, stride, 9); \
  777. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  778. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  779. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  780. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  781. } \
  782. \
  783. void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \
  784. ptrdiff_t stride) \
  785. { \
  786. uint8_t full[16 * 9]; \
  787. uint8_t halfH[72]; \
  788. uint8_t halfV[64]; \
  789. uint8_t halfHV[64]; \
  790. \
  791. copy_block9(full, src, 16, stride, 9); \
  792. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  793. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  794. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  795. OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \
  796. stride, 16, 8, 8, 8, 8); \
  797. } \
  798. \
  799. static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \
  800. ptrdiff_t stride) \
  801. { \
  802. uint8_t full[16 * 9]; \
  803. uint8_t halfH[72]; \
  804. uint8_t halfHV[64]; \
  805. \
  806. copy_block9(full, src, 16, stride, 9); \
  807. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  808. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  809. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  810. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  811. } \
  812. \
  813. void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \
  814. ptrdiff_t stride) \
  815. { \
  816. uint8_t full[16 * 9]; \
  817. uint8_t halfH[72]; \
  818. uint8_t halfV[64]; \
  819. uint8_t halfHV[64]; \
  820. \
  821. copy_block9(full, src, 16, stride, 9); \
  822. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  823. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  824. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  825. OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \
  826. stride, 16, 8, 8, 8, 8); \
  827. } \
  828. \
  829. static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \
  830. ptrdiff_t stride) \
  831. { \
  832. uint8_t full[16 * 9]; \
  833. uint8_t halfH[72]; \
  834. uint8_t halfHV[64]; \
  835. \
  836. copy_block9(full, src, 16, stride, 9); \
  837. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  838. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  839. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  840. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  841. } \
  842. \
  843. static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \
  844. ptrdiff_t stride) \
  845. { \
  846. uint8_t halfH[72]; \
  847. uint8_t halfHV[64]; \
  848. \
  849. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  850. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  851. OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \
  852. } \
  853. \
  854. static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \
  855. ptrdiff_t stride) \
  856. { \
  857. uint8_t halfH[72]; \
  858. uint8_t halfHV[64]; \
  859. \
  860. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  861. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  862. OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \
  863. } \
  864. \
  865. void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \
  866. ptrdiff_t stride) \
  867. { \
  868. uint8_t full[16 * 9]; \
  869. uint8_t halfH[72]; \
  870. uint8_t halfV[64]; \
  871. uint8_t halfHV[64]; \
  872. \
  873. copy_block9(full, src, 16, stride, 9); \
  874. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  875. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \
  876. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  877. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  878. } \
  879. \
  880. static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \
  881. ptrdiff_t stride) \
  882. { \
  883. uint8_t full[16 * 9]; \
  884. uint8_t halfH[72]; \
  885. \
  886. copy_block9(full, src, 16, stride, 9); \
  887. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  888. put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \
  889. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  890. } \
  891. \
  892. void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \
  893. ptrdiff_t stride) \
  894. { \
  895. uint8_t full[16 * 9]; \
  896. uint8_t halfH[72]; \
  897. uint8_t halfV[64]; \
  898. uint8_t halfHV[64]; \
  899. \
  900. copy_block9(full, src, 16, stride, 9); \
  901. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  902. put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \
  903. put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \
  904. OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \
  905. } \
  906. \
  907. static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \
  908. ptrdiff_t stride) \
  909. { \
  910. uint8_t full[16 * 9]; \
  911. uint8_t halfH[72]; \
  912. \
  913. copy_block9(full, src, 16, stride, 9); \
  914. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \
  915. put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \
  916. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  917. } \
  918. \
  919. static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \
  920. ptrdiff_t stride) \
  921. { \
  922. uint8_t halfH[72]; \
  923. \
  924. put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \
  925. OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \
  926. } \
  927. \
  928. static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \
  929. ptrdiff_t stride) \
  930. { \
  931. uint8_t half[256]; \
  932. \
  933. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  934. OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \
  935. } \
  936. \
  937. static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \
  938. ptrdiff_t stride) \
  939. { \
  940. OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \
  941. } \
  942. \
  943. static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \
  944. ptrdiff_t stride) \
  945. { \
  946. uint8_t half[256]; \
  947. \
  948. put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \
  949. OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \
  950. } \
  951. \
  952. static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \
  953. ptrdiff_t stride) \
  954. { \
  955. uint8_t full[24 * 17]; \
  956. uint8_t half[256]; \
  957. \
  958. copy_block17(full, src, 24, stride, 17); \
  959. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  960. OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \
  961. } \
  962. \
  963. static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \
  964. ptrdiff_t stride) \
  965. { \
  966. uint8_t full[24 * 17]; \
  967. \
  968. copy_block17(full, src, 24, stride, 17); \
  969. OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \
  970. } \
  971. \
  972. static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \
  973. ptrdiff_t stride) \
  974. { \
  975. uint8_t full[24 * 17]; \
  976. uint8_t half[256]; \
  977. \
  978. copy_block17(full, src, 24, stride, 17); \
  979. put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \
  980. OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \
  981. } \
  982. \
  983. void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \
  984. ptrdiff_t stride) \
  985. { \
  986. uint8_t full[24 * 17]; \
  987. uint8_t halfH[272]; \
  988. uint8_t halfV[256]; \
  989. uint8_t halfHV[256]; \
  990. \
  991. copy_block17(full, src, 24, stride, 17); \
  992. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  993. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  994. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  995. OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \
  996. stride, 24, 16, 16, 16, 16); \
  997. } \
  998. \
  999. static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \
  1000. ptrdiff_t stride) \
  1001. { \
  1002. uint8_t full[24 * 17]; \
  1003. uint8_t halfH[272]; \
  1004. uint8_t halfHV[256]; \
  1005. \
  1006. copy_block17(full, src, 24, stride, 17); \
  1007. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1008. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1009. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1010. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1011. } \
  1012. \
  1013. void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \
  1014. ptrdiff_t stride) \
  1015. { \
  1016. uint8_t full[24 * 17]; \
  1017. uint8_t halfH[272]; \
  1018. uint8_t halfV[256]; \
  1019. uint8_t halfHV[256]; \
  1020. \
  1021. copy_block17(full, src, 24, stride, 17); \
  1022. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1023. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1024. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1025. OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \
  1026. stride, 24, 16, 16, 16, 16); \
  1027. } \
  1028. \
  1029. static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \
  1030. ptrdiff_t stride) \
  1031. { \
  1032. uint8_t full[24 * 17]; \
  1033. uint8_t halfH[272]; \
  1034. uint8_t halfHV[256]; \
  1035. \
  1036. copy_block17(full, src, 24, stride, 17); \
  1037. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1038. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1039. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1040. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1041. } \
  1042. \
  1043. void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \
  1044. ptrdiff_t stride) \
  1045. { \
  1046. uint8_t full[24 * 17]; \
  1047. uint8_t halfH[272]; \
  1048. uint8_t halfV[256]; \
  1049. uint8_t halfHV[256]; \
  1050. \
  1051. copy_block17(full, src, 24, stride, 17); \
  1052. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1053. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1054. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1055. OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \
  1056. stride, 24, 16, 16, 16, 16); \
  1057. } \
  1058. \
  1059. static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \
  1060. ptrdiff_t stride) \
  1061. { \
  1062. uint8_t full[24 * 17]; \
  1063. uint8_t halfH[272]; \
  1064. uint8_t halfHV[256]; \
  1065. \
  1066. copy_block17(full, src, 24, stride, 17); \
  1067. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1068. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1069. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1070. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1071. } \
  1072. \
  1073. void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \
  1074. ptrdiff_t stride) \
  1075. { \
  1076. uint8_t full[24 * 17]; \
  1077. uint8_t halfH[272]; \
  1078. uint8_t halfV[256]; \
  1079. uint8_t halfHV[256]; \
  1080. \
  1081. copy_block17(full, src, 24, stride, 17); \
  1082. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1083. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1084. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1085. OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \
  1086. stride, 24, 16, 16, 16, 16); \
  1087. } \
  1088. \
  1089. static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \
  1090. ptrdiff_t stride) \
  1091. { \
  1092. uint8_t full[24 * 17]; \
  1093. uint8_t halfH[272]; \
  1094. uint8_t halfHV[256]; \
  1095. \
  1096. copy_block17(full, src, 24, stride, 17); \
  1097. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1098. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1099. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1100. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1101. } \
  1102. \
  1103. static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \
  1104. ptrdiff_t stride) \
  1105. { \
  1106. uint8_t halfH[272]; \
  1107. uint8_t halfHV[256]; \
  1108. \
  1109. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1110. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1111. OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \
  1112. } \
  1113. \
  1114. static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \
  1115. ptrdiff_t stride) \
  1116. { \
  1117. uint8_t halfH[272]; \
  1118. uint8_t halfHV[256]; \
  1119. \
  1120. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1121. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1122. OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \
  1123. } \
  1124. \
  1125. void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \
  1126. ptrdiff_t stride) \
  1127. { \
  1128. uint8_t full[24 * 17]; \
  1129. uint8_t halfH[272]; \
  1130. uint8_t halfV[256]; \
  1131. uint8_t halfHV[256]; \
  1132. \
  1133. copy_block17(full, src, 24, stride, 17); \
  1134. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1135. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \
  1136. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1137. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1138. } \
  1139. \
  1140. static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \
  1141. ptrdiff_t stride) \
  1142. { \
  1143. uint8_t full[24 * 17]; \
  1144. uint8_t halfH[272]; \
  1145. \
  1146. copy_block17(full, src, 24, stride, 17); \
  1147. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1148. put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \
  1149. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1150. } \
  1151. \
  1152. void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \
  1153. ptrdiff_t stride) \
  1154. { \
  1155. uint8_t full[24 * 17]; \
  1156. uint8_t halfH[272]; \
  1157. uint8_t halfV[256]; \
  1158. uint8_t halfHV[256]; \
  1159. \
  1160. copy_block17(full, src, 24, stride, 17); \
  1161. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1162. put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \
  1163. put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \
  1164. OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \
  1165. } \
  1166. \
  1167. static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \
  1168. ptrdiff_t stride) \
  1169. { \
  1170. uint8_t full[24 * 17]; \
  1171. uint8_t halfH[272]; \
  1172. \
  1173. copy_block17(full, src, 24, stride, 17); \
  1174. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \
  1175. put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \
  1176. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1177. } \
  1178. \
  1179. static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \
  1180. ptrdiff_t stride) \
  1181. { \
  1182. uint8_t halfH[272]; \
  1183. \
  1184. put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \
  1185. OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \
  1186. }
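
/* Rounding / non-rounding normalisation for the filters above: the filter
 * output is divided by 32 and clipped through the crop table; the _no_rnd
 * variants bias by 15 instead of 16, and op_avg additionally averages with
 * the existing destination pixel. */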
#define op_avg(a, b)        a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1)
#define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1)
#define op_put(a, b)        a = cm[((b) + 16) >> 5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]

QPEL_MC(0, put_,        _,        op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_,        _,        op_avg)

#undef op_avg
#undef op_put
#undef op_put_no_rnd

void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_8_c(dst, src, stride, 8);
}

void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_8_c(dst, src, stride, 8);
}

void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_8_c(dst, src, stride, 16);
}

void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_8_c(dst, src, stride, 16);
}

#define put_qpel8_mc00_c         ff_put_pixels8x8_c
#define avg_qpel8_mc00_c         ff_avg_pixels8x8_c
#define put_qpel16_mc00_c        ff_put_pixels16x16_c
#define avg_qpel16_mc00_c        ff_avg_pixels16x16_c
#define put_no_rnd_qpel8_mc00_c  ff_put_pixels8x8_c
#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
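
/* WMV2 half-pel low-pass filter: coefficients (-1, 9, 9, -1) / 16 with
 * rounding, clipped through the crop table. */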
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int h)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < h; i++) {
        dst[0] = cm[(9 * (src[0] + src[1]) - (src[-1] + src[2]) + 8) >> 4];
        dst[1] = cm[(9 * (src[1] + src[2]) - (src[0]  + src[3]) + 8) >> 4];
        dst[2] = cm[(9 * (src[2] + src[3]) - (src[1]  + src[4]) + 8) >> 4];
        dst[3] = cm[(9 * (src[3] + src[4]) - (src[2]  + src[5]) + 8) >> 4];
        dst[4] = cm[(9 * (src[4] + src[5]) - (src[3]  + src[6]) + 8) >> 4];
        dst[5] = cm[(9 * (src[5] + src[6]) - (src[4]  + src[7]) + 8) >> 4];
        dst[6] = cm[(9 * (src[6] + src[7]) - (src[5]  + src[8]) + 8) >> 4];
        dst[7] = cm[(9 * (src[7] + src[8]) - (src[6]  + src[9]) + 8) >> 4];
        dst += dstStride;
        src += srcStride;
    }
}
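
/* DIRAC_MC maps the Dirac decoder's 8/16/32-pixel-wide put/avg operations
 * (plain, 2-reference and 4-reference averaging) onto the generic
 * pixels*_8 and pixels*_l2/_l4 helpers. */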
#if CONFIG_DIRAC_DECODER
#define DIRAC_MC(OPNAME)\
void ff_ ## OPNAME ## _dirac_pixels8_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels8_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_8_c(dst   , src[0]   , stride, h);\
    OPNAME ## _pixels16_8_c(dst+16, src[0]+16, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels8_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_l2_8(dst   , src[0]   , src[1]   , stride, stride, stride, h);\
    OPNAME ## _pixels16_l2_8(dst+16, src[0]+16, src[1]+16, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels8_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    OPNAME ## _pixels16_l4_8(dst   , src[0]   , src[1]   , src[2]   , src[3]   , stride, stride, stride, stride, stride, h);\
    OPNAME ## _pixels16_l4_8(dst+16, src[0]+16, src[1]+16, src[2]+16, src[3]+16, stride, stride, stride, stride, stride, h);\
}

DIRAC_MC(put)
DIRAC_MC(avg)
#endif
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src,
                                  int dstStride, int srcStride, int w)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i;

    for (i = 0; i < w; i++) {
        const int src_1 = src[-srcStride];
        const int src0  = src[0];
        const int src1  = src[srcStride];
        const int src2  = src[2 * srcStride];
        const int src3  = src[3 * srcStride];
        const int src4  = src[4 * srcStride];
        const int src5  = src[5 * srcStride];
        const int src6  = src[6 * srcStride];
        const int src7  = src[7 * srcStride];
        const int src8  = src[8 * srcStride];
        const int src9  = src[9 * srcStride];
        dst[0 * dstStride] = cm[(9 * (src0 + src1) - (src_1 + src2) + 8) >> 4];
        dst[1 * dstStride] = cm[(9 * (src1 + src2) - (src0  + src3) + 8) >> 4];
        dst[2 * dstStride] = cm[(9 * (src2 + src3) - (src1  + src4) + 8) >> 4];
        dst[3 * dstStride] = cm[(9 * (src3 + src4) - (src2  + src5) + 8) >> 4];
        dst[4 * dstStride] = cm[(9 * (src4 + src5) - (src3  + src6) + 8) >> 4];
        dst[5 * dstStride] = cm[(9 * (src5 + src6) - (src4  + src7) + 8) >> 4];
        dst[6 * dstStride] = cm[(9 * (src6 + src7) - (src5  + src8) + 8) >> 4];
        dst[7 * dstStride] = cm[(9 * (src7 + src8) - (src6  + src9) + 8) >> 4];
        src++;
        dst++;
    }
}

static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
}

static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t half[64];

    wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
    put_pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8);
}

static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}

static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];
    uint8_t halfV[64];
    uint8_t halfHV[64];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(halfV, src + 1, 8, stride, 8);
    wmv2_mspel8_v_lowpass(halfHV, halfH + 8, 8, 8, 8);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}

static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    uint8_t halfH[88];

    wmv2_mspel8_h_lowpass(halfH, src - stride, 8, stride, 11);
    wmv2_mspel8_v_lowpass(dst, halfH + 8, stride, 8, 8);
}
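
/* Sum-of-absolute-differences (SAD) comparators for 16- and 8-pixel-wide
 * blocks; the _x2, _y2 and _xy2 variants compare against the horizontally,
 * vertically and diagonally half-pel interpolated reference. */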
  1356. static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1357. int line_size, int h)
  1358. {
  1359. int s = 0, i;
  1360. for (i = 0; i < h; i++) {
  1361. s += abs(pix1[0] - pix2[0]);
  1362. s += abs(pix1[1] - pix2[1]);
  1363. s += abs(pix1[2] - pix2[2]);
  1364. s += abs(pix1[3] - pix2[3]);
  1365. s += abs(pix1[4] - pix2[4]);
  1366. s += abs(pix1[5] - pix2[5]);
  1367. s += abs(pix1[6] - pix2[6]);
  1368. s += abs(pix1[7] - pix2[7]);
  1369. s += abs(pix1[8] - pix2[8]);
  1370. s += abs(pix1[9] - pix2[9]);
  1371. s += abs(pix1[10] - pix2[10]);
  1372. s += abs(pix1[11] - pix2[11]);
  1373. s += abs(pix1[12] - pix2[12]);
  1374. s += abs(pix1[13] - pix2[13]);
  1375. s += abs(pix1[14] - pix2[14]);
  1376. s += abs(pix1[15] - pix2[15]);
  1377. pix1 += line_size;
  1378. pix2 += line_size;
  1379. }
  1380. return s;
  1381. }
  1382. static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1383. int line_size, int h)
  1384. {
  1385. int s = 0, i;
  1386. for (i = 0; i < h; i++) {
  1387. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1388. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1389. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1390. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1391. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1392. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1393. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1394. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1395. s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
  1396. s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
  1397. s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
  1398. s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
  1399. s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
  1400. s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
  1401. s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
  1402. s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
  1403. pix1 += line_size;
  1404. pix2 += line_size;
  1405. }
  1406. return s;
  1407. }
  1408. static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1409. int line_size, int h)
  1410. {
  1411. int s = 0, i;
  1412. uint8_t *pix3 = pix2 + line_size;
  1413. for (i = 0; i < h; i++) {
  1414. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1415. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1416. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1417. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1418. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1419. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1420. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1421. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1422. s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
  1423. s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
  1424. s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
  1425. s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
  1426. s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
  1427. s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
  1428. s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
  1429. s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
  1430. pix1 += line_size;
  1431. pix2 += line_size;
  1432. pix3 += line_size;
  1433. }
  1434. return s;
  1435. }
  1436. static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1437. int line_size, int h)
  1438. {
  1439. int s = 0, i;
  1440. uint8_t *pix3 = pix2 + line_size;
  1441. for (i = 0; i < h; i++) {
  1442. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1443. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1444. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1445. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1446. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1447. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1448. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1449. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1450. s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
  1451. s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
  1452. s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
  1453. s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
  1454. s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
  1455. s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
  1456. s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
  1457. s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
  1458. pix1 += line_size;
  1459. pix2 += line_size;
  1460. pix3 += line_size;
  1461. }
  1462. return s;
  1463. }
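/* 8-pixel-wide counterparts of the SAD functions above. */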
  1464. static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1465. int line_size, int h)
  1466. {
  1467. int s = 0, i;
  1468. for (i = 0; i < h; i++) {
  1469. s += abs(pix1[0] - pix2[0]);
  1470. s += abs(pix1[1] - pix2[1]);
  1471. s += abs(pix1[2] - pix2[2]);
  1472. s += abs(pix1[3] - pix2[3]);
  1473. s += abs(pix1[4] - pix2[4]);
  1474. s += abs(pix1[5] - pix2[5]);
  1475. s += abs(pix1[6] - pix2[6]);
  1476. s += abs(pix1[7] - pix2[7]);
  1477. pix1 += line_size;
  1478. pix2 += line_size;
  1479. }
  1480. return s;
  1481. }
  1482. static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1483. int line_size, int h)
  1484. {
  1485. int s = 0, i;
  1486. for (i = 0; i < h; i++) {
  1487. s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
  1488. s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
  1489. s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
  1490. s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
  1491. s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
  1492. s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
  1493. s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
  1494. s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
  1495. pix1 += line_size;
  1496. pix2 += line_size;
  1497. }
  1498. return s;
  1499. }
  1500. static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1501. int line_size, int h)
  1502. {
  1503. int s = 0, i;
  1504. uint8_t *pix3 = pix2 + line_size;
  1505. for (i = 0; i < h; i++) {
  1506. s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
  1507. s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
  1508. s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
  1509. s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
  1510. s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
  1511. s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
  1512. s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
  1513. s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
  1514. pix1 += line_size;
  1515. pix2 += line_size;
  1516. pix3 += line_size;
  1517. }
  1518. return s;
  1519. }
  1520. static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
  1521. int line_size, int h)
  1522. {
  1523. int s = 0, i;
  1524. uint8_t *pix3 = pix2 + line_size;
  1525. for (i = 0; i < h; i++) {
  1526. s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
  1527. s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
  1528. s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
  1529. s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
  1530. s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
  1531. s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
  1532. s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
  1533. s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
  1534. pix1 += line_size;
  1535. pix2 += line_size;
  1536. pix3 += line_size;
  1537. }
  1538. return s;
  1539. }
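/* Noise-preserving SSE: squared error plus a penalty on the difference in
 * local 2x2 gradient structure between the two blocks, weighted by
 * avctx->nsse_weight (8 when no context is available). */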
  1540. static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1541. {
  1542. int score1 = 0, score2 = 0, x, y;
  1543. for (y = 0; y < h; y++) {
  1544. for (x = 0; x < 16; x++)
  1545. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1546. if (y + 1 < h) {
  1547. for (x = 0; x < 15; x++)
  1548. score2 += FFABS(s1[x] - s1[x + stride] -
  1549. s1[x + 1] + s1[x + stride + 1]) -
  1550. FFABS(s2[x] - s2[x + stride] -
  1551. s2[x + 1] + s2[x + stride + 1]);
  1552. }
  1553. s1 += stride;
  1554. s2 += stride;
  1555. }
  1556. if (c)
  1557. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1558. else
  1559. return score1 + FFABS(score2) * 8;
  1560. }
  1561. static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
  1562. {
  1563. int score1 = 0, score2 = 0, x, y;
  1564. for (y = 0; y < h; y++) {
  1565. for (x = 0; x < 8; x++)
  1566. score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
  1567. if (y + 1 < h) {
  1568. for (x = 0; x < 7; x++)
  1569. score2 += FFABS(s1[x] - s1[x + stride] -
  1570. s1[x + 1] + s1[x + stride + 1]) -
  1571. FFABS(s2[x] - s2[x + stride] -
  1572. s2[x + 1] + s2[x + stride + 1]);
  1573. }
  1574. s1 += stride;
  1575. s2 += stride;
  1576. }
  1577. if (c)
  1578. return score1 + FFABS(score2) * c->avctx->nsse_weight;
  1579. else
  1580. return score1 + FFABS(score2) * 8;
  1581. }
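/* Estimate the weighted squared error left after adding a scaled basis
 * function to the residual; add_8x8basis_c below applies that update to
 * the residual in place. */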
  1582. static int try_8x8basis_c(int16_t rem[64], int16_t weight[64],
  1583. int16_t basis[64], int scale)
  1584. {
  1585. int i;
  1586. unsigned int sum = 0;
  1587. for (i = 0; i < 8 * 8; i++) {
  1588. int b = rem[i] + ((basis[i] * scale +
  1589. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1590. (BASIS_SHIFT - RECON_SHIFT));
  1591. int w = weight[i];
  1592. b >>= RECON_SHIFT;
  1593. av_assert2(-512 < b && b < 512);
  1594. sum += (w * b) * (w * b) >> 4;
  1595. }
  1596. return sum >> 2;
  1597. }
  1598. static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale)
  1599. {
  1600. int i;
  1601. for (i = 0; i < 8 * 8; i++)
  1602. rem[i] += (basis[i] * scale +
  1603. (1 << (BASIS_SHIFT - RECON_SHIFT - 1))) >>
  1604. (BASIS_SHIFT - RECON_SHIFT);
  1605. }
  1606. static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
  1607. int stride, int h)
  1608. {
  1609. return 0;
  1610. }
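/* Fill a comparison-function table (one entry per block class) from the
 * DSPContext according to the requested FF_CMP_* type. */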
  1611. void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
  1612. {
  1613. int i;
  1614. memset(cmp, 0, sizeof(void *) * 6);
  1615. for (i = 0; i < 6; i++) {
  1616. switch (type & 0xFF) {
  1617. case FF_CMP_SAD:
  1618. cmp[i] = c->sad[i];
  1619. break;
  1620. case FF_CMP_SATD:
  1621. cmp[i] = c->hadamard8_diff[i];
  1622. break;
  1623. case FF_CMP_SSE:
  1624. cmp[i] = c->sse[i];
  1625. break;
  1626. case FF_CMP_DCT:
  1627. cmp[i] = c->dct_sad[i];
  1628. break;
  1629. case FF_CMP_DCT264:
  1630. cmp[i] = c->dct264_sad[i];
  1631. break;
  1632. case FF_CMP_DCTMAX:
  1633. cmp[i] = c->dct_max[i];
  1634. break;
  1635. case FF_CMP_PSNR:
  1636. cmp[i] = c->quant_psnr[i];
  1637. break;
  1638. case FF_CMP_BIT:
  1639. cmp[i] = c->bit[i];
  1640. break;
  1641. case FF_CMP_RD:
  1642. cmp[i] = c->rd[i];
  1643. break;
  1644. case FF_CMP_VSAD:
  1645. cmp[i] = c->vsad[i];
  1646. break;
  1647. case FF_CMP_VSSE:
  1648. cmp[i] = c->vsse[i];
  1649. break;
  1650. case FF_CMP_ZERO:
  1651. cmp[i] = zero_cmp;
  1652. break;
  1653. case FF_CMP_NSSE:
  1654. cmp[i] = c->nsse[i];
  1655. break;
  1656. #if CONFIG_DWT
  1657. case FF_CMP_W53:
1658. cmp[i] = c->w53[i];
  1659. break;
  1660. case FF_CMP_W97:
1661. cmp[i] = c->w97[i];
  1662. break;
  1663. #endif
  1664. default:
  1665. av_log(NULL, AV_LOG_ERROR,
  1666. "internal error in cmp function selection\n");
  1667. }
  1668. }
  1669. }
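/* Byte-wise addition of src into dst one machine word at a time; the
 * pb_7f/pb_80 masks keep carries from crossing byte boundaries.
 * diff_bytes_c below is the matching byte-wise subtraction. */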
  1670. static void add_bytes_c(uint8_t *dst, uint8_t *src, int w)
  1671. {
  1672. long i;
  1673. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1674. long a = *(long *) (src + i);
  1675. long b = *(long *) (dst + i);
  1676. *(long *) (dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
  1677. }
  1678. for (; i < w; i++)
  1679. dst[i + 0] += src[i + 0];
  1680. }
  1681. static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w)
  1682. {
  1683. long i;
  1684. #if !HAVE_FAST_UNALIGNED
  1685. if ((long) src2 & (sizeof(long) - 1)) {
  1686. for (i = 0; i + 7 < w; i += 8) {
  1687. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1688. dst[i + 1] = src1[i + 1] - src2[i + 1];
  1689. dst[i + 2] = src1[i + 2] - src2[i + 2];
  1690. dst[i + 3] = src1[i + 3] - src2[i + 3];
  1691. dst[i + 4] = src1[i + 4] - src2[i + 4];
  1692. dst[i + 5] = src1[i + 5] - src2[i + 5];
  1693. dst[i + 6] = src1[i + 6] - src2[i + 6];
  1694. dst[i + 7] = src1[i + 7] - src2[i + 7];
  1695. }
  1696. } else
  1697. #endif
  1698. for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
  1699. long a = *(long *) (src1 + i);
  1700. long b = *(long *) (src2 + i);
  1701. *(long *) (dst + i) = ((a | pb_80) - (b & pb_7f)) ^
  1702. ((a ^ b ^ pb_80) & pb_80);
  1703. }
  1704. for (; i < w; i++)
  1705. dst[i + 0] = src1[i + 0] - src2[i + 0];
  1706. }
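/* HuffYUV median prediction: add (decode) or subtract (encode) the residual
 * against the median of the left, top and left+top-topleft predictors. */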
  1707. static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1708. const uint8_t *diff, int w,
  1709. int *left, int *left_top)
  1710. {
  1711. int i;
  1712. uint8_t l, lt;
  1713. l = *left;
  1714. lt = *left_top;
  1715. for (i = 0; i < w; i++) {
  1716. l = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF) + diff[i];
  1717. lt = src1[i];
  1718. dst[i] = l;
  1719. }
  1720. *left = l;
  1721. *left_top = lt;
  1722. }
  1723. static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1,
  1724. const uint8_t *src2, int w,
  1725. int *left, int *left_top)
  1726. {
  1727. int i;
  1728. uint8_t l, lt;
  1729. l = *left;
  1730. lt = *left_top;
  1731. for (i = 0; i < w; i++) {
  1732. const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
  1733. lt = src1[i];
  1734. l = src2[i];
  1735. dst[i] = l - pred;
  1736. }
  1737. *left = l;
  1738. *left_top = lt;
  1739. }
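/* HuffYUV left prediction: each output byte is the running sum of the input;
 * the BGR32 variant below carries the four channels separately. */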
  1740. static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src,
  1741. int w, int acc)
  1742. {
  1743. int i;
  1744. for (i = 0; i < w - 1; i++) {
  1745. acc += src[i];
  1746. dst[i] = acc;
  1747. i++;
  1748. acc += src[i];
  1749. dst[i] = acc;
  1750. }
  1751. for (; i < w; i++) {
  1752. acc += src[i];
  1753. dst[i] = acc;
  1754. }
  1755. return acc;
  1756. }
  1757. #if HAVE_BIGENDIAN
  1758. #define B 3
  1759. #define G 2
  1760. #define R 1
  1761. #define A 0
  1762. #else
  1763. #define B 0
  1764. #define G 1
  1765. #define R 2
  1766. #define A 3
  1767. #endif
  1768. static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src,
  1769. int w, int *red, int *green,
  1770. int *blue, int *alpha)
  1771. {
  1772. int i, r = *red, g = *green, b = *blue, a = *alpha;
  1773. for (i = 0; i < w; i++) {
  1774. b += src[4 * i + B];
  1775. g += src[4 * i + G];
  1776. r += src[4 * i + R];
  1777. a += src[4 * i + A];
  1778. dst[4 * i + B] = b;
  1779. dst[4 * i + G] = g;
  1780. dst[4 * i + R] = r;
  1781. dst[4 * i + A] = a;
  1782. }
  1783. *red = r;
  1784. *green = g;
  1785. *blue = b;
  1786. *alpha = a;
  1787. }
  1788. #undef B
  1789. #undef G
  1790. #undef R
  1791. #undef A
  1792. #define BUTTERFLY2(o1, o2, i1, i2) \
  1793. o1 = (i1) + (i2); \
  1794. o2 = (i1) - (i2);
  1795. #define BUTTERFLY1(x, y) \
  1796. { \
  1797. int a, b; \
  1798. a = x; \
  1799. b = y; \
  1800. x = a + b; \
  1801. y = a - b; \
  1802. }
  1803. #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
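/* SATD: 8x8 Hadamard transform of the difference block (or of the source
 * block itself in the _intra variant), then a sum of absolute coefficients. */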
  1804. static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
  1805. uint8_t *src, int stride, int h)
  1806. {
  1807. int i, temp[64], sum = 0;
  1808. av_assert2(h == 8);
  1809. for (i = 0; i < 8; i++) {
  1810. // FIXME: try pointer walks
  1811. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1812. src[stride * i + 0] - dst[stride * i + 0],
  1813. src[stride * i + 1] - dst[stride * i + 1]);
  1814. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1815. src[stride * i + 2] - dst[stride * i + 2],
  1816. src[stride * i + 3] - dst[stride * i + 3]);
  1817. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1818. src[stride * i + 4] - dst[stride * i + 4],
  1819. src[stride * i + 5] - dst[stride * i + 5]);
  1820. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1821. src[stride * i + 6] - dst[stride * i + 6],
  1822. src[stride * i + 7] - dst[stride * i + 7]);
  1823. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1824. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1825. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1826. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1827. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1828. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1829. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1830. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1831. }
  1832. for (i = 0; i < 8; i++) {
  1833. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1834. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1835. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1836. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1837. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1838. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1839. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1840. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1841. sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
  1842. BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
  1843. BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
  1844. BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1845. }
  1846. return sum;
  1847. }
  1848. static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
  1849. uint8_t *dummy, int stride, int h)
  1850. {
  1851. int i, temp[64], sum = 0;
  1852. av_assert2(h == 8);
  1853. for (i = 0; i < 8; i++) {
  1854. // FIXME: try pointer walks
  1855. BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
  1856. src[stride * i + 0], src[stride * i + 1]);
  1857. BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
  1858. src[stride * i + 2], src[stride * i + 3]);
  1859. BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
  1860. src[stride * i + 4], src[stride * i + 5]);
  1861. BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
  1862. src[stride * i + 6], src[stride * i + 7]);
  1863. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
  1864. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
  1865. BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
  1866. BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);
  1867. BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
  1868. BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
  1869. BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
  1870. BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
  1871. }
  1872. for (i = 0; i < 8; i++) {
  1873. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
  1874. BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
  1875. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
  1876. BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);
  1877. BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
  1878. BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
  1879. BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
  1880. BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);
  1881. sum +=
  1882. BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
  1883. + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
  1884. + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
  1885. + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
  1886. }
  1887. sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean
  1888. return sum;
  1889. }
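/* DCT-based metrics: forward-transform the difference block, then sum
 * (dct_sad) or take the maximum of (dct_max) the absolute coefficients. */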
  1890. static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1891. uint8_t *src2, int stride, int h)
  1892. {
  1893. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1894. av_assert2(h == 8);
  1895. s->dsp.diff_pixels(temp, src1, src2, stride);
  1896. s->dsp.fdct(temp);
  1897. return s->dsp.sum_abs_dctelem(temp);
  1898. }
  1899. #if CONFIG_GPL
  1900. #define DCT8_1D \
  1901. { \
  1902. const int s07 = SRC(0) + SRC(7); \
  1903. const int s16 = SRC(1) + SRC(6); \
  1904. const int s25 = SRC(2) + SRC(5); \
  1905. const int s34 = SRC(3) + SRC(4); \
  1906. const int a0 = s07 + s34; \
  1907. const int a1 = s16 + s25; \
  1908. const int a2 = s07 - s34; \
  1909. const int a3 = s16 - s25; \
  1910. const int d07 = SRC(0) - SRC(7); \
  1911. const int d16 = SRC(1) - SRC(6); \
  1912. const int d25 = SRC(2) - SRC(5); \
  1913. const int d34 = SRC(3) - SRC(4); \
  1914. const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
  1915. const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
  1916. const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
  1917. const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
  1918. DST(0, a0 + a1); \
  1919. DST(1, a4 + (a7 >> 2)); \
  1920. DST(2, a2 + (a3 >> 1)); \
  1921. DST(3, a5 + (a6 >> 2)); \
  1922. DST(4, a0 - a1); \
  1923. DST(5, a6 - (a5 >> 2)); \
  1924. DST(6, (a2 >> 1) - a3); \
  1925. DST(7, (a4 >> 2) - a7); \
  1926. }
  1927. static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
  1928. uint8_t *src2, int stride, int h)
  1929. {
  1930. int16_t dct[8][8];
  1931. int i, sum = 0;
  1932. s->dsp.diff_pixels(dct[0], src1, src2, stride);
  1933. #define SRC(x) dct[i][x]
  1934. #define DST(x, v) dct[i][x] = v
  1935. for (i = 0; i < 8; i++)
  1936. DCT8_1D
  1937. #undef SRC
  1938. #undef DST
  1939. #define SRC(x) dct[x][i]
  1940. #define DST(x, v) sum += FFABS(v)
  1941. for (i = 0; i < 8; i++)
  1942. DCT8_1D
  1943. #undef SRC
  1944. #undef DST
  1945. return sum;
  1946. }
  1947. #endif
  1948. static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
  1949. uint8_t *src2, int stride, int h)
  1950. {
  1951. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1952. int sum = 0, i;
  1953. av_assert2(h == 8);
  1954. s->dsp.diff_pixels(temp, src1, src2, stride);
  1955. s->dsp.fdct(temp);
  1956. for (i = 0; i < 64; i++)
  1957. sum = FFMAX(sum, FFABS(temp[i]));
  1958. return sum;
  1959. }
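/* Quantization-noise metric: quantize, dequantize and inverse-transform the
 * difference block, then return the squared error of that round trip. */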
  1960. static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
  1961. uint8_t *src2, int stride, int h)
  1962. {
  1963. LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
  1964. int16_t *const bak = temp + 64;
  1965. int sum = 0, i;
  1966. av_assert2(h == 8);
  1967. s->mb_intra = 0;
  1968. s->dsp.diff_pixels(temp, src1, src2, stride);
  1969. memcpy(bak, temp, 64 * sizeof(int16_t));
  1970. s->block_last_index[0 /* FIXME */] =
  1971. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1972. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  1973. ff_simple_idct_8(temp); // FIXME
  1974. for (i = 0; i < 64; i++)
  1975. sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);
  1976. return sum;
  1977. }
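/* Rate-distortion metric: VLC-code the quantized difference block to get a
 * bit count, reconstruct it, and combine the SSE distortion with a
 * qscale-weighted rate term. */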
  1978. static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  1979. int stride, int h)
  1980. {
  1981. const uint8_t *scantable = s->intra_scantable.permutated;
  1982. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  1983. LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
  1984. LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
  1985. int i, last, run, bits, level, distortion, start_i;
  1986. const int esc_length = s->ac_esc_length;
  1987. uint8_t *length, *last_length;
  1988. av_assert2(h == 8);
  1989. copy_block8(lsrc1, src1, 8, stride, 8);
  1990. copy_block8(lsrc2, src2, 8, stride, 8);
  1991. s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
  1992. s->block_last_index[0 /* FIXME */] =
  1993. last =
  1994. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  1995. bits = 0;
  1996. if (s->mb_intra) {
  1997. start_i = 1;
  1998. length = s->intra_ac_vlc_length;
  1999. last_length = s->intra_ac_vlc_last_length;
  2000. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2001. } else {
  2002. start_i = 0;
  2003. length = s->inter_ac_vlc_length;
  2004. last_length = s->inter_ac_vlc_last_length;
  2005. }
  2006. if (last >= start_i) {
  2007. run = 0;
  2008. for (i = start_i; i < last; i++) {
  2009. int j = scantable[i];
  2010. level = temp[j];
  2011. if (level) {
  2012. level += 64;
  2013. if ((level & (~127)) == 0)
  2014. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2015. else
  2016. bits += esc_length;
  2017. run = 0;
  2018. } else
  2019. run++;
  2020. }
  2021. i = scantable[last];
  2022. level = temp[i] + 64;
  2023. av_assert2(level - 64);
  2024. if ((level & (~127)) == 0) {
  2025. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2026. } else
  2027. bits += esc_length;
  2028. }
  2029. if (last >= 0) {
  2030. if (s->mb_intra)
  2031. s->dct_unquantize_intra(s, temp, 0, s->qscale);
  2032. else
  2033. s->dct_unquantize_inter(s, temp, 0, s->qscale);
  2034. }
  2035. s->dsp.idct_add(lsrc2, 8, temp);
  2036. distortion = s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
  2037. return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
  2038. }
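/* Bit-count metric: the same VLC accounting as rd8x8_c, returning only the
 * estimated number of bits. */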
  2039. static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
  2040. int stride, int h)
  2041. {
  2042. const uint8_t *scantable = s->intra_scantable.permutated;
  2043. LOCAL_ALIGNED_16(int16_t, temp, [64]);
  2044. int i, last, run, bits, level, start_i;
  2045. const int esc_length = s->ac_esc_length;
  2046. uint8_t *length, *last_length;
  2047. av_assert2(h == 8);
  2048. s->dsp.diff_pixels(temp, src1, src2, stride);
  2049. s->block_last_index[0 /* FIXME */] =
  2050. last =
  2051. s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
  2052. bits = 0;
  2053. if (s->mb_intra) {
  2054. start_i = 1;
  2055. length = s->intra_ac_vlc_length;
  2056. last_length = s->intra_ac_vlc_last_length;
  2057. bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
  2058. } else {
  2059. start_i = 0;
  2060. length = s->inter_ac_vlc_length;
  2061. last_length = s->inter_ac_vlc_last_length;
  2062. }
  2063. if (last >= start_i) {
  2064. run = 0;
  2065. for (i = start_i; i < last; i++) {
  2066. int j = scantable[i];
  2067. level = temp[j];
  2068. if (level) {
  2069. level += 64;
  2070. if ((level & (~127)) == 0)
  2071. bits += length[UNI_AC_ENC_INDEX(run, level)];
  2072. else
  2073. bits += esc_length;
  2074. run = 0;
  2075. } else
  2076. run++;
  2077. }
  2078. i = scantable[last];
  2079. level = temp[i] + 64;
  2080. av_assert2(level - 64);
  2081. if ((level & (~127)) == 0)
  2082. bits += last_length[UNI_AC_ENC_INDEX(run, level)];
  2083. else
  2084. bits += esc_length;
  2085. }
  2086. return bits;
  2087. }
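/* Vertical SAD/SSE metrics: row-to-row activity of one block (intra
 * variants) or of the difference between two blocks. */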
  2088. #define VSAD_INTRA(size) \
  2089. static int vsad_intra ## size ## _c(MpegEncContext *c, \
  2090. uint8_t *s, uint8_t *dummy, \
  2091. int stride, int h) \
  2092. { \
  2093. int score = 0, x, y; \
  2094. \
  2095. for (y = 1; y < h; y++) { \
  2096. for (x = 0; x < size; x += 4) { \
  2097. score += FFABS(s[x] - s[x + stride]) + \
  2098. FFABS(s[x + 1] - s[x + stride + 1]) + \
  2099. FFABS(s[x + 2] - s[x + 2 + stride]) + \
  2100. FFABS(s[x + 3] - s[x + 3 + stride]); \
  2101. } \
  2102. s += stride; \
  2103. } \
  2104. \
  2105. return score; \
  2106. }
  2107. VSAD_INTRA(8)
  2108. VSAD_INTRA(16)
  2109. static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2110. int stride, int h)
  2111. {
  2112. int score = 0, x, y;
  2113. for (y = 1; y < h; y++) {
  2114. for (x = 0; x < 16; x++)
  2115. score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2116. s1 += stride;
  2117. s2 += stride;
  2118. }
  2119. return score;
  2120. }
  2121. #define SQ(a) ((a) * (a))
  2122. #define VSSE_INTRA(size) \
  2123. static int vsse_intra ## size ## _c(MpegEncContext *c, \
  2124. uint8_t *s, uint8_t *dummy, \
  2125. int stride, int h) \
  2126. { \
  2127. int score = 0, x, y; \
  2128. \
  2129. for (y = 1; y < h; y++) { \
  2130. for (x = 0; x < size; x += 4) { \
  2131. score += SQ(s[x] - s[x + stride]) + \
  2132. SQ(s[x + 1] - s[x + stride + 1]) + \
  2133. SQ(s[x + 2] - s[x + stride + 2]) + \
  2134. SQ(s[x + 3] - s[x + stride + 3]); \
  2135. } \
  2136. s += stride; \
  2137. } \
  2138. \
  2139. return score; \
  2140. }
  2141. VSSE_INTRA(8)
  2142. VSSE_INTRA(16)
  2143. static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
  2144. int stride, int h)
  2145. {
  2146. int score = 0, x, y;
  2147. for (y = 1; y < h; y++) {
  2148. for (x = 0; x < 16; x++)
  2149. score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]);
  2150. s1 += stride;
  2151. s2 += stride;
  2152. }
  2153. return score;
  2154. }
  2155. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  2156. int size)
  2157. {
  2158. int score = 0, i;
  2159. for (i = 0; i < size; i++)
  2160. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  2161. return score;
  2162. }
  2163. #define WRAPPER8_16_SQ(name8, name16) \
  2164. static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
  2165. int stride, int h) \
  2166. { \
  2167. int score = 0; \
  2168. \
  2169. score += name8(s, dst, src, stride, 8); \
  2170. score += name8(s, dst + 8, src + 8, stride, 8); \
  2171. if (h == 16) { \
  2172. dst += 8 * stride; \
  2173. src += 8 * stride; \
  2174. score += name8(s, dst, src, stride, 8); \
  2175. score += name8(s, dst + 8, src + 8, stride, 8); \
  2176. } \
  2177. return score; \
  2178. }
  2179. WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
  2180. WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
  2181. WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
  2182. #if CONFIG_GPL
  2183. WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
  2184. #endif
  2185. WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
  2186. WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
  2187. WRAPPER8_16_SQ(rd8x8_c, rd16_c)
  2188. WRAPPER8_16_SQ(bit8x8_c, bit16_c)
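/* Float clipping: when min < 0 < max, values can be clipped by comparing the
 * raw IEEE-754 bit patterns as unsigned integers (see
 * vector_clipf_c_opposite_sign); otherwise av_clipf is applied per element. */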
  2189. static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
  2190. uint32_t maxi, uint32_t maxisign)
  2191. {
  2192. if (a > mini)
  2193. return mini;
  2194. else if ((a ^ (1U << 31)) > maxisign)
  2195. return maxi;
  2196. else
  2197. return a;
  2198. }
  2199. static void vector_clipf_c_opposite_sign(float *dst, const float *src,
  2200. float *min, float *max, int len)
  2201. {
  2202. int i;
  2203. uint32_t mini = *(uint32_t *) min;
  2204. uint32_t maxi = *(uint32_t *) max;
  2205. uint32_t maxisign = maxi ^ (1U << 31);
  2206. uint32_t *dsti = (uint32_t *) dst;
  2207. const uint32_t *srci = (const uint32_t *) src;
  2208. for (i = 0; i < len; i += 8) {
  2209. dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
  2210. dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
  2211. dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
  2212. dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
  2213. dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
  2214. dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
  2215. dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
  2216. dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
  2217. }
  2218. }
  2219. static void vector_clipf_c(float *dst, const float *src,
  2220. float min, float max, int len)
  2221. {
  2222. int i;
  2223. if (min < 0 && max > 0) {
  2224. vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
  2225. } else {
  2226. for (i = 0; i < len; i += 8) {
  2227. dst[i] = av_clipf(src[i], min, max);
  2228. dst[i + 1] = av_clipf(src[i + 1], min, max);
  2229. dst[i + 2] = av_clipf(src[i + 2], min, max);
  2230. dst[i + 3] = av_clipf(src[i + 3], min, max);
  2231. dst[i + 4] = av_clipf(src[i + 4], min, max);
  2232. dst[i + 5] = av_clipf(src[i + 5], min, max);
  2233. dst[i + 6] = av_clipf(src[i + 6], min, max);
  2234. dst[i + 7] = av_clipf(src[i + 7], min, max);
  2235. }
  2236. }
  2237. }
  2238. static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2,
  2239. int order)
  2240. {
  2241. int res = 0;
  2242. while (order--)
2243. res += *v1++ * *v2++;
  2244. return res;
  2245. }
  2246. static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
  2247. const int16_t *v3,
  2248. int order, int mul)
  2249. {
  2250. int res = 0;
  2251. while (order--) {
  2252. res += *v1 * *v2++;
  2253. *v1++ += mul * *v3++;
  2254. }
  2255. return res;
  2256. }
  2257. static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
  2258. int32_t max, unsigned int len)
  2259. {
  2260. do {
  2261. *dst++ = av_clip(*src++, min, max);
  2262. *dst++ = av_clip(*src++, min, max);
  2263. *dst++ = av_clip(*src++, min, max);
  2264. *dst++ = av_clip(*src++, min, max);
  2265. *dst++ = av_clip(*src++, min, max);
  2266. *dst++ = av_clip(*src++, min, max);
  2267. *dst++ = av_clip(*src++, min, max);
  2268. *dst++ = av_clip(*src++, min, max);
  2269. len -= 8;
  2270. } while (len > 0);
  2271. }
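/* Wrappers binding the integer reference IDCT (and its 4x4/2x2/1x1 reduced
 * versions used for lowres decoding) to the put/add pixel helpers. */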
  2272. static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
  2273. {
  2274. ff_j_rev_dct(block);
  2275. put_pixels_clamped_c(block, dest, line_size);
  2276. }
  2277. static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
  2278. {
  2279. ff_j_rev_dct(block);
  2280. add_pixels_clamped_c(block, dest, line_size);
  2281. }
  2282. static void ff_jref_idct4_put(uint8_t *dest, int line_size, int16_t *block)
  2283. {
2284. ff_j_rev_dct4(block);
  2285. put_pixels_clamped4_c(block, dest, line_size);
  2286. }
  2287. static void ff_jref_idct4_add(uint8_t *dest, int line_size, int16_t *block)
  2288. {
2289. ff_j_rev_dct4(block);
  2290. add_pixels_clamped4_c(block, dest, line_size);
  2291. }
  2292. static void ff_jref_idct2_put(uint8_t *dest, int line_size, int16_t *block)
  2293. {
2294. ff_j_rev_dct2(block);
  2295. put_pixels_clamped2_c(block, dest, line_size);
  2296. }
  2297. static void ff_jref_idct2_add(uint8_t *dest, int line_size, int16_t *block)
  2298. {
2299. ff_j_rev_dct2(block);
  2300. add_pixels_clamped2_c(block, dest, line_size);
  2301. }
  2302. static void ff_jref_idct1_put(uint8_t *dest, int line_size, int16_t *block)
  2303. {
2304. dest[0] = av_clip_uint8((block[0] + 4) >> 3);
  2305. }
  2306. static void ff_jref_idct1_add(uint8_t *dest, int line_size, int16_t *block)
  2307. {
2308. dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4) >> 3));
  2309. }
  2310. /* init static data */
  2311. av_cold void ff_dsputil_static_init(void)
  2312. {
  2313. int i;
  2314. for (i = 0; i < 512; i++)
  2315. ff_square_tab[i] = (i - 256) * (i - 256);
  2316. }
  2317. int ff_check_alignment(void)
  2318. {
  2319. static int did_fail = 0;
  2320. LOCAL_ALIGNED_16(int, aligned, [4]);
  2321. if ((intptr_t)aligned & 15) {
  2322. if (!did_fail) {
  2323. #if HAVE_MMX || HAVE_ALTIVEC
  2324. av_log(NULL, AV_LOG_ERROR,
  2325. "Compiler did not align stack variables. Libavcodec has been miscompiled\n"
  2326. "and may be very slow or crash. This is not a bug in libavcodec,\n"
  2327. "but in the compiler. You may try recompiling using gcc >= 4.2.\n"
  2328. "Do not report crashes to FFmpeg developers.\n");
  2329. #endif
2330. did_fail = 1;
  2331. }
  2332. return -1;
  2333. }
  2334. return 0;
  2335. }
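/* Select the C implementations and the FDCT/IDCT variants matching the bit
 * depth, lowres setting and dct/idct_algo options, then let the
 * per-architecture init functions override them with optimized versions. */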
  2336. av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2337. {
  2338. const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;
  2339. ff_check_alignment();
  2340. #if CONFIG_ENCODERS
  2341. if (avctx->bits_per_raw_sample == 10) {
  2342. c->fdct = ff_jpeg_fdct_islow_10;
  2343. c->fdct248 = ff_fdct248_islow_10;
  2344. } else {
  2345. if (avctx->dct_algo == FF_DCT_FASTINT) {
  2346. c->fdct = ff_fdct_ifast;
  2347. c->fdct248 = ff_fdct_ifast248;
  2348. } else if (avctx->dct_algo == FF_DCT_FAAN) {
  2349. c->fdct = ff_faandct;
  2350. c->fdct248 = ff_faandct248;
  2351. } else {
  2352. c->fdct = ff_jpeg_fdct_islow_8; // slow/accurate/default
  2353. c->fdct248 = ff_fdct248_islow_8;
  2354. }
  2355. }
  2356. #endif /* CONFIG_ENCODERS */
2357. if (avctx->lowres == 1) {
  2358. c->idct_put = ff_jref_idct4_put;
  2359. c->idct_add = ff_jref_idct4_add;
  2360. c->idct = ff_j_rev_dct4;
  2361. c->idct_permutation_type = FF_NO_IDCT_PERM;
2362. } else if (avctx->lowres == 2) {
  2363. c->idct_put = ff_jref_idct2_put;
  2364. c->idct_add = ff_jref_idct2_add;
  2365. c->idct = ff_j_rev_dct2;
  2366. c->idct_permutation_type = FF_NO_IDCT_PERM;
2367. } else if (avctx->lowres == 3) {
  2368. c->idct_put = ff_jref_idct1_put;
  2369. c->idct_add = ff_jref_idct1_add;
  2370. c->idct = ff_j_rev_dct1;
  2371. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2372. } else {
  2373. if (avctx->bits_per_raw_sample == 10) {
  2374. c->idct_put = ff_simple_idct_put_10;
  2375. c->idct_add = ff_simple_idct_add_10;
  2376. c->idct = ff_simple_idct_10;
  2377. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2378. } else if (avctx->bits_per_raw_sample == 12) {
  2379. c->idct_put = ff_simple_idct_put_12;
  2380. c->idct_add = ff_simple_idct_add_12;
  2381. c->idct = ff_simple_idct_12;
  2382. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2383. } else {
  2384. if (avctx->idct_algo == FF_IDCT_INT) {
  2385. c->idct_put = jref_idct_put;
  2386. c->idct_add = jref_idct_add;
  2387. c->idct = ff_j_rev_dct;
  2388. c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
  2389. } else if (avctx->idct_algo == FF_IDCT_FAAN) {
  2390. c->idct_put = ff_faanidct_put;
  2391. c->idct_add = ff_faanidct_add;
  2392. c->idct = ff_faanidct;
  2393. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2394. } else { // accurate/default
  2395. c->idct_put = ff_simple_idct_put_8;
  2396. c->idct_add = ff_simple_idct_add_8;
  2397. c->idct = ff_simple_idct_8;
  2398. c->idct_permutation_type = FF_NO_IDCT_PERM;
  2399. }
  2400. }
  2401. }
  2402. c->diff_pixels = diff_pixels_c;
  2403. c->put_pixels_clamped = put_pixels_clamped_c;
  2404. c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
  2405. c->add_pixels_clamped = add_pixels_clamped_c;
  2406. c->sum_abs_dctelem = sum_abs_dctelem_c;
  2407. c->gmc1 = gmc1_c;
  2408. c->gmc = ff_gmc_c;
  2409. c->pix_sum = pix_sum_c;
  2410. c->pix_norm1 = pix_norm1_c;
  2411. c->fill_block_tab[0] = fill_block16_c;
  2412. c->fill_block_tab[1] = fill_block8_c;
2413. /* pix_abs[0]: 16-pixel-wide blocks, pix_abs[1]: 8-pixel-wide blocks */
  2414. c->pix_abs[0][0] = pix_abs16_c;
  2415. c->pix_abs[0][1] = pix_abs16_x2_c;
  2416. c->pix_abs[0][2] = pix_abs16_y2_c;
  2417. c->pix_abs[0][3] = pix_abs16_xy2_c;
  2418. c->pix_abs[1][0] = pix_abs8_c;
  2419. c->pix_abs[1][1] = pix_abs8_x2_c;
  2420. c->pix_abs[1][2] = pix_abs8_y2_c;
  2421. c->pix_abs[1][3] = pix_abs8_xy2_c;
  2422. #define dspfunc(PFX, IDX, NUM) \
  2423. c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \
  2424. c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \
  2425. c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \
  2426. c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \
  2427. c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \
  2428. c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \
  2429. c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \
  2430. c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \
  2431. c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \
  2432. c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \
  2433. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
  2434. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
  2435. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
  2436. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
  2437. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
  2438. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
  2439. dspfunc(put_qpel, 0, 16);
  2440. dspfunc(put_qpel, 1, 8);
  2441. dspfunc(put_no_rnd_qpel, 0, 16);
  2442. dspfunc(put_no_rnd_qpel, 1, 8);
  2443. dspfunc(avg_qpel, 0, 16);
  2444. dspfunc(avg_qpel, 1, 8);
  2445. #undef dspfunc
  2446. c->put_mspel_pixels_tab[0] = ff_put_pixels8x8_c;
  2447. c->put_mspel_pixels_tab[1] = put_mspel8_mc10_c;
  2448. c->put_mspel_pixels_tab[2] = put_mspel8_mc20_c;
  2449. c->put_mspel_pixels_tab[3] = put_mspel8_mc30_c;
  2450. c->put_mspel_pixels_tab[4] = put_mspel8_mc02_c;
  2451. c->put_mspel_pixels_tab[5] = put_mspel8_mc12_c;
  2452. c->put_mspel_pixels_tab[6] = put_mspel8_mc22_c;
  2453. c->put_mspel_pixels_tab[7] = put_mspel8_mc32_c;
  2454. #define SET_CMP_FUNC(name) \
  2455. c->name[0] = name ## 16_c; \
  2456. c->name[1] = name ## 8x8_c;
  2457. SET_CMP_FUNC(hadamard8_diff)
  2458. c->hadamard8_diff[4] = hadamard8_intra16_c;
  2459. c->hadamard8_diff[5] = hadamard8_intra8x8_c;
  2460. SET_CMP_FUNC(dct_sad)
  2461. SET_CMP_FUNC(dct_max)
  2462. #if CONFIG_GPL
  2463. SET_CMP_FUNC(dct264_sad)
  2464. #endif
  2465. c->sad[0] = pix_abs16_c;
  2466. c->sad[1] = pix_abs8_c;
  2467. c->sse[0] = sse16_c;
  2468. c->sse[1] = sse8_c;
  2469. c->sse[2] = sse4_c;
  2470. SET_CMP_FUNC(quant_psnr)
  2471. SET_CMP_FUNC(rd)
  2472. SET_CMP_FUNC(bit)
  2473. c->vsad[0] = vsad16_c;
  2474. c->vsad[4] = vsad_intra16_c;
  2475. c->vsad[5] = vsad_intra8_c;
  2476. c->vsse[0] = vsse16_c;
  2477. c->vsse[4] = vsse_intra16_c;
  2478. c->vsse[5] = vsse_intra8_c;
  2479. c->nsse[0] = nsse16_c;
  2480. c->nsse[1] = nsse8_c;
  2481. #if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
  2482. ff_dsputil_init_dwt(c);
  2483. #endif
  2484. c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
  2485. c->add_bytes = add_bytes_c;
  2486. c->add_hfyu_median_prediction = add_hfyu_median_prediction_c;
  2487. c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
  2488. c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
  2489. c->diff_bytes = diff_bytes_c;
  2490. c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_c;
  2491. c->bswap_buf = bswap_buf;
  2492. c->bswap16_buf = bswap16_buf;
  2493. c->try_8x8basis = try_8x8basis_c;
  2494. c->add_8x8basis = add_8x8basis_c;
  2495. c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
  2496. c->scalarproduct_int16 = scalarproduct_int16_c;
  2497. c->vector_clip_int32 = vector_clip_int32_c;
  2498. c->vector_clipf = vector_clipf_c;
  2499. c->shrink[0] = av_image_copy_plane;
  2500. c->shrink[1] = ff_shrink22;
  2501. c->shrink[2] = ff_shrink44;
  2502. c->shrink[3] = ff_shrink88;
  2503. c->add_pixels8 = add_pixels8_c;
  2504. c->draw_edges = draw_edges_8_c;
  2505. c->clear_block = clear_block_8_c;
  2506. c->clear_blocks = clear_blocks_8_c;
  2507. switch (avctx->bits_per_raw_sample) {
  2508. case 9:
  2509. case 10:
  2510. case 12:
  2511. case 14:
  2512. c->get_pixels = get_pixels_16_c;
  2513. break;
  2514. default:
2515. if (avctx->bits_per_raw_sample <= 8 || avctx->codec_type != AVMEDIA_TYPE_VIDEO) {
  2516. c->get_pixels = get_pixels_8_c;
  2517. }
  2518. break;
  2519. }
  2520. if (ARCH_ALPHA)
  2521. ff_dsputil_init_alpha(c, avctx);
  2522. if (ARCH_ARM)
  2523. ff_dsputil_init_arm(c, avctx, high_bit_depth);
  2524. if (ARCH_BFIN)
  2525. ff_dsputil_init_bfin(c, avctx, high_bit_depth);
  2526. if (ARCH_PPC)
  2527. ff_dsputil_init_ppc(c, avctx, high_bit_depth);
  2528. if (ARCH_X86)
  2529. ff_dsputil_init_x86(c, avctx, high_bit_depth);
  2530. ff_init_scantable_permutation(c->idct_permutation,
  2531. c->idct_permutation_type);
  2532. }
  2533. av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
  2534. {
  2535. ff_dsputil_init(c, avctx);
  2536. }
  2537. av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx)
  2538. {
  2539. ff_dsputil_init(c, avctx);
  2540. }