  1. /*
  2. * Copyright (c) 2002 Brian Foley
  3. * Copyright (c) 2002 Dieter Shirley
  4. * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. */
  20. #include "../dsputil.h"
  21. #include "gcc_fixes.h"
  22. #include "dsputil_altivec.h"
  23. #ifdef CONFIG_DARWIN
  24. #include <sys/sysctl.h>
  25. #else /* CONFIG_DARWIN */
  26. #include <signal.h>
  27. #include <setjmp.h>
  28. static sigjmp_buf jmpbuf;
  29. static volatile sig_atomic_t canjump = 0;
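/* SIGILL probe used by has_altivec() below: executing an AltiVec
   instruction on a CPU without AltiVec raises SIGILL, which this
   handler turns into a siglongjmp back to the detection code */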
  30. static void sigill_handler (int sig)
  31. {
  32. if (!canjump) {
  33. signal (sig, SIG_DFL);
  34. raise (sig);
  35. }
  36. canjump = 0;
  37. siglongjmp (jmpbuf, 1);
  38. }
  39. #endif /* CONFIG_DARWIN */
  40. int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  41. {
  42. int i;
  43. int s __attribute__((aligned(16)));
  44. const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
  45. vector unsigned char *tv;
  46. vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
  47. vector unsigned int sad;
  48. vector signed int sumdiffs;
  49. s = 0;
  50. sad = (vector unsigned int)vec_splat_u32(0);
  51. for(i=0;i<h;i++) {
  52. /*
  53. Read unaligned pixels into our vectors. The vectors are as follows:
  54. pix1v: pix1[0]-pix1[15]
  55. pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
  56. */
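/* classic AltiVec misaligned-load idiom: vec_lvsl builds a permute
   mask from the low four address bits, and vec_perm of the two
   aligned vectors spanning the address yields the 16 unaligned bytes */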
  57. tv = (vector unsigned char *) pix1;
  58. pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
  59. tv = (vector unsigned char *) &pix2[0];
  60. pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
  61. tv = (vector unsigned char *) &pix2[1];
  62. pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
  63. /* Calculate the average vector */
  64. avgv = vec_avg(pix2v, pix2iv);
  65. /* Calculate a sum of abs differences vector */
  66. t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
  67. /* Add each 4 pixel group together and put 4 results into sad */
  68. sad = vec_sum4s(t5, sad);
  69. pix1 += line_size;
  70. pix2 += line_size;
  71. }
  72. /* Sum up the four partial sums, and put the result into s */
  73. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  74. sumdiffs = vec_splat(sumdiffs, 3);
  75. vec_ste(sumdiffs, 0, &s);
  76. return s;
  77. }
  78. int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  79. {
  80. int i;
  81. int s __attribute__((aligned(16)));
  82. const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
  83. vector unsigned char *tv;
  84. vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
  85. vector unsigned int sad;
  86. vector signed int sumdiffs;
  87. uint8_t *pix3 = pix2 + line_size;
  88. s = 0;
  89. sad = (vector unsigned int)vec_splat_u32(0);
  90. /*
  91. Due to the fact that pix3 = pix2 + line_size, the pix3 of one
  92. iteration becomes pix2 in the next iteration. We can use this
  93. fact to avoid a potentially expensive unaligned read each
  94. time around the loop.
  95. Read the unaligned pixels into our vector. The vector is as
  96. follows:
  97. pix2v: pix2[0]-pix2[15]
  98. */
  99. tv = (vector unsigned char *) &pix2[0];
  100. pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
  101. for(i=0;i<h;i++) {
  102. /*
  103. Read unaligned pixels into our vectors. The vectors are as follows:
  104. pix1v: pix1[0]-pix1[15]
  105. pix3v: pix3[0]-pix3[15]
  106. */
  107. tv = (vector unsigned char *) pix1;
  108. pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
  109. tv = (vector unsigned char *) &pix3[0];
  110. pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
  111. /* Calculate the average vector */
  112. avgv = vec_avg(pix2v, pix3v);
  113. /* Calculate a sum of abs differences vector */
  114. t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
  115. /* Add each 4 pixel group together and put 4 results into sad */
  116. sad = vec_sum4s(t5, sad);
  117. pix1 += line_size;
  118. pix2v = pix3v;
  119. pix3 += line_size;
  120. }
  121. /* Sum up the four partial sums, and put the result into s */
  122. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  123. sumdiffs = vec_splat(sumdiffs, 3);
  124. vec_ste(sumdiffs, 0, &s);
  125. return s;
  126. }
  127. int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  128. {
  129. int i;
  130. int s __attribute__((aligned(16)));
  131. uint8_t *pix3 = pix2 + line_size;
  132. const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
  133. const_vector unsigned short two = (const_vector unsigned short)vec_splat_u16(2);
  134. vector unsigned char *tv, avgv, t5;
  135. vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
  136. vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
  137. vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
  138. vector unsigned short avghv, avglv;
  139. vector unsigned short t1, t2, t3, t4;
  140. vector unsigned int sad;
  141. vector signed int sumdiffs;
  142. sad = (vector unsigned int)vec_splat_u32(0);
  143. s = 0;
  144. /*
  145. Due to the fact that pix3 = pix2 + line_size, the pix3 of one
  146. iteration becomes pix2 in the next iteration. We can use this
  147. fact to avoid a potentially expensive unaligned read, as well
  148. as some splitting and vector addition, each time around the loop.
  149. Read unaligned pixels into our vectors. The vectors are as follows:
  150. pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
  151. Split the pixel vectors into shorts
  152. */
  153. tv = (vector unsigned char *) &pix2[0];
  154. pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
  155. tv = (vector unsigned char *) &pix2[1];
  156. pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
  157. pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
  158. pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
  159. pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
  160. pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
  161. t1 = vec_add(pix2hv, pix2ihv);
  162. t2 = vec_add(pix2lv, pix2ilv);
  163. for(i=0;i<h;i++) {
  164. /*
  165. Read unaligned pixels into our vectors. The vectors are as follows:
  166. pix1v: pix1[0]-pix1[15]
  167. pix3v: pix3[0]-pix3[15] pix3iv: pix3[1]-pix3[16]
  168. */
  169. tv = (vector unsigned char *) pix1;
  170. pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
  171. tv = (vector unsigned char *) &pix3[0];
  172. pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
  173. tv = (vector unsigned char *) &pix3[1];
  174. pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));
  175. /*
  176. Note that Altivec does have vec_avg, but this works on vector pairs
  177. and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
  178. would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
  179. Instead, we have to split the pixel vectors into vectors of shorts,
  180. and do the averaging by hand.
  181. */
  182. /* Split the pixel vectors into shorts */
  183. pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
  184. pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
  185. pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
  186. pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
  187. /* Do the averaging on them */
  188. t3 = vec_add(pix3hv, pix3ihv);
  189. t4 = vec_add(pix3lv, pix3ilv);
  190. avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
  191. avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);
  192. /* Pack the shorts back into a result */
  193. avgv = vec_pack(avghv, avglv);
  194. /* Calculate a sum of abs differences vector */
  195. t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
  196. /* Add each 4 pixel group together and put 4 results into sad */
  197. sad = vec_sum4s(t5, sad);
  198. pix1 += line_size;
  199. pix3 += line_size;
  200. /* Transfer the calculated values for pix3 into pix2 */
  201. t1 = t3;
  202. t2 = t4;
  203. }
  204. /* Sum up the four partial sums, and put the result into s */
  205. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  206. sumdiffs = vec_splat(sumdiffs, 3);
  207. vec_ste(sumdiffs, 0, &s);
  208. return s;
  209. }
  210. int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  211. {
  212. int i;
  213. int s __attribute__((aligned(16)));
  214. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  215. vector unsigned char perm1, perm2, *pix1v, *pix2v;
  216. vector unsigned char t1, t2, t3,t4, t5;
  217. vector unsigned int sad;
  218. vector signed int sumdiffs;
  219. sad = (vector unsigned int)vec_splat_u32(0);
  220. for(i=0;i<h;i++) {
  221. /* Read potentially unaligned pixels into t1 and t2 */
  222. perm1 = vec_lvsl(0, pix1);
  223. pix1v = (vector unsigned char *) pix1;
  224. perm2 = vec_lvsl(0, pix2);
  225. pix2v = (vector unsigned char *) pix2;
  226. t1 = vec_perm(pix1v[0], pix1v[1], perm1);
  227. t2 = vec_perm(pix2v[0], pix2v[1], perm2);
  228. /* Calculate a sum of abs differences vector */
  229. t3 = vec_max(t1, t2);
  230. t4 = vec_min(t1, t2);
  231. t5 = vec_sub(t3, t4);
  232. /* Add each 4 pixel group together and put 4 results into sad */
  233. sad = vec_sum4s(t5, sad);
  234. pix1 += line_size;
  235. pix2 += line_size;
  236. }
  237. /* Sum up the four partial sums, and put the result into s */
  238. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  239. sumdiffs = vec_splat(sumdiffs, 3);
  240. vec_ste(sumdiffs, 0, &s);
  241. return s;
  242. }
  243. int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  244. {
  245. int i;
  246. int s __attribute__((aligned(16)));
  247. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  248. vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
  249. vector unsigned char t1, t2, t3,t4, t5;
  250. vector unsigned int sad;
  251. vector signed int sumdiffs;
  252. sad = (vector unsigned int)vec_splat_u32(0);
  253. permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
  254. for(i=0;i<h;i++) {
  255. /* Read potentially unaligned pixels into t1 and t2
  256. Since we're reading 16 pixels, and actually only want 8,
  257. mask out the last 8 pixels. The 0s don't change the sum. */
  258. perm1 = vec_lvsl(0, pix1);
  259. pix1v = (vector unsigned char *) pix1;
  260. perm2 = vec_lvsl(0, pix2);
  261. pix2v = (vector unsigned char *) pix2;
  262. t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
  263. t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
  264. /* Calculate a sum of abs differences vector */
  265. t3 = vec_max(t1, t2);
  266. t4 = vec_min(t1, t2);
  267. t5 = vec_sub(t3, t4);
  268. /* Add each 4 pixel group together and put 4 results into sad */
  269. sad = vec_sum4s(t5, sad);
  270. pix1 += line_size;
  271. pix2 += line_size;
  272. }
  273. /* Sum up the four partial sums, and put the result into s */
  274. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  275. sumdiffs = vec_splat(sumdiffs, 3);
  276. vec_ste(sumdiffs, 0, &s);
  277. return s;
  278. }
  279. int pix_norm1_altivec(uint8_t *pix, int line_size)
  280. {
  281. int i;
  282. int s __attribute__((aligned(16)));
  283. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  284. vector unsigned char *tv;
  285. vector unsigned char pixv;
  286. vector unsigned int sv;
  287. vector signed int sum;
  288. sv = (vector unsigned int)vec_splat_u32(0);
  289. s = 0;
  290. for (i = 0; i < 16; i++) {
  291. /* Read in the potentially unaligned pixels */
  292. tv = (vector unsigned char *) pix;
  293. pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));
  294. /* Square the values, and add them to our sum */
  295. sv = vec_msum(pixv, pixv, sv);
  296. pix += line_size;
  297. }
  298. /* Sum up the four partial sums, and put the result into s */
  299. sum = vec_sums((vector signed int) sv, (vector signed int) zero);
  300. sum = vec_splat(sum, 3);
  301. vec_ste(sum, 0, &s);
  302. return s;
  303. }
  304. /**
  305. * Sum of Squared Errors for an 8x8 block.
  306. * AltiVec-enhanced.
  307. * It's the sad8_altivec code above w/ squaring added.
  308. */
  309. int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  310. {
  311. int i;
  312. int s __attribute__((aligned(16)));
  313. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  314. vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
  315. vector unsigned char t1, t2, t3,t4, t5;
  316. vector unsigned int sum;
  317. vector signed int sumsqr;
  318. sum = (vector unsigned int)vec_splat_u32(0);
  319. permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
  320. for(i=0;i<h;i++) {
  321. /* Read potentially unaligned pixels into t1 and t2
  322. Since we're reading 16 pixels, and actually only want 8,
  323. mask out the last 8 pixels. The 0s don't change the sum. */
  324. perm1 = vec_lvsl(0, pix1);
  325. pix1v = (vector unsigned char *) pix1;
  326. perm2 = vec_lvsl(0, pix2);
  327. pix2v = (vector unsigned char *) pix2;
  328. t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
  329. t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
  330. /*
  331. Since we want to use unsigned chars, we can take advantage
  332. of the fact that abs(a-b)^2 = (a-b)^2.
  333. */
  334. /* Calculate abs differences vector */
  335. t3 = vec_max(t1, t2);
  336. t4 = vec_min(t1, t2);
  337. t5 = vec_sub(t3, t4);
  338. /* Square the values and add them to our sum */
  339. sum = vec_msum(t5, t5, sum);
  340. pix1 += line_size;
  341. pix2 += line_size;
  342. }
  343. /* Sum up the four partial sums, and put the result into s */
  344. sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
  345. sumsqr = vec_splat(sumsqr, 3);
  346. vec_ste(sumsqr, 0, &s);
  347. return s;
  348. }
  349. /**
  350. * Sum of Squared Errors for a 16x16 block.
  351. * AltiVec-enhanced.
  352. * It's the sad16_altivec code above w/ squaring added.
  353. */
  354. int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
  355. {
  356. int i;
  357. int s __attribute__((aligned(16)));
  358. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  359. vector unsigned char perm1, perm2, *pix1v, *pix2v;
  360. vector unsigned char t1, t2, t3,t4, t5;
  361. vector unsigned int sum;
  362. vector signed int sumsqr;
  363. sum = (vector unsigned int)vec_splat_u32(0);
  364. for(i=0;i<h;i++) {
  365. /* Read potentially unaligned pixels into t1 and t2 */
  366. perm1 = vec_lvsl(0, pix1);
  367. pix1v = (vector unsigned char *) pix1;
  368. perm2 = vec_lvsl(0, pix2);
  369. pix2v = (vector unsigned char *) pix2;
  370. t1 = vec_perm(pix1v[0], pix1v[1], perm1);
  371. t2 = vec_perm(pix2v[0], pix2v[1], perm2);
  372. /*
  373. Since we want to use unsigned chars, we can take advantage
  374. of the fact that abs(a-b)^2 = (a-b)^2.
  375. */
  376. /* Calculate abs differences vector */
  377. t3 = vec_max(t1, t2);
  378. t4 = vec_min(t1, t2);
  379. t5 = vec_sub(t3, t4);
  380. /* Square the values and add them to our sum */
  381. sum = vec_msum(t5, t5, sum);
  382. pix1 += line_size;
  383. pix2 += line_size;
  384. }
  385. /* Sum up the four partial sums, and put the result into s */
  386. sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
  387. sumsqr = vec_splat(sumsqr, 3);
  388. vec_ste(sumsqr, 0, &s);
  389. return s;
  390. }
  391. int pix_sum_altivec(uint8_t * pix, int line_size)
  392. {
  393. const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
  394. vector unsigned char perm, *pixv;
  395. vector unsigned char t1;
  396. vector unsigned int sad;
  397. vector signed int sumdiffs;
  398. int i;
  399. int s __attribute__((aligned(16)));
  400. sad = (vector unsigned int)vec_splat_u32(0);
  401. for (i = 0; i < 16; i++) {
  402. /* Read the potentially unaligned 16 pixels into t1 */
  403. perm = vec_lvsl(0, pix);
  404. pixv = (vector unsigned char *) pix;
  405. t1 = vec_perm(pixv[0], pixv[1], perm);
  406. /* Add each 4 pixel group together and put 4 results into sad */
  407. sad = vec_sum4s(t1, sad);
  408. pix += line_size;
  409. }
  410. /* Sum up the four partial sums, and put the result into s */
  411. sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
  412. sumdiffs = vec_splat(sumdiffs, 3);
  413. vec_ste(sumdiffs, 0, &s);
  414. return s;
  415. }
  416. void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
  417. {
  418. int i;
  419. vector unsigned char perm, bytes, *pixv;
  420. const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
  421. vector signed short shorts;
  422. for(i=0;i<8;i++)
  423. {
  424. // Read potentially unaligned pixels.
  425. // We're reading 16 pixels, and actually only want 8,
  426. // but we simply ignore the extras.
  427. perm = vec_lvsl(0, pixels);
  428. pixv = (vector unsigned char *) pixels;
  429. bytes = vec_perm(pixv[0], pixv[1], perm);
  430. // convert the bytes into shorts
  431. shorts = (vector signed short)vec_mergeh(zero, bytes);
  432. // save the data to the block, we assume the block is 16-byte aligned
  433. vec_st(shorts, i*16, (vector signed short*)block);
  434. pixels += line_size;
  435. }
  436. }
  437. void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
  438. const uint8_t *s2, int stride)
  439. {
  440. int i;
  441. vector unsigned char perm, bytes, *pixv;
  442. const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
  443. vector signed short shorts1, shorts2;
  444. for(i=0;i<4;i++)
  445. {
  446. // Read potentially unaligned pixels
  447. // We're reading 16 pixels, and actually only want 8,
  448. // but we simply ignore the extras.
  449. perm = vec_lvsl(0, s1);
  450. pixv = (vector unsigned char *) s1;
  451. bytes = vec_perm(pixv[0], pixv[1], perm);
  452. // convert the bytes into shorts
  453. shorts1 = (vector signed short)vec_mergeh(zero, bytes);
  454. // Do the same for the second block of pixels
  455. perm = vec_lvsl(0, s2);
  456. pixv = (vector unsigned char *) s2;
  457. bytes = vec_perm(pixv[0], pixv[1], perm);
  458. // convert the bytes into shorts
  459. shorts2 = (vector signed short)vec_mergeh(zero, bytes);
  460. // Do the subtraction
  461. shorts1 = vec_sub(shorts1, shorts2);
  462. // save the data to the block, we assume the block is 16-byte aligned
  463. vec_st(shorts1, 0, (vector signed short*)block);
  464. s1 += stride;
  465. s2 += stride;
  466. block += 8;
  467. // The code below is a copy of the code above... This is a manual
  468. // unroll.
  469. // Read potentially unaligned pixels
  470. // We're reading 16 pixels, and actually only want 8,
  471. // but we simply ignore the extras.
  472. perm = vec_lvsl(0, s1);
  473. pixv = (vector unsigned char *) s1;
  474. bytes = vec_perm(pixv[0], pixv[1], perm);
  475. // convert the bytes into shorts
  476. shorts1 = (vector signed short)vec_mergeh(zero, bytes);
  477. // Do the same for the second block of pixels
  478. perm = vec_lvsl(0, s2);
  479. pixv = (vector unsigned char *) s2;
  480. bytes = vec_perm(pixv[0], pixv[1], perm);
  481. // convert the bytes into shorts
  482. shorts2 = (vector signed short)vec_mergeh(zero, bytes);
  483. // Do the subtraction
  484. shorts1 = vec_sub(shorts1, shorts2);
  485. // save the data to the block, we assume the block is 16-byte aligned
  486. vec_st(shorts1, 0, (vector signed short*)block);
  487. s1 += stride;
  488. s2 += stride;
  489. block += 8;
  490. }
  491. }
  492. void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
  493. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  494. int i;
  495. for(i=0; i+7<w; i+=8){
  496. dst[i+0] += src[i+0];
  497. dst[i+1] += src[i+1];
  498. dst[i+2] += src[i+2];
  499. dst[i+3] += src[i+3];
  500. dst[i+4] += src[i+4];
  501. dst[i+5] += src[i+5];
  502. dst[i+6] += src[i+6];
  503. dst[i+7] += src[i+7];
  504. }
  505. for(; i<w; i++)
  506. dst[i+0] += src[i+0];
  507. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  508. register int i;
  509. register vector unsigned char vdst, vsrc;
  510. /* dst and src are 16-byte aligned (guaranteed) */
  511. for(i = 0 ; (i + 15) < w ; i += 16)
  512. {
  513. vdst = vec_ld(i, (unsigned char*)dst);
  514. vsrc = vec_ld(i, (unsigned char*)src);
  515. vdst = vec_add(vsrc, vdst);
  516. vec_st(vdst, i, (unsigned char*)dst);
  517. }
  518. /* handle the remaining bytes if w is not a multiple of 16 */
  519. for (; (i < w) ; i++)
  520. {
  521. dst[i] += src[i];
  522. }
  523. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  524. }
  525. /* next one assumes that ((line_size % 16) == 0) */
  526. void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  527. {
  528. POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
  529. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  530. int i;
  531. POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
  532. for(i=0; i<h; i++) {
  533. *((uint32_t*)(block)) = LD32(pixels);
  534. *((uint32_t*)(block+4)) = LD32(pixels+4);
  535. *((uint32_t*)(block+8)) = LD32(pixels+8);
  536. *((uint32_t*)(block+12)) = LD32(pixels+12);
  537. pixels+=line_size;
  538. block +=line_size;
  539. }
  540. POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
  541. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  542. register vector unsigned char pixelsv1, pixelsv2;
  543. register vector unsigned char pixelsv1B, pixelsv2B;
  544. register vector unsigned char pixelsv1C, pixelsv2C;
  545. register vector unsigned char pixelsv1D, pixelsv2D;
  546. register vector unsigned char perm = vec_lvsl(0, pixels);
  547. int i;
  548. register int line_size_2 = line_size << 1;
  549. register int line_size_3 = line_size + line_size_2;
  550. register int line_size_4 = line_size << 2;
  551. POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
  552. // hand-unrolling the loop by 4 gains about 15%:
  553. // minimum execution time goes from 74 to 60 cycles.
  554. // it's faster than -funroll-loops, but using
  555. // -funroll-loops with this is bad - 74 cycles again.
  556. // all this is on a 7450, tuned for the 7450
  557. #if 0
  558. for(i=0; i<h; i++) {
  559. pixelsv1 = vec_ld(0, (unsigned char*)pixels);
  560. pixelsv2 = vec_ld(16, (unsigned char*)pixels);
  561. vec_st(vec_perm(pixelsv1, pixelsv2, perm),
  562. 0, (unsigned char*)block);
  563. pixels+=line_size;
  564. block +=line_size;
  565. }
  566. #else
  567. for(i=0; i<h; i+=4) {
  568. pixelsv1 = vec_ld(0, (unsigned char*)pixels);
  569. pixelsv2 = vec_ld(16, (unsigned char*)pixels);
  570. pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
  571. pixelsv2B = vec_ld(16 + line_size, (unsigned char*)pixels);
  572. pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
  573. pixelsv2C = vec_ld(16 + line_size_2, (unsigned char*)pixels);
  574. pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
  575. pixelsv2D = vec_ld(16 + line_size_3, (unsigned char*)pixels);
  576. vec_st(vec_perm(pixelsv1, pixelsv2, perm),
  577. 0, (unsigned char*)block);
  578. vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
  579. line_size, (unsigned char*)block);
  580. vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
  581. line_size_2, (unsigned char*)block);
  582. vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
  583. line_size_3, (unsigned char*)block);
  584. pixels+=line_size_4;
  585. block +=line_size_4;
  586. }
  587. #endif
  588. POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
  589. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  590. }
  591. /* next one assumes that ((line_size % 16) == 0) */
  592. #define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
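/* op_avg is a per-byte average of two groups of four packed bytes,
   rounded up: (a+b+1)>>1 == (a|b) - ((a^b)>>1); the 0xFEFEFEFE mask
   keeps the shifted bits from crossing byte lanes */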
  593. void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  594. {
  595. POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
  596. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  597. int i;
  598. POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
  599. for(i=0; i<h; i++) {
  600. op_avg(*((uint32_t*)(block)),LD32(pixels));
  601. op_avg(*((uint32_t*)(block+4)),LD32(pixels+4));
  602. op_avg(*((uint32_t*)(block+8)),LD32(pixels+8));
  603. op_avg(*((uint32_t*)(block+12)),LD32(pixels+12));
  604. pixels+=line_size;
  605. block +=line_size;
  606. }
  607. POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
  608. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  609. register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
  610. register vector unsigned char perm = vec_lvsl(0, pixels);
  611. int i;
  612. POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
  613. for(i=0; i<h; i++) {
  614. pixelsv1 = vec_ld(0, (unsigned char*)pixels);
  615. pixelsv2 = vec_ld(16, (unsigned char*)pixels);
  616. blockv = vec_ld(0, block);
  617. pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
  618. blockv = vec_avg(blockv,pixelsv);
  619. vec_st(blockv, 0, (unsigned char*)block);
  620. pixels+=line_size;
  621. block +=line_size;
  622. }
  623. POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
  624. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  625. }
  626. /* next one assumes that ((line_size % 8) == 0) */
  627. void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
  628. {
  629. POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
  630. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  631. int i;
  632. POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
  633. for (i = 0; i < h; i++) {
  634. *((uint32_t *) (block)) =
  635. (((*((uint32_t *) (block))) |
  636. ((((const struct unaligned_32 *) (pixels))->l))) -
  637. ((((*((uint32_t *) (block))) ^
  638. ((((const struct unaligned_32 *) (pixels))->
  639. l))) & 0xFEFEFEFEUL) >> 1));
  640. *((uint32_t *) (block + 4)) =
  641. (((*((uint32_t *) (block + 4))) |
  642. ((((const struct unaligned_32 *) (pixels + 4))->l))) -
  643. ((((*((uint32_t *) (block + 4))) ^
  644. ((((const struct unaligned_32 *) (pixels +
  645. 4))->
  646. l))) & 0xFEFEFEFEUL) >> 1));
  647. pixels += line_size;
  648. block += line_size;
  649. }
  650. POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
  651. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  652. register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
  653. int i;
  654. POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
  655. for (i = 0; i < h; i++) {
  656. /*
  657. block is 8-byte aligned, so we're either in the left half
  658. of a 16-byte-aligned chunk (aligned) or in the right half (not)
  659. */
  660. int rightside = ((unsigned long)block & 0x0000000F);
  661. blockv = vec_ld(0, block);
  662. pixelsv1 = vec_ld(0, (unsigned char*)pixels);
  663. pixelsv2 = vec_ld(16, (unsigned char*)pixels);
  664. pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
  665. if (rightside)
  666. {
  667. pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
  668. }
  669. else
  670. {
  671. pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
  672. }
  673. blockv = vec_avg(blockv, pixelsv);
  674. vec_st(blockv, 0, block);
  675. pixels += line_size;
  676. block += line_size;
  677. }
  678. POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
  679. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  680. }
  681. /* next one assumes that ((line_size % 8) == 0) */
  682. void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  683. {
  684. POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
  685. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  686. int j;
  687. POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
  688. for (j = 0; j < 2; j++) {
  689. int i;
  690. const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  691. const uint32_t b =
  692. (((const struct unaligned_32 *) (pixels + 1))->l);
  693. uint32_t l0 =
  694. (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
  695. uint32_t h0 =
  696. ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  697. uint32_t l1, h1;
  698. pixels += line_size;
  699. for (i = 0; i < h; i += 2) {
  700. uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  701. uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
  702. l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
  703. h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  704. *((uint32_t *) block) =
  705. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  706. pixels += line_size;
  707. block += line_size;
  708. a = (((const struct unaligned_32 *) (pixels))->l);
  709. b = (((const struct unaligned_32 *) (pixels + 1))->l);
  710. l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
  711. h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  712. *((uint32_t *) block) =
  713. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  714. pixels += line_size;
  715. block += line_size;
  716. } pixels += 4 - line_size * (h + 1);
  717. block += 4 - line_size * h;
  718. }
  719. POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
  720. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  721. register int i;
  722. register vector unsigned char
  723. pixelsv1, pixelsv2,
  724. pixelsavg;
  725. register vector unsigned char
  726. blockv, temp1, temp2;
  727. register vector unsigned short
  728. pixelssum1, pixelssum2, temp3;
  729. register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
  730. register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
  731. temp1 = vec_ld(0, pixels);
  732. temp2 = vec_ld(16, pixels);
  733. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
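/* when pixels is 15 bytes past a 16-byte boundary, pixels+1 is
   aligned and vec_lvsl(1, pixels) wraps to zero (which would select
   temp1), so use temp2 directly in that case */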
  734. if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
  735. {
  736. pixelsv2 = temp2;
  737. }
  738. else
  739. {
  740. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
  741. }
  742. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  743. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  744. pixelssum1 = vec_add((vector unsigned short)pixelsv1,
  745. (vector unsigned short)pixelsv2);
  746. pixelssum1 = vec_add(pixelssum1, vctwo);
  747. POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
  748. for (i = 0; i < h ; i++) {
  749. int rightside = ((unsigned long)block & 0x0000000F);
  750. blockv = vec_ld(0, block);
  751. temp1 = vec_ld(line_size, pixels);
  752. temp2 = vec_ld(line_size + 16, pixels);
  753. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
  754. if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
  755. {
  756. pixelsv2 = temp2;
  757. }
  758. else
  759. {
  760. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
  761. }
  762. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  763. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  764. pixelssum2 = vec_add((vector unsigned short)pixelsv1,
  765. (vector unsigned short)pixelsv2);
  766. temp3 = vec_add(pixelssum1, pixelssum2);
  767. temp3 = vec_sra(temp3, vctwo);
  768. pixelssum1 = vec_add(pixelssum2, vctwo);
  769. pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
  770. if (rightside)
  771. {
  772. blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
  773. }
  774. else
  775. {
  776. blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
  777. }
  778. vec_st(blockv, 0, block);
  779. block += line_size;
  780. pixels += line_size;
  781. }
  782. POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
  783. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  784. }
  785. /* next one assumes that ((line_size % 8) == 0) */
  786. void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  787. {
  788. POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
  789. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  790. int j;
  791. POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
  792. for (j = 0; j < 2; j++) {
  793. int i;
  794. const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  795. const uint32_t b =
  796. (((const struct unaligned_32 *) (pixels + 1))->l);
  797. uint32_t l0 =
  798. (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
  799. uint32_t h0 =
  800. ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  801. uint32_t l1, h1;
  802. pixels += line_size;
  803. for (i = 0; i < h; i += 2) {
  804. uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  805. uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
  806. l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
  807. h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  808. *((uint32_t *) block) =
  809. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  810. pixels += line_size;
  811. block += line_size;
  812. a = (((const struct unaligned_32 *) (pixels))->l);
  813. b = (((const struct unaligned_32 *) (pixels + 1))->l);
  814. l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
  815. h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  816. *((uint32_t *) block) =
  817. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  818. pixels += line_size;
  819. block += line_size;
  820. } pixels += 4 - line_size * (h + 1);
  821. block += 4 - line_size * h;
  822. }
  823. POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
  824. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  825. register int i;
  826. register vector unsigned char
  827. pixelsv1, pixelsv2,
  828. pixelsavg;
  829. register vector unsigned char
  830. blockv, temp1, temp2;
  831. register vector unsigned short
  832. pixelssum1, pixelssum2, temp3;
  833. register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
  834. register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
  835. register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
  836. temp1 = vec_ld(0, pixels);
  837. temp2 = vec_ld(16, pixels);
  838. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
  839. if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
  840. {
  841. pixelsv2 = temp2;
  842. }
  843. else
  844. {
  845. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
  846. }
  847. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  848. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  849. pixelssum1 = vec_add((vector unsigned short)pixelsv1,
  850. (vector unsigned short)pixelsv2);
  851. pixelssum1 = vec_add(pixelssum1, vcone);
  852. POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
  853. for (i = 0; i < h ; i++) {
  854. int rightside = ((unsigned long)block & 0x0000000F);
  855. blockv = vec_ld(0, block);
  856. temp1 = vec_ld(line_size, pixels);
  857. temp2 = vec_ld(line_size + 16, pixels);
  858. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
  859. if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
  860. {
  861. pixelsv2 = temp2;
  862. }
  863. else
  864. {
  865. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
  866. }
  867. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  868. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  869. pixelssum2 = vec_add((vector unsigned short)pixelsv1,
  870. (vector unsigned short)pixelsv2);
  871. temp3 = vec_add(pixelssum1, pixelssum2);
  872. temp3 = vec_sra(temp3, vctwo);
  873. pixelssum1 = vec_add(pixelssum2, vcone);
  874. pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
  875. if (rightside)
  876. {
  877. blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
  878. }
  879. else
  880. {
  881. blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
  882. }
  883. vec_st(blockv, 0, block);
  884. block += line_size;
  885. pixels += line_size;
  886. }
  887. POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
  888. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  889. }
  890. /* next one assumes that ((line_size % 16) == 0) */
  891. void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
  892. {
  893. POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
  894. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  895. int j;
  896. POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
  897. for (j = 0; j < 4; j++) {
  898. int i;
  899. const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  900. const uint32_t b =
  901. (((const struct unaligned_32 *) (pixels + 1))->l);
  902. uint32_t l0 =
  903. (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
  904. uint32_t h0 =
  905. ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  906. uint32_t l1, h1;
  907. pixels += line_size;
  908. for (i = 0; i < h; i += 2) {
  909. uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  910. uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
  911. l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
  912. h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  913. *((uint32_t *) block) =
  914. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  915. pixels += line_size;
  916. block += line_size;
  917. a = (((const struct unaligned_32 *) (pixels))->l);
  918. b = (((const struct unaligned_32 *) (pixels + 1))->l);
  919. l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
  920. h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  921. *((uint32_t *) block) =
  922. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  923. pixels += line_size;
  924. block += line_size;
  925. } pixels += 4 - line_size * (h + 1);
  926. block += 4 - line_size * h;
  927. }
  928. POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
  929. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  930. register int i;
  931. register vector unsigned char
  932. pixelsv1, pixelsv2, pixelsv3, pixelsv4;
  933. register vector unsigned char
  934. blockv, temp1, temp2;
  935. register vector unsigned short
  936. pixelssum1, pixelssum2, temp3,
  937. pixelssum3, pixelssum4, temp4;
  938. register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
  939. register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
  940. POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
  941. temp1 = vec_ld(0, pixels);
  942. temp2 = vec_ld(16, pixels);
  943. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
  944. if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
  945. {
  946. pixelsv2 = temp2;
  947. }
  948. else
  949. {
  950. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
  951. }
  952. pixelsv3 = vec_mergel(vczero, pixelsv1);
  953. pixelsv4 = vec_mergel(vczero, pixelsv2);
  954. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  955. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  956. pixelssum3 = vec_add((vector unsigned short)pixelsv3,
  957. (vector unsigned short)pixelsv4);
  958. pixelssum3 = vec_add(pixelssum3, vctwo);
  959. pixelssum1 = vec_add((vector unsigned short)pixelsv1,
  960. (vector unsigned short)pixelsv2);
  961. pixelssum1 = vec_add(pixelssum1, vctwo);
  962. for (i = 0; i < h ; i++) {
  963. blockv = vec_ld(0, block);
  964. temp1 = vec_ld(line_size, pixels);
  965. temp2 = vec_ld(line_size + 16, pixels);
  966. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
  967. if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
  968. {
  969. pixelsv2 = temp2;
  970. }
  971. else
  972. {
  973. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
  974. }
  975. pixelsv3 = vec_mergel(vczero, pixelsv1);
  976. pixelsv4 = vec_mergel(vczero, pixelsv2);
  977. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  978. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  979. pixelssum4 = vec_add((vector unsigned short)pixelsv3,
  980. (vector unsigned short)pixelsv4);
  981. pixelssum2 = vec_add((vector unsigned short)pixelsv1,
  982. (vector unsigned short)pixelsv2);
  983. temp4 = vec_add(pixelssum3, pixelssum4);
  984. temp4 = vec_sra(temp4, vctwo);
  985. temp3 = vec_add(pixelssum1, pixelssum2);
  986. temp3 = vec_sra(temp3, vctwo);
  987. pixelssum3 = vec_add(pixelssum4, vctwo);
  988. pixelssum1 = vec_add(pixelssum2, vctwo);
  989. blockv = vec_packsu(temp3, temp4);
  990. vec_st(blockv, 0, block);
  991. block += line_size;
  992. pixels += line_size;
  993. }
  994. POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
  995. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  996. }
  997. /* next one assumes that ((line_size % 16) == 0) */
  998. void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
  999. {
  1000. POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
  1001. #ifdef ALTIVEC_USE_REFERENCE_C_CODE
  1002. int j;
  1003. POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
  1004. for (j = 0; j < 4; j++) {
  1005. int i;
  1006. const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  1007. const uint32_t b =
  1008. (((const struct unaligned_32 *) (pixels + 1))->l);
  1009. uint32_t l0 =
  1010. (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
  1011. uint32_t h0 =
  1012. ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  1013. uint32_t l1, h1;
  1014. pixels += line_size;
  1015. for (i = 0; i < h; i += 2) {
  1016. uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
  1017. uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
  1018. l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
  1019. h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  1020. *((uint32_t *) block) =
  1021. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  1022. pixels += line_size;
  1023. block += line_size;
  1024. a = (((const struct unaligned_32 *) (pixels))->l);
  1025. b = (((const struct unaligned_32 *) (pixels + 1))->l);
  1026. l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
  1027. h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
  1028. *((uint32_t *) block) =
  1029. h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
  1030. pixels += line_size;
  1031. block += line_size;
  1032. } pixels += 4 - line_size * (h + 1);
  1033. block += 4 - line_size * h;
  1034. }
  1035. POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
  1036. #else /* ALTIVEC_USE_REFERENCE_C_CODE */
  1037. register int i;
  1038. register vector unsigned char
  1039. pixelsv1, pixelsv2, pixelsv3, pixelsv4;
  1040. register vector unsigned char
  1041. blockv, temp1, temp2;
  1042. register vector unsigned short
  1043. pixelssum1, pixelssum2, temp3,
  1044. pixelssum3, pixelssum4, temp4;
  1045. register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
  1046. register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
  1047. register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
  1048. POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
  1049. temp1 = vec_ld(0, pixels);
  1050. temp2 = vec_ld(16, pixels);
  1051. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
  1052. if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
  1053. {
  1054. pixelsv2 = temp2;
  1055. }
  1056. else
  1057. {
  1058. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
  1059. }
  1060. pixelsv3 = vec_mergel(vczero, pixelsv1);
  1061. pixelsv4 = vec_mergel(vczero, pixelsv2);
  1062. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  1063. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  1064. pixelssum3 = vec_add((vector unsigned short)pixelsv3,
  1065. (vector unsigned short)pixelsv4);
  1066. pixelssum3 = vec_add(pixelssum3, vcone);
  1067. pixelssum1 = vec_add((vector unsigned short)pixelsv1,
  1068. (vector unsigned short)pixelsv2);
  1069. pixelssum1 = vec_add(pixelssum1, vcone);
  1070. for (i = 0; i < h ; i++) {
  1071. blockv = vec_ld(0, block);
  1072. temp1 = vec_ld(line_size, pixels);
  1073. temp2 = vec_ld(line_size + 16, pixels);
  1074. pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
  1075. if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
  1076. {
  1077. pixelsv2 = temp2;
  1078. }
  1079. else
  1080. {
  1081. pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
  1082. }
  1083. pixelsv3 = vec_mergel(vczero, pixelsv1);
  1084. pixelsv4 = vec_mergel(vczero, pixelsv2);
  1085. pixelsv1 = vec_mergeh(vczero, pixelsv1);
  1086. pixelsv2 = vec_mergeh(vczero, pixelsv2);
  1087. pixelssum4 = vec_add((vector unsigned short)pixelsv3,
  1088. (vector unsigned short)pixelsv4);
  1089. pixelssum2 = vec_add((vector unsigned short)pixelsv1,
  1090. (vector unsigned short)pixelsv2);
  1091. temp4 = vec_add(pixelssum3, pixelssum4);
  1092. temp4 = vec_sra(temp4, vctwo);
  1093. temp3 = vec_add(pixelssum1, pixelssum2);
  1094. temp3 = vec_sra(temp3, vctwo);
  1095. pixelssum3 = vec_add(pixelssum4, vcone);
  1096. pixelssum1 = vec_add(pixelssum2, vcone);
  1097. blockv = vec_packsu(temp3, temp4);
  1098. vec_st(blockv, 0, block);
  1099. block += line_size;
  1100. pixels += line_size;
  1101. }
  1102. POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
  1103. #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
  1104. }
  1105. #if (__GNUC__ * 100 + __GNUC_MINOR__ >= 330)
  1106. int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
  1107. POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
  1108. int sum;
  1109. POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
  1110. register const_vector unsigned char vzero = (const_vector unsigned char)vec_splat_u8(0);
  1111. register vector signed short temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  1112. {
  1113. register const_vector signed short vprod1 = (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
  1114. register const_vector signed short vprod2 = (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
  1115. register const_vector signed short vprod3 = (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
  1116. register const_vector unsigned char perm1 = (const_vector unsigned char)
  1117. AVV(0x02, 0x03, 0x00, 0x01,
  1118. 0x06, 0x07, 0x04, 0x05,
  1119. 0x0A, 0x0B, 0x08, 0x09,
  1120. 0x0E, 0x0F, 0x0C, 0x0D);
  1121. register const_vector unsigned char perm2 = (const_vector unsigned char)
  1122. AVV(0x04, 0x05, 0x06, 0x07,
  1123. 0x00, 0x01, 0x02, 0x03,
  1124. 0x0C, 0x0D, 0x0E, 0x0F,
  1125. 0x08, 0x09, 0x0A, 0x0B);
  1126. register const_vector unsigned char perm3 = (const_vector unsigned char)
  1127. AVV(0x08, 0x09, 0x0A, 0x0B,
  1128. 0x0C, 0x0D, 0x0E, 0x0F,
  1129. 0x00, 0x01, 0x02, 0x03,
  1130. 0x04, 0x05, 0x06, 0x07);
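/* ONEITERBUTTERFLY loads one row of src and dst, takes their
   difference, and applies the horizontal 8-point Hadamard transform
   as three permute + multiply-add stages (butterflies at distances
   1, 2 and 4 elements) */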
  1131. #define ONEITERBUTTERFLY(i, res) \
  1132. { \
  1133. register vector unsigned char src1, src2, srcO; \
  1134. register vector unsigned char dst1, dst2, dstO; \
  1135. src1 = vec_ld(stride * i, src); \
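/* the second vector is needed only when the 8 bytes we use extend \
   past the first 16-byte load, i.e. when the address is more than \
   8 bytes past a 16-byte boundary; same check for dst below */ \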
  1136. if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \
  1137. src2 = vec_ld((stride * i) + 16, src); \
  1138. srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
  1139. dst1 = vec_ld(stride * i, dst); \
  1140. if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \
  1141. dst2 = vec_ld((stride * i) + 16, dst); \
  1142. dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
  1143. /* promote the unsigned chars to signed shorts */ \
  1144. /* we're in the 8x8 function, so we only care about the first 8 */ \
  1145. register vector signed short srcV = \
  1146. (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
  1147. register vector signed short dstV = \
  1148. (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
  1149. /* subtractions inside the first butterfly */ \
  1150. register vector signed short but0 = vec_sub(srcV, dstV); \
  1151. register vector signed short op1 = vec_perm(but0, but0, perm1); \
  1152. register vector signed short but1 = vec_mladd(but0, vprod1, op1); \
  1153. register vector signed short op2 = vec_perm(but1, but1, perm2); \
  1154. register vector signed short but2 = vec_mladd(but1, vprod2, op2); \
  1155. register vector signed short op3 = vec_perm(but2, but2, perm3); \
  1156. res = vec_mladd(but2, vprod3, op3); \
  1157. }
  1158. ONEITERBUTTERFLY(0, temp0);
  1159. ONEITERBUTTERFLY(1, temp1);
  1160. ONEITERBUTTERFLY(2, temp2);
  1161. ONEITERBUTTERFLY(3, temp3);
  1162. ONEITERBUTTERFLY(4, temp4);
  1163. ONEITERBUTTERFLY(5, temp5);
  1164. ONEITERBUTTERFLY(6, temp6);
  1165. ONEITERBUTTERFLY(7, temp7);
  1166. }
  1167. #undef ONEITERBUTTERFLY
  1168. {
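/* vertical part of the transform: butterfly the eight row results
   against each other, then sum the absolute values of all the
   coefficients */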
  1169. register vector signed int vsum;
  1170. register vector signed short line0 = vec_add(temp0, temp1);
  1171. register vector signed short line1 = vec_sub(temp0, temp1);
  1172. register vector signed short line2 = vec_add(temp2, temp3);
  1173. register vector signed short line3 = vec_sub(temp2, temp3);
  1174. register vector signed short line4 = vec_add(temp4, temp5);
  1175. register vector signed short line5 = vec_sub(temp4, temp5);
  1176. register vector signed short line6 = vec_add(temp6, temp7);
  1177. register vector signed short line7 = vec_sub(temp6, temp7);
  1178. register vector signed short line0B = vec_add(line0, line2);
  1179. register vector signed short line2B = vec_sub(line0, line2);
  1180. register vector signed short line1B = vec_add(line1, line3);
  1181. register vector signed short line3B = vec_sub(line1, line3);
  1182. register vector signed short line4B = vec_add(line4, line6);
  1183. register vector signed short line6B = vec_sub(line4, line6);
  1184. register vector signed short line5B = vec_add(line5, line7);
  1185. register vector signed short line7B = vec_sub(line5, line7);
  1186. register vector signed short line0C = vec_add(line0B, line4B);
  1187. register vector signed short line4C = vec_sub(line0B, line4B);
  1188. register vector signed short line1C = vec_add(line1B, line5B);
  1189. register vector signed short line5C = vec_sub(line1B, line5B);
  1190. register vector signed short line2C = vec_add(line2B, line6B);
  1191. register vector signed short line6C = vec_sub(line2B, line6B);
  1192. register vector signed short line3C = vec_add(line3B, line7B);
  1193. register vector signed short line7C = vec_sub(line3B, line7B);
  1194. vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
  1195. vsum = vec_sum4s(vec_abs(line1C), vsum);
  1196. vsum = vec_sum4s(vec_abs(line2C), vsum);
  1197. vsum = vec_sum4s(vec_abs(line3C), vsum);
  1198. vsum = vec_sum4s(vec_abs(line4C), vsum);
  1199. vsum = vec_sum4s(vec_abs(line5C), vsum);
  1200. vsum = vec_sum4s(vec_abs(line6C), vsum);
  1201. vsum = vec_sum4s(vec_abs(line7C), vsum);
  1202. vsum = vec_sums(vsum, (vector signed int)vzero);
  1203. vsum = vec_splat(vsum, 3);
  1204. vec_ste(vsum, 0, &sum);
  1205. }
  1206. POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
  1207. return sum;
  1208. }
  1209. /*
  1210. 16x8 works with 16 elements; it avoids replicating loads
  1211. and gives the compiler more room for scheduling.
  1212. It's only used from inside hadamard8_diff16_altivec.
  1213. Unfortunately, gcc-3.3 seems to be a bit dumb: the compiled
  1214. code has a LOT of spill code, and it seems gcc (unlike xlc)
  1215. cannot keep everything in registers by itself.
  1216. The following code includes hand-made register allocation.
  1217. It's not clean, but on a 7450 the resulting code is much
  1218. faster (the best case falls from 700+ cycles to 550).
  1219. xlc doesn't add spill code, but it doesn't know how to
  1220. schedule for the 7450, and its code isn't much faster
  1221. than gcc-3.3's on the 7450 (though it uses 25% fewer
  1222. instructions...)
  1223. On the 970, the hand-made RA is still a win (around 690
  1224. vs. around 780), but xlc goes to around 660 on the
  1225. regular C code...
  1226. */
  1227. static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
  1228. int sum;
  1229. register vector signed short
  1230. temp0 asm ("v0"),
  1231. temp1 asm ("v1"),
  1232. temp2 asm ("v2"),
  1233. temp3 asm ("v3"),
  1234. temp4 asm ("v4"),
  1235. temp5 asm ("v5"),
  1236. temp6 asm ("v6"),
  1237. temp7 asm ("v7");
  1238. register vector signed short
  1239. temp0S asm ("v8"),
  1240. temp1S asm ("v9"),
  1241. temp2S asm ("v10"),
  1242. temp3S asm ("v11"),
  1243. temp4S asm ("v12"),
  1244. temp5S asm ("v13"),
  1245. temp6S asm ("v14"),
  1246. temp7S asm ("v15");
  1247. register const_vector unsigned char vzero asm ("v31")= (const_vector unsigned char)vec_splat_u8(0);
  1248. {
  1249. register const_vector signed short vprod1 asm ("v16")= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
  1250. register const_vector signed short vprod2 asm ("v17")= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
  1251. register const_vector signed short vprod3 asm ("v18")= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
  1252. register const_vector unsigned char perm1 asm ("v19")= (const_vector unsigned char)
  1253. AVV(0x02, 0x03, 0x00, 0x01,
  1254. 0x06, 0x07, 0x04, 0x05,
  1255. 0x0A, 0x0B, 0x08, 0x09,
  1256. 0x0E, 0x0F, 0x0C, 0x0D);
  1257. register const_vector unsigned char perm2 asm ("v20")= (const_vector unsigned char)
  1258. AVV(0x04, 0x05, 0x06, 0x07,
  1259. 0x00, 0x01, 0x02, 0x03,
  1260. 0x0C, 0x0D, 0x0E, 0x0F,
  1261. 0x08, 0x09, 0x0A, 0x0B);
  1262. register const_vector unsigned char perm3 asm ("v21")= (const_vector unsigned char)
  1263. AVV(0x08, 0x09, 0x0A, 0x0B,
  1264. 0x0C, 0x0D, 0x0E, 0x0F,
  1265. 0x00, 0x01, 0x02, 0x03,
  1266. 0x04, 0x05, 0x06, 0x07);
  1267. #define ONEITERBUTTERFLY(i, res1, res2) \
  1268. { \
  1269. register vector unsigned char src1 asm ("v22"), src2 asm ("v23"); \
  1270. register vector unsigned char dst1 asm ("v24"), dst2 asm ("v25"); \
  1271. src1 = vec_ld(stride * i, src); \
  1272. src2 = vec_ld((stride * i) + 16, src); \
  1273. register vector unsigned char srcO asm ("v22") = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
  1274. dst1 = vec_ld(stride * i, dst); \
  1275. dst2 = vec_ld((stride * i) + 16, dst); \
  1276. register vector unsigned char dstO asm ("v23") = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
  1277. /* promote the unsigned chars to signed shorts */ \
  1278. register vector signed short srcV asm ("v24") = \
  1279. (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
  1280. register vector signed short dstV asm ("v25") = \
  1281. (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
  1282. register vector signed short srcW asm ("v26") = \
  1283. (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \
  1284. register vector signed short dstW asm ("v27") = \
  1285. (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \
  1286. /* subtractions inside the first butterfly */ \
  1287. register vector signed short but0 asm ("v28") = vec_sub(srcV, dstV); \
  1288. register vector signed short but0S asm ("v29") = vec_sub(srcW, dstW); \
  1289. register vector signed short op1 asm ("v30") = vec_perm(but0, but0, perm1); \
  1290. register vector signed short but1 asm ("v22") = vec_mladd(but0, vprod1, op1); \
  1291. register vector signed short op1S asm ("v23") = vec_perm(but0S, but0S, perm1); \
  1292. register vector signed short but1S asm ("v24") = vec_mladd(but0S, vprod1, op1S); \
  1293. register vector signed short op2 asm ("v25") = vec_perm(but1, but1, perm2); \
  1294. register vector signed short but2 asm ("v26") = vec_mladd(but1, vprod2, op2); \
  1295. register vector signed short op2S asm ("v27") = vec_perm(but1S, but1S, perm2); \
  1296. register vector signed short but2S asm ("v28") = vec_mladd(but1S, vprod2, op2S); \
  1297. register vector signed short op3 asm ("v29") = vec_perm(but2, but2, perm3); \
  1298. res1 = vec_mladd(but2, vprod3, op3); \
  1299. register vector signed short op3S asm ("v30") = vec_perm(but2S, but2S, perm3); \
  1300. res2 = vec_mladd(but2S, vprod3, op3S); \
  1301. }
  1302. ONEITERBUTTERFLY(0, temp0, temp0S);
  1303. ONEITERBUTTERFLY(1, temp1, temp1S);
  1304. ONEITERBUTTERFLY(2, temp2, temp2S);
  1305. ONEITERBUTTERFLY(3, temp3, temp3S);
  1306. ONEITERBUTTERFLY(4, temp4, temp4S);
  1307. ONEITERBUTTERFLY(5, temp5, temp5S);
  1308. ONEITERBUTTERFLY(6, temp6, temp6S);
  1309. ONEITERBUTTERFLY(7, temp7, temp7S);
  1310. }
  1311. #undef ONEITERBUTTERFLY
  1312. {
  1313. register vector signed int vsum;
  1314. register vector signed short line0 = vec_add(temp0, temp1);
  1315. register vector signed short line1 = vec_sub(temp0, temp1);
  1316. register vector signed short line2 = vec_add(temp2, temp3);
  1317. register vector signed short line3 = vec_sub(temp2, temp3);
  1318. register vector signed short line4 = vec_add(temp4, temp5);
  1319. register vector signed short line5 = vec_sub(temp4, temp5);
  1320. register vector signed short line6 = vec_add(temp6, temp7);
  1321. register vector signed short line7 = vec_sub(temp6, temp7);
  1322. register vector signed short line0B = vec_add(line0, line2);
  1323. register vector signed short line2B = vec_sub(line0, line2);
  1324. register vector signed short line1B = vec_add(line1, line3);
  1325. register vector signed short line3B = vec_sub(line1, line3);
  1326. register vector signed short line4B = vec_add(line4, line6);
  1327. register vector signed short line6B = vec_sub(line4, line6);
  1328. register vector signed short line5B = vec_add(line5, line7);
  1329. register vector signed short line7B = vec_sub(line5, line7);
  1330. register vector signed short line0C = vec_add(line0B, line4B);
  1331. register vector signed short line4C = vec_sub(line0B, line4B);
  1332. register vector signed short line1C = vec_add(line1B, line5B);
  1333. register vector signed short line5C = vec_sub(line1B, line5B);
  1334. register vector signed short line2C = vec_add(line2B, line6B);
  1335. register vector signed short line6C = vec_sub(line2B, line6B);
  1336. register vector signed short line3C = vec_add(line3B, line7B);
  1337. register vector signed short line7C = vec_sub(line3B, line7B);
  1338. vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
  1339. vsum = vec_sum4s(vec_abs(line1C), vsum);
  1340. vsum = vec_sum4s(vec_abs(line2C), vsum);
  1341. vsum = vec_sum4s(vec_abs(line3C), vsum);
  1342. vsum = vec_sum4s(vec_abs(line4C), vsum);
  1343. vsum = vec_sum4s(vec_abs(line5C), vsum);
  1344. vsum = vec_sum4s(vec_abs(line6C), vsum);
  1345. vsum = vec_sum4s(vec_abs(line7C), vsum);
  1346. register vector signed short line0S = vec_add(temp0S, temp1S);
  1347. register vector signed short line1S = vec_sub(temp0S, temp1S);
  1348. register vector signed short line2S = vec_add(temp2S, temp3S);
  1349. register vector signed short line3S = vec_sub(temp2S, temp3S);
  1350. register vector signed short line4S = vec_add(temp4S, temp5S);
  1351. register vector signed short line5S = vec_sub(temp4S, temp5S);
  1352. register vector signed short line6S = vec_add(temp6S, temp7S);
  1353. register vector signed short line7S = vec_sub(temp6S, temp7S);
  1354. register vector signed short line0BS = vec_add(line0S, line2S);
  1355. register vector signed short line2BS = vec_sub(line0S, line2S);
  1356. register vector signed short line1BS = vec_add(line1S, line3S);
  1357. register vector signed short line3BS = vec_sub(line1S, line3S);
  1358. register vector signed short line4BS = vec_add(line4S, line6S);
  1359. register vector signed short line6BS = vec_sub(line4S, line6S);
  1360. register vector signed short line5BS = vec_add(line5S, line7S);
  1361. register vector signed short line7BS = vec_sub(line5S, line7S);
  1362. register vector signed short line0CS = vec_add(line0BS, line4BS);
  1363. register vector signed short line4CS = vec_sub(line0BS, line4BS);
  1364. register vector signed short line1CS = vec_add(line1BS, line5BS);
  1365. register vector signed short line5CS = vec_sub(line1BS, line5BS);
  1366. register vector signed short line2CS = vec_add(line2BS, line6BS);
  1367. register vector signed short line6CS = vec_sub(line2BS, line6BS);
  1368. register vector signed short line3CS = vec_add(line3BS, line7BS);
  1369. register vector signed short line7CS = vec_sub(line3BS, line7BS);
  1370. vsum = vec_sum4s(vec_abs(line0CS), vsum);
  1371. vsum = vec_sum4s(vec_abs(line1CS), vsum);
  1372. vsum = vec_sum4s(vec_abs(line2CS), vsum);
  1373. vsum = vec_sum4s(vec_abs(line3CS), vsum);
  1374. vsum = vec_sum4s(vec_abs(line4CS), vsum);
  1375. vsum = vec_sum4s(vec_abs(line5CS), vsum);
  1376. vsum = vec_sum4s(vec_abs(line6CS), vsum);
  1377. vsum = vec_sum4s(vec_abs(line7CS), vsum);
  1378. vsum = vec_sums(vsum, (vector signed int)vzero);
  1379. vsum = vec_splat(vsum, 3);
  1380. vec_ste(vsum, 0, &sum);
  1381. }
  1382. return sum;
  1383. }
  1384. int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
  1385. POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
  1386. int score;
  1387. POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
  1388. score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
  1389. if (h==16) {
  1390. dst += 8*stride;
  1391. src += 8*stride;
  1392. score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
  1393. }
  1394. POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
  1395. return score;
  1396. }
  1397. #endif
  1398. int has_altivec(void)
  1399. {
  1400. #ifdef CONFIG_DARWIN
  1401. int sels[2] = {CTL_HW, HW_VECTORUNIT};
  1402. int has_vu = 0;
  1403. size_t len = sizeof(has_vu);
  1404. int err;
  1405. err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
  1406. if (err == 0) return (has_vu != 0);
  1407. #else /* CONFIG_DARWIN */
  1408. /* not Darwin: do it the brute-force way */
  1409. /* this is borrowed from the libmpeg2 library */
  1410. {
  1411. signal (SIGILL, sigill_handler);
  1412. if (sigsetjmp (jmpbuf, 1)) {
  1413. signal (SIGILL, SIG_DFL);
  1414. } else {
  1415. canjump = 1;
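/* write VRSAVE (SPR 256) and execute an AltiVec vand; on a CPU
   without AltiVec this raises SIGILL, which sigill_handler above
   turns into a return from sigsetjmp */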
  1416. asm volatile ("mtspr 256, %0\n\t"
  1417. "vand %%v0, %%v0, %%v0"
  1418. :
  1419. : "r" (-1));
  1420. signal (SIGILL, SIG_DFL);
  1421. return 1;
  1422. }
  1423. }
  1424. #endif /* CONFIG_DARWIN */
  1425. return 0;
  1426. }