/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
#else /* CONFIG_DARWIN */
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;

static void sigill_handler (int sig)
{
    if (!canjump) {
        signal (sig, SIG_DFL);
        raise (sig);
    }

    canjump = 0;
    siglongjmp (jmpbuf, 1);
}
#endif /* CONFIG_DARWIN */
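
/*
 * A note on the unaligned-load idiom used throughout this file: vec_ld
 * ignores the low four bits of its effective address, so a potentially
 * unaligned 16-byte read is built from two aligned loads plus a vec_perm
 * whose control vector comes from vec_lvsl. The functions below inline
 * these three steps by hand; as a sketch only (this helper is not part of
 * the file and is never compiled), the idiom is:
 */
#if 0
static inline vector unsigned char unaligned_load(const uint8_t *p)
{
    vector unsigned char hi = vec_ld(0, p);   /* aligned quadword covering p */
    vector unsigned char lo = vec_ld(16, p);  /* the following aligned quadword */
    return vec_perm(hi, lo, vec_lvsl(0, p));  /* shift the 16 wanted bytes into place */
}
#endif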
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0]-pix1[15]
           pix2v:  pix2[0]-pix2[15]    pix2iv: pix2[1]-pix2[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
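
/*
 * All of the SAD/SSE routines in this file reduce their result the same
 * way: vec_sum4s accumulates the sixteen byte differences into four 32-bit
 * partial sums, vec_sums then folds those four (plus element 3 of zero)
 * into element 3 of the result, vec_splat copies that element across the
 * whole vector, and vec_ste stores a single element into the
 * 16-byte-aligned scalar s, which is returned.
 */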
int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read each
       time around the loop.
       Read unaligned pixels into our vector. The vector is as follows:
       pix2v:  pix2[0]-pix2[15]
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0]-pix1[15]
           pix3v:  pix3[0]-pix3[15]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    uint8_t *pix3 = pix2 + line_size;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    const_vector unsigned short two = (const_vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    s = 0;

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v:  pix2[0]-pix2[15]    pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v:  pix1[0]-pix1[15]
           pix3v:  pix3[0]-pix3[15]    pix3iv: pix3[1]-pix3[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /*
           Note that Altivec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts,
           and do the averaging by hand.
        */
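
        /*
         * Worked example of the rounding error described above: for the
         * four pixels 3, 0, 0, 1 the correct average is (3+0+0+1+2)>>2 = 1,
         * but the cascaded form gives avg(avg(3,0), avg(0,1)) =
         * avg(2,1) = 2, since vec_avg rounds each pairwise average up.
         */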
        /* Split the pixel vectors into shorts */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);
        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}
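
/*
 * Note on vec_msum, used above and in the SSE routines below: for unsigned
 * char operands it multiplies corresponding bytes and adds each group of
 * four products into a 32-bit accumulator, so one instruction per line
 * both squares the pixels and maintains the four partial sums.
 */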
/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /*
           Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2.
        */
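
        /*
         * The max/min/sub sequence below is used instead of a plain
         * subtraction because the pixels are unsigned:
         * vec_sub(vec_max(a,b), vec_min(a,b)) yields |a-b| without
         * wraparound, and |a-b|^2 is exactly (a-b)^2.
         */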
        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /*
           Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2.
        */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s __attribute__((aligned(16)));

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++)
    {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}
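
/*
 * Note: vec_mergeh(zero, bytes) interleaves a zero byte in front of each of
 * the eight leftmost pixel bytes, which on big-endian PowerPC zero-extends
 * them to 16-bit shorts. get_pixels above and diff_pixels below both rely
 * on this to widen pixels before storing or subtracting.
 */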
void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                         const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++)
    {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
    for (i = 0; i + 7 < w; i += 8) {
        dst[i+0] += src[i+0];
        dst[i+1] += src[i+1];
        dst[i+2] += src[i+2];
        dst[i+3] += src[i+3];
        dst[i+4] += src[i+4];
        dst[i+5] += src[i+5];
        dst[i+6] += src[i+6];
        dst[i+7] += src[i+7];
    }
    for (; i < w; i++)
        dst[i+0] += src[i+0];
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    for (i = 0; i + 15 < w; i += 16)
    {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16 */
    for (; i < w; i++)
    {
        dst[i] += src[i];
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
    for (i = 0; i < h; i++) {
        *((uint32_t*)(block)) = LD32(pixels);
        *((uint32_t*)(block+4)) = LD32(pixels+4);
        *((uint32_t*)(block+8)) = LD32(pixels+8);
        *((uint32_t*)(block+12)) = LD32(pixels+12);
        pixels += line_size;
        block += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
    // hand-unrolling the loop by 4 gains about 15%
    // minimum execution time goes from 74 to 60 cycles
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // all this is on a 7450, tuning for the 7450
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        pixels += line_size;
        block += line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
        pixelsv2B = vec_ld(16 + line_size, (unsigned char*)pixels);
        pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
        pixelsv2C = vec_ld(16 + line_size_2, (unsigned char*)pixels);
        pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
        pixelsv2D = vec_ld(16 + line_size_3, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block += line_size_4;
    }
#endif
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
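/* op_avg is a SWAR (SIMD-within-a-register) rounded-up average of four
   bytes packed in a uint32_t, using the identity avg_ceil(a,b) =
   (a|b) - ((a^b)>>1) in each byte lane; the 0xFEFEFEFEUL mask clears each
   byte's low bit of (a^b) before the shift so that nothing leaks into the
   neighbouring lane. */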
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
    for (i = 0; i < h; i++) {
        op_avg(*((uint32_t*)(block)),LD32(pixels));
        op_avg(*((uint32_t*)(block+4)),LD32(pixels+4));
        op_avg(*((uint32_t*)(block+8)),LD32(pixels+8));
        op_avg(*((uint32_t*)(block+12)),LD32(pixels+12));
        pixels += line_size;
        block += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
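
/*
 * The 8-pixel-wide functions below store only half a vector, and AltiVec
 * has no partial store. They therefore load the destination quadword,
 * splice the new 8 bytes into the correct half with vec_perm and a vcprm()
 * constant (block is 8-byte aligned, so it is either the left or the right
 * half of the quadword), and store all 16 bytes back.
 */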
/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
    for (i = 0; i < h; i++) {
        *((uint32_t *) (block)) =
            (((*((uint32_t *) (block))) |
              ((((const struct unaligned_32 *) (pixels))->l))) -
             ((((*((uint32_t *) (block))) ^
               ((((const struct unaligned_32 *) (pixels))->l))) &
              0xFEFEFEFEUL) >> 1));
        *((uint32_t *) (block + 4)) =
            (((*((uint32_t *) (block + 4))) |
              ((((const struct unaligned_32 *) (pixels + 4))->l))) -
             ((((*((uint32_t *) (block + 4))) ^
               ((((const struct unaligned_32 *) (pixels + 4))->l))) &
              0xFEFEFEFEUL) >> 1));
        pixels += line_size;
        block += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
    for (i = 0; i < h; i++) {
        /*
           block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not)
        */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside)
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        }
        else
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 8) == 0) */
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;

    POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
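
/*
 * The no-rounding variant below is identical to put_pixels8_xy2_altivec
 * except for the bias added before the final >>2: vcone (1) instead of
 * vctwo (2) in the vector code, matching 0x01010101UL instead of
 * 0x02020202UL in the scalar reference code.
 */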
/* next one assumes that ((line_size % 8) == 0) */
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
    for (j = 0; j < 4; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3,
                                   pixelssum3, pixelssum4, temp4;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
    for (j = 0; j < 4; j++) {
        int i;
        const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;

        pixels += line_size;
        for (i = 0; i < h; i += 2) {
            uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
            uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            a = (((const struct unaligned_32 *) (pixels))->l);
            b = (((const struct unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3,
                                   pixelssum3, pixelssum4, temp4;
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
    register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
int has_altivec(void)
{
#ifdef CONFIG_DARWIN
    int sels[2] = {CTL_HW, HW_VECTORUNIT};
    int has_vu = 0;
    size_t len = sizeof(has_vu);
    int err;

    err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

    if (err == 0) return (has_vu != 0);
#else /* CONFIG_DARWIN */
    /* no Darwin, do it the brute-force way */
    /* this is borrowed from the libmpeg2 library */
    {
        signal (SIGILL, sigill_handler);
        if (sigsetjmp (jmpbuf, 1)) {
            signal (SIGILL, SIG_DFL);
        } else {
            canjump = 1;

            asm volatile ("mtspr 256, %0\n\t"
                          "vand %%v0, %%v0, %%v0"
                          :
                          : "r" (-1));

            signal (SIGILL, SIG_DFL);
            return 1;
        }
    }
#endif /* CONFIG_DARWIN */
    return 0;
}
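
/*
 * Illustrative only: a sketch of how a caller might use has_altivec() to
 * install the routines above at init time. The DSPContext field names here
 * are assumptions for the example, not something this file defines.
 */
#if 0
void dsputil_init_altivec_example(DSPContext *c)
{
    if (has_altivec()) {
        c->pix_abs16x16 = sad16_altivec;   /* hypothetical field name */
        c->pix_abs8x8   = sad8_altivec;    /* hypothetical field name */
        c->sse[0]       = sse16_altivec;   /* hypothetical field name */
        c->sse[1]       = sse8_altivec;    /* hypothetical field name */
    }
}
#endif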