/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
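
/* For reference, a minimal scalar sketch of what sad16_x2 computes
 * (illustrative only, not wired into the dispatch table): the SAD of a
 * 16-wide block against its horizontally half-pel-averaged predictor.
 * vec_avg's round-up matches the (a + b + 1) >> 1 below. */
#if 0
static int sad16_x2_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif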

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Because pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read each time around the loop.
       Read unaligned pixels into our vector. The vector is as follows:
       pix2v: pix2[0]-pix2[15] */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    s = 0;

    /* Because pix3 = pix2 + line_size, the pix3 of one iteration becomes
       pix2 in the next iteration. We can use this fact to avoid a
       potentially expensive unaligned read, as well as some splitting and
       vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
           rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
           it should be 1. Instead, we have to split the pixel vectors into
           vectors of shorts and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
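
/* For reference, a minimal scalar sketch of the xy2 case (illustrative
 * only): each predictor pixel is the +2-rounded average of a 2x2
 * neighborhood, which is exactly what the short-widened adds and the
 * shift-by-two above compute. */
#if 0
static int sad16_xy2_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] +
                                   pix2[j + line_size] +
                                   pix2[j + line_size + 1] + 2) >> 2));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif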

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}
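
/* Scalar sketch of pix_norm1 (illustrative only): the sum of squares of a
 * 16x16 block, which vec_msum above accumulates 16 products at a time. */
#if 0
static int pix_norm1_scalar_sketch(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}
#endif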

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
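
/* Scalar sketch shared by sse8/sse16 (illustrative only). Since the pixels
 * are unsigned, |a - b|^2 == (a - b)^2, so squaring the unsigned max/min
 * difference above gives the same result as squaring a signed subtract. */
#if 0
static int sse_scalar_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h, int w)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++) {
            int d = pix1[j] - pix2[j];
            s += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif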

static int pix_sum_altivec(uint8_t *pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}
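
/* Scalar sketch of get_pixels (illustrative only): widen an 8x8 block of
 * bytes to 16-bit DCT coefficients, one row per vec_mergeh/vec_st pair. */
#if 0
static void get_pixels_scalar_sketch(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[i * 8 + j] = pixels[j];
        pixels += line_size;
    }
}
#endif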

static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void clear_block_altivec(DCTELEM *block)
{
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
{
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* If w is not a multiple of 16, add (not copy) the tail bytes too. */
    for (; (i < w); i++) {
        dst[i] = dst[i] + src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
    // Hand-unrolling the loop by 4 gains about 15%;
    // the minimum execution time goes from 74 to 60 cycles.
    // It's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad: 74 cycles again.
    // All this is on a 7450, tuning for the 7450.
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, block);
        pixels += line_size;
        block  += line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
#endif
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
}
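
/* A note on the offset-15 loads above (our reading of the code, for
 * reference): vec_ld ignores the low four address bits, so loading at
 * offset 15 fetches the aligned quadword holding the last byte of the
 * row. When pixels is 16-byte aligned this is the same quadword as the
 * offset-0 load and perm selects only from the first vector; when it is
 * unaligned it is the needed second quadword. Either way, nothing past
 * the end of the row's last quadword is touched, unlike an offset-16
 * load on an aligned pointer. */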

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so we're either in the left half
           of a quadword (16-byte aligned) or in the right half (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}
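
/* The only difference between the two functions above is the rounding
 * bias folded into the running row sum: the rounded version adds 2
 * before the >> 2, the no_rnd version adds 1. In scalar terms
 * (illustrative only), per destination pixel:
 *   rnd:    dst[j] = (a + b + c + d + 2) >> 2;
 *   no_rnd: dst[j] = (a + b + c + d + 1) >> 2;
 * where a, b, c, d are the four neighboring source pixels. */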

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}

static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
        temp5, temp6, temp7;

    POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
    {
        register const vector signed short vprod1 = (const vector signed short)
            { 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 = (const vector signed short)
            { 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 = (const vector signed short)
            { 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 = (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 = (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 = (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* Promote the unsigned chars to signed shorts. */ \
        /* We're in the 8x8 function, so we only care about the first 8. */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0 = vec_sub(srcV, dstV); \
        op1  = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2  = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3  = vec_perm(but2, but2, perm3); \
        res  = vec_mladd(but2, vprod3, op3); \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
    return sum;
}
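
/* In scalar terms (illustrative reading of the function above):
 * hadamard8_diff8x8 forms d[j] = src[j] - dst[j] for each row, then
 * applies three horizontal butterfly stages (pair distance 1, 2, 4),
 * where each stage maps (x, y) -> (x + y, x - y). ONEITERBUTTERFLY does
 * this with a swapped vec_perm plus vec_mladd against a +/-1 vector, so
 * both outputs of a stage are produced at once. The line0..line7C adds
 * and subs are the vertical stages, and the score is the sum of absolute
 * values of the fully transformed difference block. */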

/*
  16x8 works with 16 elements; it allows us to avoid replicating loads, and
  gives the compiler more room for scheduling. It's only used from inside
  hadamard8_diff16_altivec.

  Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a
  LOT of spill code; it seems gcc (unlike xlc) cannot keep everything in
  registers by itself. The following code includes hand-made register
  allocation. It's not clean, but on a 7450 the resulting code is much
  faster (the best case falls from 700+ cycles to 550).

  xlc doesn't add spill code, but it doesn't know how to schedule for the
  7450, and its code isn't much faster than gcc-3.3's on the 7450 (but uses
  25% fewer instructions...).

  On the 970, the hand-made RA is still a win (around 690 vs. around 780),
  but xlc goes down to around 660 on the regular C code...
*/
static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
        register vector unsigned char src1 __asm__ ("v22"), \
                                      src2 __asm__ ("v23"), \
                                      dst1 __asm__ ("v24"), \
                                      dst2 __asm__ ("v25"), \
                                      srcO __asm__ ("v22"), \
                                      dstO __asm__ ("v23"); \
        \
        register vector signed short srcV  __asm__ ("v24"), \
                                     dstV  __asm__ ("v25"), \
                                     srcW  __asm__ ("v26"), \
                                     dstW  __asm__ ("v27"), \
                                     but0  __asm__ ("v28"), \
                                     but0S __asm__ ("v29"), \
                                     op1   __asm__ ("v30"), \
                                     but1  __asm__ ("v22"), \
                                     op1S  __asm__ ("v23"), \
                                     but1S __asm__ ("v24"), \
                                     op2   __asm__ ("v25"), \
                                     but2  __asm__ ("v26"), \
                                     op2S  __asm__ ("v27"), \
                                     but2S __asm__ ("v28"), \
                                     op3   __asm__ ("v29"), \
                                     op3S  __asm__ ("v30"); \
        \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 16, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 16, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0  = vec_sub(srcV, dstV); \
        but0S = vec_sub(srcW, dstW); \
        op1   = vec_perm(but0, but0, perm1); \
        but1  = vec_mladd(but0, vprod1, op1); \
        op1S  = vec_perm(but0S, but0S, perm1); \
        but1S = vec_mladd(but0S, vprod1, op1S); \
        op2   = vec_perm(but1, but1, perm2); \
        but2  = vec_mladd(but1, vprod2, op2); \
        op2S  = vec_perm(but1S, but1S, perm2); \
        but2S = vec_mladd(but1S, vprod2, op2S); \
        op3   = vec_perm(but2, but2, perm3); \
        res1  = vec_mladd(but2, vprod3, op3); \
        op3S  = vec_perm(but2S, but2S, perm3); \
        res2  = vec_mladd(but2S, vprod3, op3S); \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;

        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);

        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);

        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
    int score;

    POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
    return score;
}

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = //XXX
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang+i);
        vec_stl(m, 0, mag+i);
    }
}
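
/* Scalar sketch of the coupling this vectorizes (our hedged reading of
 * the vector code above, not copied from the decoder):
 *   if (mag > 0)  ang > 0 ? (ang = mag - ang)
 *                         : (tmp = ang, ang = mag, mag += tmp);
 *   else          ang > 0 ? (ang += mag)
 *                         : (tmp = ang, ang = mag, mag -= tmp);
 * The vector code gets the same effect branch-free: it flips the sign of
 * ang where mag <= 0, then uses the sign of the original ang to route
 * the correction into either mag or ang. */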

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                                 vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                                 vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
}

void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
    c->clear_block = clear_block_altivec;
    c->add_bytes = add_bytes_altivec;
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}