/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"
static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
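
/* For reference, a plain-C sketch of what sad16_x2 computes: the SAD of pix1
 * against pix2 interpolated a half-pel to the right, where vec_avg rounds up,
 * i.e. (a + b + 1) >> 1. Illustrative only; not wired into DSPContext. */
static inline int sad16_x2_c_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += FFABS(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}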

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15] */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]  pix3iv: pix3[1]-pix3[16] */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a,b), avg(c,d)), but the rounding
           would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
           Instead, we have to split the pixel vectors into vectors of shorts,
           and do the averaging by hand. */

        /* Split the pixel vectors into shorts */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
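
/* For reference, the rounding sad16_xy2 implements, in plain C: each half-pel
 * sample is (a + b + c + d + 2) >> 2, which is exactly why the vector code
 * widens to shorts instead of chaining the round-up vec_avg. Illustrative
 * only; not wired into DSPContext. */
static inline int sad16_xy2_c_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int avg = (pix2[j] + pix2[j + 1] +
                       pix2[j + line_size] + pix2[j + line_size + 1] + 2) >> 2;
            s += FFABS(pix1[j] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}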

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}
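
/* Plain-C equivalent of pix_norm1 (sum of squared pixel values over a
 * 16x16 block); kept here as an illustrative reference only. */
static inline int pix_norm1_c_ref(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}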

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a-b)^2 = (a-b)^2. */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
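
/* Plain-C equivalent of sse16: sum of squared differences over a 16-wide
 * block. Since the operands are unsigned, |a-b|^2 == (a-b)^2, which is the
 * identity the vector code exploits. Illustrative reference only. */
static inline int sse16_c_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int d = pix1[j] - pix2[j];
            s += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}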

static int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

static void clear_block_altivec(DCTELEM *block) {
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}
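/* The eight 16-byte stores above cover 8 * 16 = 128 bytes, i.e. the 64
 * 16-bit coefficients of one 8x8 DCT block; block must be 16-byte aligned
 * for vec_st to hit the intended addresses. */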

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char*)dst);
        vsrc = vec_ld(i, (unsigned char*)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16, add the remaining bytes scalar */
    for (; (i < w); i++) {
        dst[i] += src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    // Hand-unrolling the loop by 4 gains about 15%;
    // minimum execution time goes from 74 to 60 cycles.
    // It's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // All this is on a 7450, tuning for the 7450.
#if 0
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld(0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, block);
        pixels += line_size;
        block  += line_size;
    }
#else
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
#endif
}
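/* Note the second loads above use offset 15 rather than 16: vec_ld rounds its
 * effective address down to a 16-byte boundary, so loading at pixels + 15
 * fetches the block holding the last byte actually needed. When pixels is
 * already aligned this re-reads the first block instead of touching the 16
 * bytes past the row, and the perm then only selects from the first vector. */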

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

static int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
    register const vector signed short vprod1 = (const vector signed short)
        { 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 = (const vector signed short)
        { 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 = (const vector signed short)
        { 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 = (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 = (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 = (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        /* we're in the 8x8 function, we only care for the first 8 */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0 = vec_sub(srcV, dstV); \
        op1  = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2  = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3  = vec_perm(but2, but2, perm3); \
        res  = vec_mladd(but2, vprod3, op3); \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    return sum;
}
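
/* How the code above decomposes the 8x8 Hadamard transform:
 * ONEITERBUTTERFLY does the horizontal pass of one row of differences. Each
 * perm/vec_mladd pair is one butterfly stage: the perm swaps elements at
 * distance 1, 2 and then 4, and vprod1/2/3 supply the +1/-1 signs, so
 * res = but +/- perm(but) in a single multiply-add. The vertical pass is
 * then done across the eight row results with plain vec_add/vec_sub
 * (line0..line7C), and the sum of absolute transformed differences is
 * reduced with vec_sum4s/vec_sums. */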

/*
 * 16x8 works with 16 elements; it allows us to avoid replicating loads, and
 * gives the compiler more room for scheduling. It's only used from
 * inside hadamard8_diff16_altivec.
 *
 * Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a
 * LOT of spill code; it seems gcc (unlike xlc) cannot keep everything in
 * registers by itself. The following code includes hand-made register
 * allocation. It's not clean, but on a 7450 the resulting code is much faster
 * (the best case falls from 700+ cycles to 550).
 *
 * xlc doesn't add spill code, but it doesn't know how to schedule for the
 * 7450, and its code isn't much faster than gcc-3.3's on the 7450 (but uses
 * 25% fewer instructions...).
 *
 * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
 * but xlc goes down to around 660 on the regular C code...
 */
static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
    register const vector signed short vprod1 __asm__ ("v16") =
        (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
    register const vector signed short vprod2 __asm__ ("v17") =
        (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
    register const vector signed short vprod3 __asm__ ("v18") =
        (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
    register const vector unsigned char perm1 __asm__ ("v19") =
        (const vector unsigned char)
        {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
         0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
    register const vector unsigned char perm2 __asm__ ("v20") =
        (const vector unsigned char)
        {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
         0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
    register const vector unsigned char perm3 __asm__ ("v21") =
        (const vector unsigned char)
        {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
         0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2) \
    { \
        register vector unsigned char src1 __asm__ ("v22"), \
                                      src2 __asm__ ("v23"), \
                                      dst1 __asm__ ("v24"), \
                                      dst2 __asm__ ("v25"), \
                                      srcO __asm__ ("v22"), \
                                      dstO __asm__ ("v23"); \
        \
        register vector signed short srcV  __asm__ ("v24"), \
                                     dstV  __asm__ ("v25"), \
                                     srcW  __asm__ ("v26"), \
                                     dstW  __asm__ ("v27"), \
                                     but0  __asm__ ("v28"), \
                                     but0S __asm__ ("v29"), \
                                     op1   __asm__ ("v30"), \
                                     but1  __asm__ ("v22"), \
                                     op1S  __asm__ ("v23"), \
                                     but1S __asm__ ("v24"), \
                                     op2   __asm__ ("v25"), \
                                     but2  __asm__ ("v26"), \
                                     op2S  __asm__ ("v27"), \
                                     but2S __asm__ ("v28"), \
                                     op3   __asm__ ("v29"), \
                                     op3S  __asm__ ("v30"); \
        \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 16, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 16, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        /* promote the unsigned chars to signed shorts */ \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */ \
        but0  = vec_sub(srcV, dstV); \
        but0S = vec_sub(srcW, dstW); \
        op1   = vec_perm(but0, but0, perm1); \
        but1  = vec_mladd(but0, vprod1, op1); \
        op1S  = vec_perm(but0S, but0S, perm1); \
        but1S = vec_mladd(but0S, vprod1, op1S); \
        op2   = vec_perm(but1, but1, perm2); \
        but2  = vec_mladd(but1, vprod2, op2); \
        op2S  = vec_perm(but1S, but1S, perm2); \
        but2S = vec_mladd(but1S, vprod2, op2S); \
        op3   = vec_perm(but2, but2, perm3); \
        res1  = vec_mladd(but2, vprod3, op3); \
        op3S  = vec_perm(but2S, but2S, perm3); \
        res2  = vec_mladd(but2S, vprod3, op3S); \
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS, line2BS,
                                 line1BS, line3BS, line4BS, line6BS, line5BS,
                                 line7BS, line0CS, line4CS, line1CS, line5CS,
                                 line2CS, line6CS, line3CS, line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    int score;
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8*stride;
        src += 8*stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    /* splat the constant 31: vec_splat_u32 only reaches 15, so build 15+15+1 */
    const vector unsigned int v_31 =
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang+i);
        vec_stl(m, 0, mag+i);
    }
}
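
/* For reference, roughly the scalar loop the vector code above replaces
 * (illustrative only): */
static inline void vorbis_inverse_coupling_c_ref(float *mag, float *ang,
                                                 int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}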

/* next one assumes that ((line_size % 8) == 0) */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
    c->clear_block = clear_block_altivec;
    c->add_bytes = add_bytes_altivec;
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}