/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"
static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16] */
        pix1v  = vec_ld( 0, pix1);
        pix2l  = vec_ld( 0, pix2);
        pix2r  = vec_ld(16, pix2);
        pix2v  = vec_perm(pix2l, pix2r, perm1);
        pix2iv = vec_perm(pix2l, pix2r, perm2);

        /* Calculate the average vector. */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
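/* For reference, a minimal scalar sketch of what sad16_x2_altivec computes
   (an illustrative addition, compiled out; the name is hypothetical): the
   SAD of pix1 against the x half-pel interpolation of pix2, where vec_avg's
   rounding matches the scalar (a + b + 1) >> 1. */
#if 0
static int sad16_x2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += abs(pix1[j] - ((pix2[j] + pix2[j + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif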
static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15] */
    pix2l = vec_ld( 0, pix2);
    pix2r = vec_ld(15, pix2);
    pix2v = vec_perm(pix2l, pix2r, perm);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15] */
        pix1v = vec_ld(0, pix1);
        pix2l = vec_ld( 0, pix3);
        pix2r = vec_ld(15, pix3);
        pix3v = vec_perm(pix2l, pix2r, perm);

        /* Calculate the average vector. */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
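/* Likewise, an illustrative scalar sketch (compiled out, hypothetical name)
   of the y half-pel SAD that sad16_y2_altivec vectorizes; pix3 is simply the
   next row. */
#if 0
static int sad16_y2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += abs(pix1[j] - ((pix2[j] + pix2[j + line_size] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif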
static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char avgv, t5;
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this fact
       to avoid a potentially expensive unaligned read, as well as some
       splitting and vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts. */
    pix2l  = vec_ld( 0, pix2);
    pix2r  = vec_ld(16, pix2);
    pix2v  = vec_perm(pix2l, pix2r, perm1);
    pix2iv = vec_perm(pix2l, pix2r, perm2);

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]  pix3iv: pix3[1]-pix3[16] */
        pix1v  = vec_ld(0, pix1);
        pix2l  = vec_ld( 0, pix3);
        pix2r  = vec_ld(16, pix3);
        pix3v  = vec_perm(pix2l, pix2r, perm1);
        pix3iv = vec_perm(pix2l, pix2r, perm2);

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
           and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
           rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
           it should be 1. Instead, we have to split the pixel vectors into
           vectors of shorts and do the averaging by hand. */

        /* Split the pixel vectors into shorts. */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them. */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result. */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2. */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
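/* An illustrative scalar sketch (compiled out, hypothetical name) of the xy2
   SAD above. Each reference pixel is the correctly rounded four-pixel
   average ((a + b + c + d + 2) >> 2), which is exactly what the 16-bit
   vector arithmetic above computes without the double rounding of nested
   vec_avg calls. */
#if 0
static int sad16_xy2_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    uint8_t *pix3 = pix2 + line_size;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            s += abs(pix1[j] - ((pix2[j] + pix2[j + 1] +
                                 pix3[j] + pix3[j + 1] + 2) >> 2));
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}
#endif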
static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2. */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Calculate a sum of abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear =
        (vector unsigned char){255, 255, 255, 255, 255, 255, 255, 255,
                               0, 0, 0, 0, 0, 0, 0, 0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Calculate a sum of abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);
    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels. */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        pixv = vec_perm(pixl, pixr, perm);

        /* Square the values, and add them to our sum. */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);
    return s;
}
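/* A scalar sketch (illustrative, compiled out, hypothetical name) of
   pix_norm1_altivec: the sum of squared pixel values over a 16x16 block;
   vec_msum does the multiply and the 4-way accumulate in one instruction. */
#if 0
static int pix_norm1_scalar(uint8_t *pix, int line_size)
{
    int i, j, s = 0;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}
#endif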
/**
 * Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
 * It's the sad8_altivec code above with squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    const vector unsigned char permclear =
        (vector unsigned char){255, 255, 255, 255, 255, 255, 255, 255,
                               0, 0, 0, 0, 0, 0, 0, 0};
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld( 0, pix1);
        vector unsigned char pix1r = vec_ld(15, pix1);
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);
    return s;
}
/**
 * Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
 * It's the sad16_altivec code above with squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2. */
        vector unsigned char pix2l = vec_ld( 0, pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Since we want to use unsigned chars, we can take advantage
           of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);
    return s;
}
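/* A scalar sketch (illustrative, compiled out, hypothetical name) of the SSE
   functions above. Because the operands are unsigned bytes,
   |a - b|^2 == (a - b)^2, so the absolute difference from max/min/sub can be
   squared directly by vec_msum. */
#if 0
static int sse16_scalar(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i, j, s = 0;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int d = pix1[j] - pix2[j];
            s += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return s;
}
#endif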
static int pix_sum_altivec(uint8_t *pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;
    int i;
    int s;

    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1. */
        vector unsigned char pixl = vec_ld( 0, pix);
        vector unsigned char pixr = vec_ld(15, pix);
        t1 = vec_perm(pixl, pixr, perm);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
static void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm = vec_lvsl(0, pixels);
    vector unsigned char bytes;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        vector unsigned char pixl = vec_ld( 0, pixels);
        vector unsigned char pixr = vec_ld(15, pixels);
        bytes = vec_perm(pixl, pixr, perm);

        // Convert the bytes into shorts.
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts, i * 16, (vector signed short *)block);

        pixels += line_size;
    }
}
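// A scalar sketch (illustrative, compiled out, hypothetical name) of
// get_pixels_altivec: widen an 8x8 block of unsigned bytes to 16-bit DCT
// coefficients, row by row.
#if 0
static void get_pixels_scalar(DCTELEM *restrict block, const uint8_t *pixels,
                              int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];
        pixels += line_size;
    }
}
#endif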
static void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm1 = vec_lvsl(0, s1);
    vector unsigned char perm2 = vec_lvsl(0, s2);
    vector unsigned char bytes, pixl, pixr;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        // The code below is a copy of the code above... This is a manual unroll.

        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        pixl = vec_ld( 0, s1);
        pixr = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl = vec_ld( 0, s2);
        pixr = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
static void clear_block_altivec(DCTELEM *block)
{
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}
static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
{
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed). */
    for (i = 0; (i + 15) < w; i += 16) {
        vdst = vec_ld(i, (unsigned char *)dst);
        vsrc = vec_ld(i, (unsigned char *)src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char *)dst);
    }
    /* If w is not a multiple of 16, add the remaining bytes one at a time. */
    for (; i < w; i++)
        dst[i] = dst[i] + src[i];
}
/* The next one assumes that ((line_size % 16) == 0). */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    // Hand-unrolling the loop by 4 gains about 15%;
    // minimum execution time goes from 74 to 60 cycles.
    // It's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // All this is on a 7450, tuning for the 7450.
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char *)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char *)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
}
/* The next one assumes that ((line_size % 16) == 0). */
#define op_avg(a, b) a = (((a) | (b)) - ((((a) ^ (b)) & 0xFEFEFEFEUL) >> 1))
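/* Illustrative note (not in the original file): op_avg is the classic
   bitwise trick for a packed, byte-wise rounded-up average. Per byte,
   (a | b) = (a & b) + (a ^ b), and the masked shift halves (a ^ b) without
   letting bits cross byte boundaries, so the whole expression equals
   (a & b) + ceil((a ^ b) / 2) = (a + b + 1) >> 1 for each byte. */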
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv   = vec_ld(0, block);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, perm);
        blockv   = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char *)block);
        pixels += line_size;
        block  += line_size;
    }
}
/* The next one assumes that ((line_size % 8) == 0). */
static void avg_pixels8_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8-byte aligned, so we're either in the left half of a
           16-byte line (16-byte aligned) or in the right half (not). */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv   = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }
}
/* The next one assumes that ((line_size % 8) == 0). */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
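/* An illustrative scalar sketch (compiled out, hypothetical name) of the xy2
   "put" above. The rounding variant adds 2 before the shift; the no_rnd
   variant below adds 1, which is why it carries vcone alongside vctwo. */
#if 0
static void put_pixels8_xy2_scalar(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            block[j] = (pixels[j] + pixels[j + 1] +
                        pixels[j + line_size] +
                        pixels[j + line_size + 1] + 2) >> 2;
        block  += line_size;
        pixels += line_size;
    }
}
#endif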
/* The next one assumes that ((line_size % 8) == 0). */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
/* The next one assumes that ((line_size % 16) == 0). */
static void put_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
/* The next one assumes that ((line_size % 16) == 0). */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h; i++) {
        blockv = vec_ld(0, block);
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
static int hadamard8_diff8x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
        register const vector signed short vprod1 = (const vector signed short)
            { 1, -1, 1, -1, 1, -1, 1, -1 };
        register const vector signed short vprod2 = (const vector signed short)
            { 1, 1, -1, -1, 1, 1, -1, -1 };
        register const vector signed short vprod3 = (const vector signed short)
            { 1, 1, 1, 1, -1, -1, -1, -1 };
        register const vector unsigned char perm1 = (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 = (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 = (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res)                                          \
    {                                                                     \
        register vector unsigned char src1, src2, srcO;                   \
        register vector unsigned char dst1, dst2, dstO;                   \
        register vector signed short srcV, dstV;                          \
        register vector signed short but0, but1, but2, op1, op2, op3;     \
        src1 = vec_ld(stride * i, src);                                   \
        src2 = vec_ld((stride * i) + 15, src);                            \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
        dst1 = vec_ld(stride * i, dst);                                   \
        dst2 = vec_ld((stride * i) + 15, dst);                            \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
        /* Promote the unsigned chars to signed shorts. */                \
        /* We're in the 8x8 function; we only care for the first 8. */    \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */                     \
        but0 = vec_sub(srcV, dstV);                                       \
        op1  = vec_perm(but0, but0, perm1);                               \
        but1 = vec_mladd(but0, vprod1, op1);                              \
        op2  = vec_perm(but1, but1, perm2);                               \
        but2 = vec_mladd(but1, vprod2, op2);                              \
        op3  = vec_perm(but2, but2, perm3);                               \
        res  = vec_mladd(but2, vprod3, op3);                              \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0  = vec_add(temp0, temp1);
        register vector signed short line1  = vec_sub(temp0, temp1);
        register vector signed short line2  = vec_add(temp2, temp3);
        register vector signed short line3  = vec_sub(temp2, temp3);
        register vector signed short line4  = vec_add(temp4, temp5);
        register vector signed short line5  = vec_sub(temp4, temp5);
        register vector signed short line6  = vec_add(temp6, temp7);
        register vector signed short line7  = vec_sub(temp6, temp7);
        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);
        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}
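/* An illustrative scalar sketch (compiled out, hypothetical name) of the 1-D
   transform that ONEITERBUTTERFLY vectorizes: three add/sub butterfly stages
   form an 8-point Hadamard transform. The functions above apply it along
   both axes of the src - dst difference block and sum the absolute values. */
#if 0
static void hadamard8_1d(int x[8])
{
    int stage, i, j;
    for (stage = 1; stage < 8; stage <<= 1) {   /* three butterfly stages */
        for (i = 0; i < 8; i += stage << 1) {
            for (j = i; j < i + stage; j++) {
                int a = x[j], b = x[j + stage];
                x[j]         = a + b;
                x[j + stage] = a - b;
            }
        }
    }
}
#endif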
/*
 * 16x8 works with 16 elements; it allows us to avoid replicating loads and
 * gives the compiler more room for scheduling. It's only used from
 * inside hadamard8_diff16_altivec.
 *
 * Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has
 * a LOT of spill code; it seems gcc (unlike xlc) cannot keep everything in
 * registers by itself. The following code includes hand-made register
 * allocation. It's not clean, but on a 7450 the resulting code is much
 * faster (the best case drops from 700+ cycles to 550).
 *
 * xlc doesn't add spill code, but it doesn't know how to schedule for the
 * 7450, and its code isn't much faster than gcc-3.3's on the 7450 (but uses
 * 25% fewer instructions...).
 *
 * On the 970, the hand-made register allocation is still a win (around 690
 * vs. around 780), but xlc goes down to around 660 on the regular C code...
 */
static int hadamard8_diff16x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char)vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short){ 1, -1, 1, -1, 1, -1, 1, -1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short){ 1, 1, -1, -1, 1, 1, -1, -1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short){ 1, 1, 1, 1, -1, -1, -1, -1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
             0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
             0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

#define ONEITERBUTTERFLY(i, res1, res2)                                   \
    {                                                                     \
        register vector unsigned char src1 __asm__ ("v22"),               \
                                      src2 __asm__ ("v23"),               \
                                      dst1 __asm__ ("v24"),               \
                                      dst2 __asm__ ("v25"),               \
                                      srcO __asm__ ("v22"),               \
                                      dstO __asm__ ("v23");               \
                                                                          \
        register vector signed short srcV  __asm__ ("v24"),               \
                                     dstV  __asm__ ("v25"),               \
                                     srcW  __asm__ ("v26"),               \
                                     dstW  __asm__ ("v27"),               \
                                     but0  __asm__ ("v28"),               \
                                     but0S __asm__ ("v29"),               \
                                     op1   __asm__ ("v30"),               \
                                     but1  __asm__ ("v22"),               \
                                     op1S  __asm__ ("v23"),               \
                                     but1S __asm__ ("v24"),               \
                                     op2   __asm__ ("v25"),               \
                                     but2  __asm__ ("v26"),               \
                                     op2S  __asm__ ("v27"),               \
                                     but2S __asm__ ("v28"),               \
                                     op3   __asm__ ("v29"),               \
                                     op3S  __asm__ ("v30");               \
                                                                          \
        src1 = vec_ld(stride * i, src);                                   \
        src2 = vec_ld((stride * i) + 16, src);                            \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
        dst1 = vec_ld(stride * i, dst);                                   \
        dst2 = vec_ld((stride * i) + 16, dst);                            \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
        /* Promote the unsigned chars to signed shorts. */                \
        srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)srcO); \
        dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
                                               (vector signed char)dstO); \
        /* subtractions inside the first butterfly */                     \
        but0  = vec_sub(srcV, dstV);                                      \
        but0S = vec_sub(srcW, dstW);                                      \
        op1   = vec_perm(but0, but0, perm1);                              \
        but1  = vec_mladd(but0, vprod1, op1);                             \
        op1S  = vec_perm(but0S, but0S, perm1);                            \
        but1S = vec_mladd(but0S, vprod1, op1S);                           \
        op2   = vec_perm(but1, but1, perm2);                              \
        but2  = vec_mladd(but1, vprod2, op2);                             \
        op2S  = vec_perm(but1S, but1S, perm2);                            \
        but2S = vec_mladd(but1S, vprod2, op2S);                           \
        op3   = vec_perm(but2, but2, perm3);                              \
        res1  = vec_mladd(but2, vprod3, op3);                             \
        op3S  = vec_perm(but2S, but2S, perm3);                            \
        res2  = vec_mladd(but2S, vprod3, op3S);                           \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;

        register vector signed short line0  = vec_add(temp0, temp1);
        register vector signed short line1  = vec_sub(temp0, temp1);
        register vector signed short line2  = vec_add(temp2, temp3);
        register vector signed short line3  = vec_sub(temp2, temp3);
        register vector signed short line4  = vec_add(temp4, temp5);
        register vector signed short line5  = vec_sub(temp4, temp5);
        register vector signed short line6  = vec_add(temp6, temp7);
        register vector signed short line7  = vec_sub(temp6, temp7);
        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);
        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);
        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);
        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}
static int hadamard8_diff16_altivec(/* MpegEncContext */ void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int score;

    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h == 16) {
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}
static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    const vector unsigned int v_31 = // XXX
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for (i = 0; i < blocksize; i += 4) {
        m = vec_ld(0, mag + i);
        a = vec_ld(0, ang + i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang + i);
        vec_stl(m, 0, mag + i);
    }
}
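/* A scalar sketch (illustrative, compiled out, hypothetical name) of the
   coupling above: the residue pair (mag, ang) is turned back into two
   spectra, with the signs of mag and ang deciding whether ang is added to or
   subtracted from mag. v_31, a vector of 31s, builds the sign-flip mask that
   replaces this branching in the vector code. */
#if 0
static void vorbis_inverse_coupling_scalar(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0f) {
            if (ang[i] > 0.0f) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0f) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif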
/* The next one assumes that ((line_size % 8) == 0). */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)
        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
void ff_dsputil_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;

    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1] = sse8_altivec;
    c->sse[0] = sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->add_bytes = add_bytes_altivec;
    if (!high_bit_depth) {
        c->get_pixels = get_pixels_altivec;
        c->clear_block = clear_block_altivec;
        c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
        /* The two functions do the same thing, so use the same code. */
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
        c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
        c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
    }

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
}