/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"
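
/* All of the SAD/SSE functions below share one idiom for unaligned reads:
 * vec_ld() can only load from a 16-byte-aligned address, so we load the
 * two aligned vectors straddling the data and extract the wanted bytes
 * with vec_perm(), using the shift vector returned by vec_lvsl():
 *
 *     vector unsigned char l = vec_ld( 0, p);
 *     vector unsigned char r = vec_ld(15, p);
 *     vector unsigned char v = vec_perm(l, r, vec_lvsl(0, p));
 *
 * The offset of the second load (15 vs. 16) only matters when more than
 * 16 consecutive bytes are needed, e.g. for the p[1]-p[16] window. */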

static int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0);
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
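    /* Adding 1 to every element of the lvsl permute vector shifts the
     * selection window one byte right, so perm2 extracts pix2[1]-pix2[16]
     * from the same two aligned loads that perm1 uses for pix2[0]-pix2[15]. */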
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s   = 0;
    sad = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
         * pix1v: pix1[0] - pix1[15]
         * pix2v: pix2[0] - pix2[15]      pix2iv: pix2[1] - pix2[16] */
        pix1v  = vec_ld(0,  pix1);
        pix2l  = vec_ld(0,  pix2);
        pix2r  = vec_ld(16, pix2);
        pix2v  = vec_perm(pix2l, pix2r, perm1);
        pix2iv = vec_perm(pix2l, pix2r, perm2);

        /* Calculate the average vector. */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s   = 0;
    sad = (vector unsigned int) vec_splat_u32(0);
    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
     * iteration becomes pix2 in the next iteration. We can use this
     * fact to avoid a potentially expensive unaligned read each
     * time around the loop.
     * Read unaligned pixels into our vectors. The vectors are as follows:
     * pix2v: pix2[0] - pix2[15] */
    pix2l = vec_ld(0,  pix2);
    pix2r = vec_ld(15, pix2);
    pix2v = vec_perm(pix2l, pix2r, perm);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
         * pix1v: pix1[0] - pix1[15]
         * pix3v: pix3[0] - pix3[15] */
        pix1v = vec_ld(0,  pix1);

        pix2l = vec_ld(0,  pix3);
        pix2r = vec_ld(15, pix3);
        pix3v = vec_perm(pix2l, pix2r, perm);

        /* Calculate the average vector. */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short) vec_splat_u16(2);
    vector unsigned char avgv, t5;
    vector unsigned char perm1 = vec_lvsl(0, pix2);
    vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
    vector unsigned char pix2l, pix2r;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int) vec_splat_u32(0);

    s = 0;

    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
     * iteration becomes pix2 in the next iteration. We can use this
     * fact to avoid a potentially expensive unaligned read, as well
     * as some splitting, and vector addition each time around the loop.
     * Read unaligned pixels into our vectors. The vectors are as follows:
     * pix2v: pix2[0] - pix2[15]   pix2iv: pix2[1] - pix2[16]
     * Split the pixel vectors into shorts. */
    pix2l  = vec_ld(0,  pix2);
    pix2r  = vec_ld(16, pix2);
    pix2v  = vec_perm(pix2l, pix2r, perm1);
    pix2iv = vec_perm(pix2l, pix2r, perm2);

    pix2hv  = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv  = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Read unaligned pixels into our vectors. The vectors are as follows:
         * pix1v: pix1[0] - pix1[15]
         * pix3v: pix3[0] - pix3[15]   pix3iv: pix3[1] - pix3[16] */
        pix1v  = vec_ld(0, pix1);

        pix2l  = vec_ld(0,  pix3);
        pix2r  = vec_ld(16, pix3);
        pix3v  = vec_perm(pix2l, pix2r, perm1);
        pix3iv = vec_perm(pix2l, pix2r, perm2);

        /* Note that AltiVec does have vec_avg, but this works on vector pairs
         * and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
         * rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
         * it should be 1. Instead, we have to split the pixel vectors into
         * vectors of shorts and do the averaging by hand. */
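        /* Concretely, with a = 3, b = 0, c = 0, d = 1:
         * vec_avg(3, 0) = (3 + 0 + 1) >> 1 = 2 and vec_avg(0, 1) = 1, so
         * avg(avg(a, b), avg(c, d)) = avg(2, 1) = 2, while the exact
         * half-pel value is (3 + 0 + 0 + 1 + 2) >> 2 = 1. The 16-bit path
         * below computes (a + b + c + d + 2) >> 2 with a single rounding. */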
        /* Split the pixel vectors into shorts. */
        pix3hv  = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv  = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them. */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result. */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2. */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2. */
        vector unsigned char pix2l = vec_ld(0,  pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Calculate a sum of abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    const vector unsigned char permclear =
        (vector unsigned char)
        { 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
         * Since we're reading 16 pixels, and actually only want 8,
         * mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld(0, pix1);
        vector unsigned char pix1r = vec_ld(7, pix1);
        vector unsigned char pix2l = vec_ld(0, pix2);
        vector unsigned char pix2r = vec_ld(7, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Calculate a sum of abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int) vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels. */
        vector unsigned char pixl = vec_ld(0,  pix);
        vector unsigned char pixr = vec_ld(15, pix);
        pixv = vec_perm(pixl, pixr, perm);

        /* Square the values, and add them to our sum. */
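        /* (vec_msum multiplies each unsigned byte by itself and adds the
         * four adjacent 16-bit products into the matching 32-bit lane of
         * sv, so squaring and partial summation take one instruction.) */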
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s. */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);
    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
 * It's the sad8_altivec code above with squaring added.
 */
static int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    const vector unsigned char permclear =
        (vector unsigned char)
        { 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
    vector unsigned char perm1 = vec_lvsl(0, pix1);
    vector unsigned char perm2 = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2.
         * Since we're reading 16 pixels, and actually only want 8,
         * mask out the last 8 pixels. The 0s don't change the sum. */
        vector unsigned char pix1l = vec_ld(0, pix1);
        vector unsigned char pix1r = vec_ld(7, pix1);
        vector unsigned char pix2l = vec_ld(0, pix2);
        vector unsigned char pix2r = vec_ld(7, pix2);
        t1 = vec_and(vec_perm(pix1l, pix1r, perm1), permclear);
        t2 = vec_and(vec_perm(pix2l, pix2r, perm2), permclear);

        /* Since we want to use unsigned chars, we can take advantage
         * of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
 * It's the sad16_altivec code above with squaring added.
 */
static int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s;
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix2);
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2. */
        vector unsigned char pix2l = vec_ld(0,  pix2);
        vector unsigned char pix2r = vec_ld(15, pix2);
        t1 = vec_ld(0, pix1);
        t2 = vec_perm(pix2l, pix2r, perm);

        /* Since we want to use unsigned chars, we can take advantage
         * of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */

        /* Calculate abs differences vector. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

static int pix_sum_altivec(uint8_t *pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0);
    vector unsigned char perm = vec_lvsl(0, pix);
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s;

    sad = (vector unsigned int) vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1. */
        vector unsigned char pixl = vec_ld(0,  pix);
        vector unsigned char pixr = vec_ld(15, pix);
        t1 = vec_perm(pixl, pixr, perm);

        /* Add each 4 pixel group together and put 4 results into sad. */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm = vec_lvsl(0, pixels);
    vector unsigned char bytes;
    const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++) {
        /* Read potentially unaligned pixels.
         * We're reading 16 pixels, and actually only want 8,
         * but we simply ignore the extras. */
        vector unsigned char pixl = vec_ld(0, pixels);
        vector unsigned char pixr = vec_ld(7, pixels);
        bytes = vec_perm(pixl, pixr, perm);

        // Convert the bytes into shorts.
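        // (On big-endian AltiVec, vec_mergeh(zero, bytes) interleaves a
        // zero byte ahead of each pixel byte, which zero-extends the
        // first 8 pixels to 16-bit shorts.)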
        shorts = (vector signed short) vec_mergeh(zero, bytes);

        // Save the data to the block, we assume the block is 16-byte aligned.
        vec_st(shorts, i * 16, (vector signed short *) block);

        pixels += line_size;
    }
}

static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm1 = vec_lvsl(0, s1);
    vector unsigned char perm2 = vec_lvsl(0, s2);
    vector unsigned char bytes, pixl, pixr;
    const vector unsigned char zero = (const vector unsigned char) vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++) {
        /* Read potentially unaligned pixels.
         * We're reading 16 pixels, and actually only want 8,
         * but we simply ignore the extras. */
        pixl  = vec_ld(0,  s1);
        pixr  = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short) vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl  = vec_ld(0,  s2);
        pixr  = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short) vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block, we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *) block);

        s1    += stride;
        s2    += stride;
        block += 8;

        /* The code below is a copy of the code above...
         * This is a manual unroll. */

        /* Read potentially unaligned pixels.
         * We're reading 16 pixels, and actually only want 8,
         * but we simply ignore the extras. */
        pixl  = vec_ld(0,  s1);
        pixr  = vec_ld(15, s1);
        bytes = vec_perm(pixl, pixr, perm1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short) vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels.
        pixl  = vec_ld(0,  s2);
        pixr  = vec_ld(15, s2);
        bytes = vec_perm(pixl, pixr, perm2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short) vec_mergeh(zero, bytes);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block, we assume the block is 16-byte aligned.
        vec_st(shorts1, 0, (vector signed short *) block);

        s1    += stride;
        s2    += stride;
        block += 8;
    }
}
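
/* An 8x8 block of int16_t is 128 bytes, so clearing it takes exactly
 * eight 16-byte aligned vector stores. */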
static void clear_block_altivec(int16_t *block)
{
    LOAD_ZERO;
    vec_st(zero_s16v,   0, block);
    vec_st(zero_s16v,  16, block);
    vec_st(zero_s16v,  32, block);
    vec_st(zero_s16v,  48, block);
    vec_st(zero_s16v,  64, block);
    vec_st(zero_s16v,  80, block);
    vec_st(zero_s16v,  96, block);
    vec_st(zero_s16v, 112, block);
}

static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
{
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed). */
    for (i = 0; i + 15 < w; i += 16) {
        vdst = vec_ld(i, (unsigned char *) dst);
        vsrc = vec_ld(i, (unsigned char *) src);
        vdst = vec_add(vsrc, vdst);
        vec_st(vdst, i, (unsigned char *) dst);
    }
    /* Handle the tail if w is not a multiple of 16. */
    for (; i < w; i++)
        dst[i] += src[i];
}

static int hadamard8_diff8x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
                                     uint8_t *src, int stride, int h)
{
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char) vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
    {
        register const vector signed short vprod1 =
            (const vector signed short) { 1, -1, 1, -1, 1, -1, 1, -1 };
        register const vector signed short vprod2 =
            (const vector signed short) { 1, 1, -1, -1, 1, 1, -1, -1 };
        register const vector signed short vprod3 =
            (const vector signed short) { 1, 1, 1, 1, -1, -1, -1, -1 };
        register const vector unsigned char perm1 =
            (const vector unsigned char)
            { 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
              0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D };
        register const vector unsigned char perm2 =
            (const vector unsigned char)
            { 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
              0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B };
        register const vector unsigned char perm3 =
            (const vector unsigned char)
            { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
              0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
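        /* Each butterfly stage below computes a + b and a - b for pairs of
         * 16-bit elements without cross-element moves: the permN vector
         * swaps the two halves of each pair (adjacent shorts, then pairs
         * of shorts, then quads), and vec_mladd multiplies by the +1/-1
         * pattern in vprodN before adding the swapped copy, so a single
         * mladd yields the sums and the differences in place. */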
#define ONEITERBUTTERFLY(i, res)                                            \
    {                                                                       \
        register vector unsigned char src1, src2, srcO;                     \
        register vector unsigned char dst1, dst2, dstO;                     \
        register vector signed short srcV, dstV;                            \
        register vector signed short but0, but1, but2, op1, op2, op3;       \
        src1 = vec_ld(stride * i, src);                                     \
        src2 = vec_ld((stride * i) + 15, src);                              \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));             \
        dst1 = vec_ld(stride * i, dst);                                     \
        dst2 = vec_ld((stride * i) + 15, dst);                              \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));             \
        /* Promote the unsigned chars to signed shorts. */                  \
        /* We're in the 8x8 function, we only care for the first 8. */      \
        srcV = (vector signed short) vec_mergeh((vector signed char) vzero, \
                                                (vector signed char) srcO); \
        dstV = (vector signed short) vec_mergeh((vector signed char) vzero, \
                                                (vector signed char) dstO); \
        /* subtractions inside the first butterfly */                       \
        but0 = vec_sub(srcV, dstV);                                         \
        op1  = vec_perm(but0, but0, perm1);                                 \
        but1 = vec_mladd(but0, vprod1, op1);                                \
        op2  = vec_perm(but1, but1, perm2);                                 \
        but2 = vec_mladd(but1, vprod2, op2);                                \
        op3  = vec_perm(but2, but2, perm3);                                 \
        res  = vec_mladd(but2, vprod3, op3);                                \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
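    /* The macro above performed the horizontal (within-row) transform;
     * the add/sub stages below are the three vertical butterfly stages
     * across the eight rows. Summing the absolute values of the fully
     * transformed differences gives the SATD score. */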
    {
        register vector signed int vsum;
        register vector signed short line0  = vec_add(temp0, temp1);
        register vector signed short line1  = vec_sub(temp0, temp1);
        register vector signed short line2  = vec_add(temp2, temp3);
        register vector signed short line3  = vec_sub(temp2, temp3);
        register vector signed short line4  = vec_add(temp4, temp5);
        register vector signed short line5  = vec_sub(temp4, temp5);
        register vector signed short line6  = vec_add(temp6, temp7);
        register vector signed short line7  = vec_sub(temp6, temp7);
        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);
        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int) vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

/*
 * 16x8 works with 16 elements; it allows us to avoid replicating loads
 * and gives the compiler more room for scheduling. It's only used from
 * inside hadamard8_diff16_altivec.
 *
 * Unfortunately, gcc-3.3 seems to be a bit dumb, and the compiled code has
 * a LOT of spill code; it seems gcc (unlike xlc) cannot keep everything in
 * registers by itself. The following code includes hand-made register
 * allocation. It's not clean, but on a 7450 the resulting code is much
 * faster (the best case falls from 700+ cycles to 550).
 *
 * xlc doesn't add spill code, but it doesn't know how to schedule for the
 * 7450, and its code isn't much faster than gcc-3.3's on the 7450 (but
 * uses 25% fewer instructions...).
 *
 * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
 * but xlc goes down to around 660 on the regular C code...
 */
static int hadamard8_diff16x8_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
                                      uint8_t *src, int stride, int h)
{
    int sum;
    register vector signed short
        temp0 __asm__ ("v0"),
        temp1 __asm__ ("v1"),
        temp2 __asm__ ("v2"),
        temp3 __asm__ ("v3"),
        temp4 __asm__ ("v4"),
        temp5 __asm__ ("v5"),
        temp6 __asm__ ("v6"),
        temp7 __asm__ ("v7");
    register vector signed short
        temp0S __asm__ ("v8"),
        temp1S __asm__ ("v9"),
        temp2S __asm__ ("v10"),
        temp3S __asm__ ("v11"),
        temp4S __asm__ ("v12"),
        temp5S __asm__ ("v13"),
        temp6S __asm__ ("v14"),
        temp7S __asm__ ("v15");
    register const vector unsigned char vzero __asm__ ("v31") =
        (const vector unsigned char) vec_splat_u8(0);
    {
        register const vector signed short vprod1 __asm__ ("v16") =
            (const vector signed short) { 1, -1, 1, -1, 1, -1, 1, -1 };
        register const vector signed short vprod2 __asm__ ("v17") =
            (const vector signed short) { 1, 1, -1, -1, 1, 1, -1, -1 };
        register const vector signed short vprod3 __asm__ ("v18") =
            (const vector signed short) { 1, 1, 1, 1, -1, -1, -1, -1 };
        register const vector unsigned char perm1 __asm__ ("v19") =
            (const vector unsigned char)
            { 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
              0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D };
        register const vector unsigned char perm2 __asm__ ("v20") =
            (const vector unsigned char)
            { 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
              0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B };
        register const vector unsigned char perm3 __asm__ ("v21") =
            (const vector unsigned char)
            { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
              0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
#define ONEITERBUTTERFLY(i, res1, res2)                                     \
    {                                                                       \
        register vector unsigned char                                       \
            src1 __asm__ ("v22"),                                           \
            src2 __asm__ ("v23"),                                           \
            dst1 __asm__ ("v24"),                                           \
            dst2 __asm__ ("v25"),                                           \
            srcO __asm__ ("v22"),                                           \
            dstO __asm__ ("v23");                                           \
                                                                            \
        register vector signed short                                        \
            srcV  __asm__ ("v24"),                                          \
            dstV  __asm__ ("v25"),                                          \
            srcW  __asm__ ("v26"),                                          \
            dstW  __asm__ ("v27"),                                          \
            but0  __asm__ ("v28"),                                          \
            but0S __asm__ ("v29"),                                          \
            op1   __asm__ ("v30"),                                          \
            but1  __asm__ ("v22"),                                          \
            op1S  __asm__ ("v23"),                                          \
            but1S __asm__ ("v24"),                                          \
            op2   __asm__ ("v25"),                                          \
            but2  __asm__ ("v26"),                                          \
            op2S  __asm__ ("v27"),                                          \
            but2S __asm__ ("v28"),                                          \
            op3   __asm__ ("v29"),                                          \
            op3S  __asm__ ("v30");                                          \
                                                                            \
        src1 = vec_ld(stride * i, src);                                     \
        src2 = vec_ld((stride * i) + 16, src);                              \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));             \
        dst1 = vec_ld(stride * i, dst);                                     \
        dst2 = vec_ld((stride * i) + 16, dst);                              \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));             \
        /* Promote the unsigned chars to signed shorts. */                  \
        srcV = (vector signed short) vec_mergeh((vector signed char) vzero, \
                                                (vector signed char) srcO); \
        dstV = (vector signed short) vec_mergeh((vector signed char) vzero, \
                                                (vector signed char) dstO); \
        srcW = (vector signed short) vec_mergel((vector signed char) vzero, \
                                                (vector signed char) srcO); \
        dstW = (vector signed short) vec_mergel((vector signed char) vzero, \
                                                (vector signed char) dstO); \
        /* subtractions inside the first butterfly */                       \
        but0  = vec_sub(srcV, dstV);                                        \
        but0S = vec_sub(srcW, dstW);                                        \
        op1   = vec_perm(but0, but0, perm1);                                \
        but1  = vec_mladd(but0, vprod1, op1);                               \
        op1S  = vec_perm(but0S, but0S, perm1);                              \
        but1S = vec_mladd(but0S, vprod1, op1S);                             \
        op2   = vec_perm(but1, but1, perm2);                                \
        but2  = vec_mladd(but1, vprod2, op2);                               \
        op2S  = vec_perm(but1S, but1S, perm2);                              \
        but2S = vec_mladd(but1S, vprod2, op2S);                             \
        op3   = vec_perm(but2, but2, perm3);                                \
        res1  = vec_mladd(but2, vprod3, op3);                               \
        op3S  = vec_perm(but2S, but2S, perm3);                              \
        res2  = vec_mladd(but2S, vprod3, op3S);                             \
    }
        ONEITERBUTTERFLY(0, temp0, temp0S);
        ONEITERBUTTERFLY(1, temp1, temp1S);
        ONEITERBUTTERFLY(2, temp2, temp2S);
        ONEITERBUTTERFLY(3, temp3, temp3S);
        ONEITERBUTTERFLY(4, temp4, temp4S);
        ONEITERBUTTERFLY(5, temp5, temp5S);
        ONEITERBUTTERFLY(6, temp6, temp6S);
        ONEITERBUTTERFLY(7, temp7, temp7S);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        register vector signed short line0S, line1S, line2S, line3S, line4S,
                                     line5S, line6S, line7S, line0BS, line2BS,
                                     line1BS, line3BS, line4BS, line6BS, line5BS,
                                     line7BS, line0CS, line4CS, line1CS, line5CS,
                                     line2CS, line6CS, line3CS, line7CS;
        register vector signed short line0  = vec_add(temp0, temp1);
        register vector signed short line1  = vec_sub(temp0, temp1);
        register vector signed short line2  = vec_add(temp2, temp3);
        register vector signed short line3  = vec_sub(temp2, temp3);
        register vector signed short line4  = vec_add(temp4, temp5);
        register vector signed short line5  = vec_sub(temp4, temp5);
        register vector signed short line6  = vec_add(temp6, temp7);
        register vector signed short line7  = vec_sub(temp6, temp7);
        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);
        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);

        line0S = vec_add(temp0S, temp1S);
        line1S = vec_sub(temp0S, temp1S);
        line2S = vec_add(temp2S, temp3S);
        line3S = vec_sub(temp2S, temp3S);
        line4S = vec_add(temp4S, temp5S);
        line5S = vec_sub(temp4S, temp5S);
        line6S = vec_add(temp6S, temp7S);
        line7S = vec_sub(temp6S, temp7S);

        line0BS = vec_add(line0S, line2S);
        line2BS = vec_sub(line0S, line2S);
        line1BS = vec_add(line1S, line3S);
        line3BS = vec_sub(line1S, line3S);
        line4BS = vec_add(line4S, line6S);
        line6BS = vec_sub(line4S, line6S);
        line5BS = vec_add(line5S, line7S);
        line7BS = vec_sub(line5S, line7S);

        line0CS = vec_add(line0BS, line4BS);
        line4CS = vec_sub(line0BS, line4BS);
        line1CS = vec_add(line1BS, line5BS);
        line5CS = vec_sub(line1BS, line5BS);
        line2CS = vec_add(line2BS, line6BS);
        line6CS = vec_sub(line2BS, line6BS);
        line3CS = vec_add(line3BS, line7BS);
        line7CS = vec_sub(line3BS, line7BS);

        vsum = vec_sum4s(vec_abs(line0CS), vsum);
        vsum = vec_sum4s(vec_abs(line1CS), vsum);
        vsum = vec_sum4s(vec_abs(line2CS), vsum);
        vsum = vec_sum4s(vec_abs(line3CS), vsum);
        vsum = vec_sum4s(vec_abs(line4CS), vsum);
        vsum = vec_sum4s(vec_abs(line5CS), vsum);
        vsum = vec_sum4s(vec_abs(line6CS), vsum);
        vsum = vec_sum4s(vec_abs(line7CS), vsum);
        vsum = vec_sums(vsum, (vector signed int) vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    return sum;
}

static int hadamard8_diff16_altivec(/* MpegEncContext */ void *s, uint8_t *dst,
                                    uint8_t *src, int stride, int h)
{
    int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);

    if (h == 16) {
        dst   += 8 * stride;
        src   += 8 * stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
    return score;
}
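
/* Install the AltiVec implementations. pix_abs[0][*] get the 16-pixel-wide
 * SAD functions (plain and the x2/y2/xy2 averaging variants used for
 * half-pel motion estimation); pix_abs[1][0] gets the 8-pixel-wide SAD. */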
av_cold void ff_dsputil_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;

    c->sad[0] = sad16_altivec;
    c->sad[1] = sad8_altivec;

    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1]    = sse8_altivec;
    c->sse[0]    = sse16_altivec;
    c->pix_sum   = pix_sum_altivec;

    c->diff_pixels = diff_pixels_altivec;
    c->add_bytes   = add_bytes_altivec;

    if (!high_bit_depth) {
        c->get_pixels  = get_pixels_altivec;
        c->clear_block = clear_block_altivec;
    }

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
}