/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/hpeldsp.h"

#include "hpeldsp_altivec.h"

#if HAVE_ALTIVEC

/* next one assumes that ((line_size % 16) == 0) */
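/* Plain 16-pixel-wide copy. The destination block is assumed to be 16-byte
 * aligned; the source may not be, so each row is fetched with two aligned
 * loads and realigned with vec_perm() using the permute vector returned by
 * vec_lvsl(0, pixels). */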
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register ptrdiff_t line_size_2 = line_size << 1;
    register ptrdiff_t line_size_3 = line_size + line_size_2;
    register ptrdiff_t line_size_4 = line_size << 2;

    // hand-unrolling the loop by 4 gains about 15%
    // minimum execution time goes from 74 to 60 cycles
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels += line_size_4;
        block  += line_size_4;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
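/* op_avg() is the scalar rounding average a = (a + b + 1) >> 1 applied to four
 * packed bytes at once: (a|b) - (((a^b) & 0xFEFEFEFE) >> 1), where the mask
 * keeps the shift from borrowing bits across byte boundaries. The AltiVec
 * functions below get the same per-byte rounded average from vec_avg(). */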
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        blockv   = vec_ld( 0, block);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, perm);
        blockv   = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
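/* 8-pixel-wide averaging. AltiVec stores always write a full aligned 16-byte
 * vector, so the vector containing 'block' is loaded, the eight source bytes
 * are merged into whichever half 'block' occupies (the 'rightside' test
 * below), averaged with vec_avg() and the whole vector is stored back. */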
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv   = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv  = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0, 1, s0, s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block  += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
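/* Half-pel interpolation in both x and y with rounding:
 * dst[x] = (p[x] + p[x+1] + p[x+line_size] + p[x+line_size+1] + 2) >> 2.
 * The bytes are widened to 16-bit shorts with vec_mergeh() so the sums cannot
 * overflow; the horizontal pair sum of the previous row (plus the rounding
 * bias) is carried in pixelssum1, so each source row is loaded only once. */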
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
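/* "No rounding" variant of put_pixels8_xy2_altivec(): the bias added to the
 * running sum is 1 (vcone) instead of 2, giving
 * dst[x] = (p[x] + p[x+1] + p[x+line_size] + p[x+line_size+1] + 1) >> 2. */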
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
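/* 16-pixel-wide version of the rounded xy2 interpolation. Each row is split
 * into its first and last eight bytes with vec_mergeh()/vec_mergel(), the two
 * halves are summed as vectors of 16-bit shorts, and the averaged halves are
 * packed back into a single 16-byte row with vec_packsu() before the store. */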
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 16) == 0) */
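/* "No rounding" variant of put_pixels16_xy2_altivec(): identical except that
 * the rounding bias is 1 (vcone) instead of 2 (vctwo). */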
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}

/* next one assumes that ((line_size % 8) == 0) */
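/* Rounded xy2 interpolation as in put_pixels8_xy2_altivec(), with the result
 * then averaged against the existing contents of 'block' using vec_avg(). */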
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

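    /* First index: 0 for 16x16 blocks, 1 for 8x8 blocks. Second index: the
     * half-pel position (x halfpel + 2 * y halfpel), so 3 means offset by
     * half a pixel in both directions. */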
    c->avg_pixels_tab[0][0]        = ff_avg_pixels16_altivec;
    c->avg_pixels_tab[1][0]        = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3]        = avg_pixels8_xy2_altivec;

    c->put_pixels_tab[0][0]        = ff_put_pixels16_altivec;
    c->put_pixels_tab[1][3]        = put_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3]        = put_pixels16_xy2_altivec;

    c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
#endif /* HAVE_ALTIVEC */
}