You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

468 lines
18KB

  1. /*
  2. * Copyright (c) 2002 Brian Foley
  3. * Copyright (c) 2002 Dieter Shirley
  4. * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #if HAVE_ALTIVEC_H
  24. #include <altivec.h>
  25. #endif
  26. #include "libavutil/attributes.h"
  27. #include "libavutil/cpu.h"
  28. #include "libavutil/ppc/types_altivec.h"
  29. #include "libavutil/ppc/util_altivec.h"
  30. #include "libavcodec/hpeldsp.h"
  31. #include "dsputil_altivec.h"
  32. #if HAVE_ALTIVEC
/* next one assumes that ((line_size % 16) == 0) */
/**
 * Copy a 16-byte-wide block of h rows from pixels to block.
 * block must be 16-byte aligned (vec_st stores whole vectors);
 * pixels may be unaligned and is realigned with the lvsl/perm idiom.
 * NOTE(review): the i += 4 unrolling assumes h is a multiple of 4.
 */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;
    /* permute mask that shifts the two raw loads into alignment */
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register ptrdiff_t line_size_2 = line_size << 1;
    register ptrdiff_t line_size_3 = line_size + line_size_2;
    register ptrdiff_t line_size_4 = line_size << 2;

    // hand-unrolling the loop by 4 gains about 15%
    // minimum execution time goes from 74 to 60 cycles
    // it's faster than -funroll-loops, but using
    // -funroll-loops w/ this is bad - 74 cycles again.
    // all this is on a 7450, tuning for the 7450
    for (i = 0; i < h; i += 4) {
        /* Each row needs two aligned loads; the second uses offset 15 so
         * that when pixels is already 16-byte aligned it hits the same
         * vector as the first load instead of reading past the row. */
        pixelsv1  = vec_ld( 0, pixels);
        pixelsv2  = vec_ld(15, pixels);
        pixelsv1B = vec_ld(line_size, pixels);
        pixelsv2B = vec_ld(15 + line_size, pixels);
        pixelsv1C = vec_ld(line_size_2, pixels);
        pixelsv2C = vec_ld(15 + line_size_2, pixels);
        pixelsv1D = vec_ld(line_size_3, pixels);
        pixelsv2D = vec_ld(15 + line_size_3, pixels);
        /* realign each pair and store four rows */
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
               0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
               line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
               line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
               line_size_3, (unsigned char*)block);
        pixels+=line_size_4;
        block +=line_size_4;
    }
}
/* next one assumes that ((line_size % 16) == 0) */
/* Scalar rounded byte-wise average on a packed 32-bit word.
 * NOTE(review): not referenced anywhere in the visible part of this file. */
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
/**
 * Average a 16-byte-wide block of h rows into block:
 * block[x] = rounded average of block[x] and pixels[x] (vec_avg).
 * block must be 16-byte aligned; pixels may be unaligned.
 */
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    /* realignment mask for the unaligned source */
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    for (i = 0; i < h; i++) {
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16,pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        /* vec_avg rounds up: (a + b + 1) >> 1 per byte */
        blockv = vec_avg(blockv,pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels+=line_size;
        block +=line_size;
    }
}
/* next one assumes that ((line_size % 8) == 0) */
/**
 * Average an 8-byte-wide block of h rows into block.
 * block is only 8-byte aligned, so each store updates one half of a
 * 16-byte vector; the other half is rebuilt from the existing data so
 * that averaging it with itself leaves it unchanged.
 */
static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;
    for (i = 0; i < h; i++) {
        /* block is 8 bytes-aligned, so we're either in the
           left block (16 bytes-aligned) or in the right block (not) */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);
        pixelsv1 = vec_ld( 0, pixels);
        pixelsv2 = vec_ld(16, pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
        /* Place the 8 source bytes into the half we are writing and copy
         * blockv's own data into the other half (avg(x, x) == x). */
        if (rightside) {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        } else {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }
        blockv = vec_avg(blockv, pixelsv);
        vec_st(blockv, 0, block);
        pixels += line_size;
        block += line_size;
    }
}
/* next one assumes that ((line_size % 8) == 0) */
/**
 * 8-wide put with half-pel interpolation in both x and y:
 * dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2.
 * The horizontal pair-sum of each row is computed once and carried in
 * pixelssum1 to the next iteration (with the rounding bias of 2 folded in).
 */
static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Prime the pipeline: build p[x] and p[x+1] for row 0, widen to
     * 16-bit, and form their sum plus the rounding bias. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the shifted row is exactly temp2 */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* zero-extend the low 8 bytes to unsigned shorts */
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        /* block is 8-byte aligned: decide which half of the 16-byte
         * destination vector we are writing */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);
        /* load the next row and build its pair-sum */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* four-pixel sum (bias already inside pixelssum1), then >> 2 */
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's sum, re-biased, into the next iteration */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
        /* merge the 8 result bytes into the correct half of blockv */
        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }
        vec_st(blockv, 0, block);
        block += line_size;
        pixels += line_size;
    }
}
/* next one assumes that ((line_size % 8) == 0) */
/**
 * No-rounding variant of put_pixels8_xy2_altivec:
 * dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 1) >> 2.
 * Identical structure, but the carried bias is 1 (vcone) instead of 2.
 */
static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Prime the pipeline with row 0's horizontal pair-sum plus bias 1. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the shifted row is exactly temp2 */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* zero-extend the low 8 bytes to unsigned shorts */
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        /* which half of the 16-byte destination vector are we writing? */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);
        /* next row's pair-sum */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* four-pixel sum (bias 1 already in pixelssum1), then >> 2 */
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's sum with the no-rounding bias */
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
        if (rightside) {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }
        vec_st(blockv, 0, block);
        block += line_size;
        pixels += line_size;
    }
}
/* next one assumes that ((line_size % 16) == 0) */
/**
 * 16-wide put with half-pel interpolation in both x and y:
 * dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2.
 * The 16 source bytes are widened into two short vectors (mergeh for the
 * low half, mergel for the high half); the pair-sums pixelssum1/3 carry
 * the previous row (plus bias 2) across iterations.
 */
static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Prime the pipeline with row 0: p[x] and p[x+1] for all 16 lanes. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the shifted row is exactly temp2 */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* widen: pixelsv3/4 = high 8 bytes, pixelsv1/2 = low 8 bytes */
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);
        /* next row, same widening/pair-sum as above */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }
        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* four-pixel sums (bias already carried), then >> 2, per half */
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's sums, re-biased, into the next iteration */
        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        /* pack both short halves back to 16 unsigned bytes */
        blockv = vec_packsu(temp3, temp4);
        vec_st(blockv, 0, block);
        block += line_size;
        pixels += line_size;
    }
}
/* next one assumes that ((line_size % 16) == 0) */
/**
 * No-rounding variant of put_pixels16_xy2_altivec:
 * dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 1) >> 2.
 * Identical structure, but the carried bias is 1 (vcone) instead of 2.
 */
static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char blockv, temp1, temp2;
    register vector unsigned short temp3, temp4,
        pixelssum1, pixelssum2, pixelssum3, pixelssum4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Prime the pipeline with row 0's pair-sums plus bias 1, per half. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the shifted row is exactly temp2 */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* widen: pixelsv3/4 = high 8 bytes, pixelsv1/2 = low 8 bytes */
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                         (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);
        /* next row, same widening/pair-sum as above */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }
        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                             (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* four-pixel sums (bias 1 already carried), then >> 2, per half */
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's sums with the no-rounding bias */
        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);
        /* pack both short halves back to 16 unsigned bytes */
        blockv = vec_packsu(temp3, temp4);
        vec_st(blockv, 0, block);
        block += line_size;
        pixels += line_size;
    }
}
/* next one assumes that ((line_size % 8) == 0) */
/**
 * 8-wide avg with half-pel interpolation in both x and y: the rounded
 * 4-pixel average ((p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2) is
 * computed as in put_pixels8_xy2_altivec, then vec_avg'd with the
 * existing destination bytes.
 */
static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    /* Prime the pipeline with row 0's horizontal pair-sum plus bias 2. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        /* pixels+1 is 16-byte aligned: the shifted row is exactly temp2 */
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* zero-extend the low 8 bytes to unsigned shorts */
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        /* which half of the 16-byte destination vector are we writing? */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);
        /* next row's pair-sum */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* four-pixel sum (bias already in pixelssum1), then >> 2 */
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's sum, re-biased, into the next iteration */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
        /* build a vector whose written half holds the interpolated bytes
         * and whose other half copies blockv (avg(x, x) == x) */
        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }
        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);
        block += line_size;
        pixels += line_size;
    }
}
  387. #endif /* HAVE_ALTIVEC */
  388. av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
  389. {
  390. #if HAVE_ALTIVEC
  391. if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
  392. return;
  393. c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
  394. c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
  395. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
  396. c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
  397. c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
  398. c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
  399. c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
  400. c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
  401. c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
  402. #endif /* HAVE_ALTIVEC */
  403. }