  1. /*
  2. * AltiVec-enhanced yuv2yuvX
  3. *
  4. * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
  5. * based on the equivalent C code in swscale.c
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include <inttypes.h>
  24. #include "config.h"
  25. #include "libswscale/swscale.h"
  26. #include "libswscale/swscale_internal.h"
  27. #include "libavutil/attributes.h"
  28. #include "libavutil/cpu.h"
  29. #include "yuv2rgb_altivec.h"
  30. #include "libavutil/ppc/util_altivec.h"
  31. #if HAVE_VSX
  32. #define vzero vec_splat_s32(0)
  33. #if !HAVE_BIGENDIAN
  34. #define GET_LS(a,b,c,s) {\
  35. ls = a;\
  36. a = vec_vsx_ld(((b) << 1) + 16, s);\
  37. }
  38. #define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
  39. vector signed short ls;\
  40. vector signed int vf1, vf2, i1, i2;\
  41. GET_LS(l1, x, perm, src);\
  42. i1 = vec_mule(filter, ls);\
  43. i2 = vec_mulo(filter, ls);\
  44. vf1 = vec_mergeh(i1, i2);\
  45. vf2 = vec_mergel(i1, i2);\
  46. d1 = vec_add(d1, vf1);\
  47. d2 = vec_add(d2, vf2);\
  48. } while (0)
  49. #define LOAD_FILTER(vf,f) {\
  50. vf = vec_vsx_ld(joffset, f);\
  51. }
  52. #define LOAD_L1(ll1,s,p){\
  53. ll1 = vec_vsx_ld(xoffset, s);\
  54. }
  55. // The 3 in the 'a << 3' shift of GET_VF4 below is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
  56. // The neat trick: We only care for half the elements,
  57. // high or low depending on (i<<3)%16 (it's 0 or 8 here),
  58. // and we're going to use vec_mule, so we choose
  59. // carefully how to "unpack" the elements into the even slots.
  60. #define GET_VF4(a, vf, f) {\
  61. vf = (vector signed short)vec_vsx_ld(a << 3, f);\
  62. vf = vec_mergeh(vf, (vector signed short)vzero);\
  63. }
  64. #define FIRST_LOAD(sv, pos, s, per) {}
  65. #define UPDATE_PTR(s0, d0, s1, d1) {}
  66. #define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
  67. vf = vec_vsx_ld(pos + a, s);\
  68. }
  69. #define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
  70. #define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
  71. vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
  72. }
  73. #define FUNC(name) name ## _vsx
  74. #include "swscale_ppc_template.c"
  75. #undef FUNC
  76. #undef vzero
  77. #endif /* !HAVE_BIGENDIAN */
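// Note: each output routine below comes in two flavours: a scalar "_u" helper that
// handles the unaligned head and tail of a row, and a "_vsx" body that processes
// aligned, full-vector chunks. dst_u is the number of leading pixels needed to
// reach the next 16-byte boundary of dest.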
  78. static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
  79. const uint8_t *dither, int offset, int start)
  80. {
  81. int i;
  82. for (i = start; i < dstW; i++) {
  83. int val = (src[i] + dither[(i + offset) & 7]) >> 7;
  84. dest[i] = av_clip_uint8(val);
  85. }
  86. }
  87. static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
  88. const uint8_t *dither, int offset)
  89. {
  90. const int dst_u = -(uintptr_t)dest & 15;
  91. int i, j;
  92. LOCAL_ALIGNED(16, int16_t, val, [16]);
  93. const vec_u16 shifts = (vec_u16) {7, 7, 7, 7, 7, 7, 7, 7};
  94. vec_s16 vi, vileft, ditherleft, ditherright;
  95. vec_u8 vd;
  96. for (j = 0; j < 16; j++) {
  97. val[j] = dither[(dst_u + offset + j) & 7];
  98. }
  99. ditherleft = vec_ld(0, val);
  100. ditherright = vec_ld(0, &val[8]);
  101. yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);
  102. for (i = dst_u; i < dstW - 15; i += 16) {
  103. vi = vec_vsx_ld(0, &src[i]);
  104. vi = vec_adds(ditherleft, vi);
  105. vileft = vec_sra(vi, shifts);
  106. vi = vec_vsx_ld(0, &src[i + 8]);
  107. vi = vec_adds(ditherright, vi);
  108. vi = vec_sra(vi, shifts);
  109. vd = vec_packsu(vileft, vi);
  110. vec_st(vd, 0, &dest[i]);
  111. }
  112. yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
  113. }
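// 9-16 bit planar output follows. The scalar output_pixel macro stores through
// AV_WB16/AV_WL16; the vector versions get the same effect by rotating each
// 16-bit lane by 8 bits (vec_rl with vswap) when the requested output endianness
// differs from the (little-endian) host.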
  114. #if !HAVE_BIGENDIAN
  115. #define output_pixel(pos, val) \
  116. if (big_endian) { \
  117. AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  118. } else { \
  119. AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  120. }
  121. static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
  122. int big_endian, int output_bits, int start)
  123. {
  124. int i;
  125. int shift = 15 - output_bits;
  126. for (i = start; i < dstW; i++) {
  127. int val = src[i] + (1 << (shift - 1));
  128. output_pixel(&dest[i], val);
  129. }
  130. }
  131. static av_always_inline void yuv2plane1_nbps_vsx(const int16_t *src,
  132. uint16_t *dest, int dstW,
  133. const int big_endian,
  134. const int output_bits)
  135. {
  136. const int dst_u = -(uintptr_t)dest & 7;
  137. const int shift = 15 - output_bits;
  138. const int add = (1 << (shift - 1));
  139. const int clip = (1 << output_bits) - 1;
  140. const vec_u16 vadd = (vec_u16) {add, add, add, add, add, add, add, add};
  141. const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);
  142. const vec_u16 vshift = (vec_u16) vec_splat_u16(shift);
  143. const vec_u16 vlargest = (vec_u16) {clip, clip, clip, clip, clip, clip, clip, clip};
  144. vec_u16 v;
  145. int i;
  146. yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);
  147. for (i = dst_u; i < dstW - 7; i += 8) {
  148. v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
  149. v = vec_add(v, vadd);
  150. v = vec_sr(v, vshift);
  151. v = vec_min(v, vlargest);
  152. v = vec_rl(v, vswap);
  153. vec_st(v, 0, &dest[i]);
  154. }
  155. yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
  156. }
  157. static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
  158. const int16_t **src, uint16_t *dest, int dstW,
  159. int big_endian, int output_bits, int start)
  160. {
  161. int i;
  162. int shift = 11 + 16 - output_bits;
  163. for (i = start; i < dstW; i++) {
  164. int val = 1 << (shift - 1);
  165. int j;
  166. for (j = 0; j < filterSize; j++)
  167. val += src[j][i] * filter[j];
  168. output_pixel(&dest[i], val);
  169. }
  170. }
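// Multi-tap filtering below: vec_mule/vec_mulo yield the even- and odd-indexed
// 32-bit products, accumulated separately in vleft/vright. After packing,
// vperm re-interleaves the even/odd results back into pixel order before the store.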
  171. static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
  172. const int16_t **src, uint16_t *dest, int dstW,
  173. int big_endian, int output_bits)
  174. {
  175. const int dst_u = -(uintptr_t)dest & 7;
  176. const int shift = 11 + 16 - output_bits;
  177. const int add = (1 << (shift - 1));
  178. const int clip = (1 << output_bits) - 1;
  179. const uint16_t swap = big_endian ? 8 : 0;
  180. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  181. const vec_u32 vshift = (vec_u32) {shift, shift, shift, shift};
  182. const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
  183. const vec_u16 vlargest = (vec_u16) {clip, clip, clip, clip, clip, clip, clip, clip};
  184. const vec_s16 vzero = vec_splat_s16(0);
  185. const vec_u8 vperm = (vec_u8) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
  186. vec_s16 vfilter[MAX_FILTER_SIZE], vin;
  187. vec_u16 v;
  188. vec_u32 vleft, vright, vtmp;
  189. int i, j;
  190. for (i = 0; i < filterSize; i++) {
  191. vfilter[i] = (vec_s16) {filter[i], filter[i], filter[i], filter[i],
  192. filter[i], filter[i], filter[i], filter[i]};
  193. }
  194. yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  195. for (i = dst_u; i < dstW - 7; i += 8) {
  196. vleft = vright = vadd;
  197. for (j = 0; j < filterSize; j++) {
  198. vin = vec_vsx_ld(0, &src[j][i]);
  199. vtmp = (vec_u32) vec_mule(vin, vfilter[j]);
  200. vleft = vec_add(vleft, vtmp);
  201. vtmp = (vec_u32) vec_mulo(vin, vfilter[j]);
  202. vright = vec_add(vright, vtmp);
  203. }
  204. vleft = vec_sra(vleft, vshift);
  205. vright = vec_sra(vright, vshift);
  206. v = vec_packsu(vleft, vright);
  207. v = (vec_u16) vec_max((vec_s16) v, vzero);
  208. v = vec_min(v, vlargest);
  209. v = vec_rl(v, vswap);
  210. v = vec_perm(v, v, vperm);
  211. vec_st(v, 0, &dest[i]);
  212. }
  213. yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  214. }
  215. #undef output_pixel
  216. #define output_pixel(pos, val, bias, signedness) \
  217. if (big_endian) { \
  218. AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  219. } else { \
  220. AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  221. }
  222. static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
  223. int big_endian, int output_bits, int start)
  224. {
  225. int i;
  226. const int shift = 3;
  227. for (i = start; i < dstW; i++) {
  228. int val = src[i] + (1 << (shift - 1));
  229. output_pixel(&dest[i], val, 0, uint);
  230. }
  231. }
  232. static av_always_inline void yuv2plane1_16_vsx(const int32_t *src,
  233. uint16_t *dest, int dstW,
  234. const int big_endian,
  235. int output_bits)
  236. {
  237. const int dst_u = -(uintptr_t)dest & 7;
  238. const int shift = 3;
  239. const int add = (1 << (shift - 1));
  240. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  241. const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);
  242. const vec_u32 vshift = (vec_u32) vec_splat_u32(shift);
  243. vec_u32 v, v2;
  244. vec_u16 vd;
  245. int i;
  246. yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);
  247. for (i = dst_u; i < dstW - 7; i += 8) {
  248. v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
  249. v = vec_add(v, vadd);
  250. v = vec_sr(v, vshift);
  251. v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
  252. v2 = vec_add(v2, vadd);
  253. v2 = vec_sr(v2, vshift);
  254. vd = vec_packsu(v, v2);
  255. vd = vec_rl(vd, vswap);
  256. vec_st(vd, 0, &dest[i]);
  257. }
  258. yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
  259. }
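// The filtered 16-bit path needs a full 32x32-bit vector multiply (vec_mul on
// vec_s32), which is only available from POWER8 (ISA 2.07) on, hence the guard below.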
  260. #if HAVE_POWER8
  261. static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
  262. const int32_t **src, uint16_t *dest, int dstW,
  263. int big_endian, int output_bits, int start)
  264. {
  265. int i;
  266. int shift = 15;
  267. for (i = start; i < dstW; i++) {
  268. int val = 1 << (shift - 1);
  269. int j;
  270. /* The range of val is [0,0x7FFFFFFF], i.e. 31 bits, but with lanczos/spline
  271. * filters (or anything with negative coefficients) the range can be slightly
  272. * wider in both directions. To account for this overflow, we subtract
  273. * a constant so that it always fits in the signed range (assuming a
  274. * reasonable filterSize), and re-add it at the end. */
  275. val -= 0x40000000;
  276. for (j = 0; j < filterSize; j++)
  277. val += src[j][i] * (unsigned)filter[j];
  278. output_pixel(&dest[i], val, 0x8000, int);
  279. }
  280. }
  281. static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
  282. const int32_t **src, uint16_t *dest, int dstW,
  283. int big_endian, int output_bits)
  284. {
  285. const int dst_u = -(uintptr_t)dest & 7;
  286. const int shift = 15;
  287. const int bias = 0x8000;
  288. const int add = (1 << (shift - 1)) - 0x40000000;
  289. const uint16_t swap = big_endian ? 8 : 0;
  290. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  291. const vec_u32 vshift = (vec_u32) {shift, shift, shift, shift};
  292. const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
  293. const vec_u16 vbias = (vec_u16) {bias, bias, bias, bias, bias, bias, bias, bias};
  294. vec_s32 vfilter[MAX_FILTER_SIZE];
  295. vec_u16 v;
  296. vec_u32 vleft, vright, vtmp;
  297. vec_s32 vin32l, vin32r;
  298. int i, j;
  299. for (i = 0; i < filterSize; i++) {
  300. vfilter[i] = (vec_s32) {filter[i], filter[i], filter[i], filter[i]};
  301. }
  302. yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  303. for (i = dst_u; i < dstW - 7; i += 8) {
  304. vleft = vright = vadd;
  305. for (j = 0; j < filterSize; j++) {
  306. vin32l = vec_vsx_ld(0, &src[j][i]);
  307. vin32r = vec_vsx_ld(0, &src[j][i + 4]);
  308. vtmp = (vec_u32) vec_mul(vin32l, vfilter[j]);
  309. vleft = vec_add(vleft, vtmp);
  310. vtmp = (vec_u32) vec_mul(vin32r, vfilter[j]);
  311. vright = vec_add(vright, vtmp);
  312. }
  313. vleft = vec_sra(vleft, vshift);
  314. vright = vec_sra(vright, vshift);
  315. v = (vec_u16) vec_packs((vec_s32) vleft, (vec_s32) vright);
  316. v = vec_add(v, vbias);
  317. v = vec_rl(v, vswap);
  318. vec_st(v, 0, &dest[i]);
  319. }
  320. yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  321. }
  322. #endif /* HAVE_POWER8 */
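// Instantiate per-bit-depth, per-endianness wrappers around the templates above:
// yuv2NBPS1 generates the yuv2plane1_*BE/LE_vsx functions, yuv2NBPSX the
// corresponding filtered yuv2planeX versions.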
  323. #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
  324. yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  325. yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)
  326. #define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  327. static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
  328. uint8_t *dest, int dstW, \
  329. const uint8_t *dither, int offset) \
  330. { \
  331. yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
  332. (uint16_t *) dest, dstW, is_be, bits); \
  333. }
  334. #define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
  335. static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
  336. const int16_t **src, uint8_t *dest, int dstW, \
  337. const uint8_t *dither, int offset)\
  338. { \
  339. yuv2planeX_## template_size ## _vsx(filter, \
  340. filterSize, (const typeX_t **) src, \
  341. (uint16_t *) dest, dstW, is_be, bits); \
  342. }
  343. yuv2NBPS( 9, BE, 1, nbps, int16_t)
  344. yuv2NBPS( 9, LE, 0, nbps, int16_t)
  345. yuv2NBPS(10, BE, 1, nbps, int16_t)
  346. yuv2NBPS(10, LE, 0, nbps, int16_t)
  347. yuv2NBPS(12, BE, 1, nbps, int16_t)
  348. yuv2NBPS(12, LE, 0, nbps, int16_t)
  349. yuv2NBPS(14, BE, 1, nbps, int16_t)
  350. yuv2NBPS(14, LE, 0, nbps, int16_t)
  351. yuv2NBPS1(16, BE, 1, 16, int32_t)
  352. yuv2NBPS1(16, LE, 0, 16, int32_t)
  353. #if HAVE_POWER8
  354. yuv2NBPSX(16, BE, 1, 16, int32_t)
  355. yuv2NBPSX(16, LE, 0, 16, int32_t)
  356. #endif
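// WRITERGB: clamp the 32-bit R/G/B sums to [0, 1 << 30], drop the 22-bit
// fixed-point fraction, pack down to bytes and permute/merge them into the
// requested packed RGB(A) memory layout, advancing dest as it goes.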
  357. #define WRITERGB \
  358. R_l = vec_max(R_l, zero32); \
  359. R_r = vec_max(R_r, zero32); \
  360. G_l = vec_max(G_l, zero32); \
  361. G_r = vec_max(G_r, zero32); \
  362. B_l = vec_max(B_l, zero32); \
  363. B_r = vec_max(B_r, zero32); \
  364. \
  365. R_l = vec_min(R_l, rgbclip); \
  366. R_r = vec_min(R_r, rgbclip); \
  367. G_l = vec_min(G_l, rgbclip); \
  368. G_r = vec_min(G_r, rgbclip); \
  369. B_l = vec_min(B_l, rgbclip); \
  370. B_r = vec_min(B_r, rgbclip); \
  371. \
  372. R_l = vec_sr(R_l, shift22); \
  373. R_r = vec_sr(R_r, shift22); \
  374. G_l = vec_sr(G_l, shift22); \
  375. G_r = vec_sr(G_r, shift22); \
  376. B_l = vec_sr(B_l, shift22); \
  377. B_r = vec_sr(B_r, shift22); \
  378. \
  379. rd16 = vec_packsu(R_l, R_r); \
  380. gd16 = vec_packsu(G_l, G_r); \
  381. bd16 = vec_packsu(B_l, B_r); \
  382. rd = vec_packsu(rd16, zero16); \
  383. gd = vec_packsu(gd16, zero16); \
  384. bd = vec_packsu(bd16, zero16); \
  385. \
  386. switch(target) { \
  387. case AV_PIX_FMT_RGB24: \
  388. out0 = vec_perm(rd, gd, perm3rg0); \
  389. out0 = vec_perm(out0, bd, perm3tb0); \
  390. out1 = vec_perm(rd, gd, perm3rg1); \
  391. out1 = vec_perm(out1, bd, perm3tb1); \
  392. \
  393. vec_vsx_st(out0, 0, dest); \
  394. vec_vsx_st(out1, 16, dest); \
  395. \
  396. dest += 24; \
  397. break; \
  398. case AV_PIX_FMT_BGR24: \
  399. out0 = vec_perm(bd, gd, perm3rg0); \
  400. out0 = vec_perm(out0, rd, perm3tb0); \
  401. out1 = vec_perm(bd, gd, perm3rg1); \
  402. out1 = vec_perm(out1, rd, perm3tb1); \
  403. \
  404. vec_vsx_st(out0, 0, dest); \
  405. vec_vsx_st(out1, 16, dest); \
  406. \
  407. dest += 24; \
  408. break; \
  409. case AV_PIX_FMT_BGRA: \
  410. out0 = vec_mergeh(bd, gd); \
  411. out1 = vec_mergeh(rd, ad); \
  412. \
  413. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  414. vec_vsx_st(tmp8, 0, dest); \
  415. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  416. vec_vsx_st(tmp8, 16, dest); \
  417. \
  418. dest += 32; \
  419. break; \
  420. case AV_PIX_FMT_RGBA: \
  421. out0 = vec_mergeh(rd, gd); \
  422. out1 = vec_mergeh(bd, ad); \
  423. \
  424. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  425. vec_vsx_st(tmp8, 0, dest); \
  426. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  427. vec_vsx_st(tmp8, 16, dest); \
  428. \
  429. dest += 32; \
  430. break; \
  431. case AV_PIX_FMT_ARGB: \
  432. out0 = vec_mergeh(ad, rd); \
  433. out1 = vec_mergeh(gd, bd); \
  434. \
  435. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  436. vec_vsx_st(tmp8, 0, dest); \
  437. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  438. vec_vsx_st(tmp8, 16, dest); \
  439. \
  440. dest += 32; \
  441. break; \
  442. case AV_PIX_FMT_ABGR: \
  443. out0 = vec_mergeh(ad, bd); \
  444. out1 = vec_mergeh(gd, rd); \
  445. \
  446. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  447. vec_vsx_st(tmp8, 0, dest); \
  448. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  449. vec_vsx_st(tmp8, 16, dest); \
  450. \
  451. dest += 32; \
  452. break; \
  453. }
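// Vertically filter 8 luma and 8 chroma samples per iteration at full chroma
// resolution, then convert to RGB with the context's yuv2rgb coefficients and
// store via WRITERGB.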
  454. static av_always_inline void
  455. yuv2rgb_full_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
  456. const int16_t **lumSrc, int lumFilterSize,
  457. const int16_t *chrFilter, const int16_t **chrUSrc,
  458. const int16_t **chrVSrc, int chrFilterSize,
  459. const int16_t **alpSrc, uint8_t *dest,
  460. int dstW, int y, enum AVPixelFormat target, int hasAlpha)
  461. {
  462. vec_s16 vv;
  463. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  464. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  465. vec_s32 tmp, tmp2, tmp3, tmp4;
  466. vec_u16 rd16, gd16, bd16;
  467. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  468. vec_s16 vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
  469. const vec_s32 ystart = vec_splats(1 << 9);
  470. const vec_s32 uvstart = vec_splats((1 << 9) - (128 << 19));
  471. const vec_u16 zero16 = vec_splat_u16(0);
  472. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  473. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  474. const vec_s32 y_add = vec_splats(1 << 21);
  475. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  476. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  477. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  478. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  479. const vec_s32 rgbclip = vec_splats(1 << 30);
  480. const vec_s32 zero32 = vec_splat_s32(0);
  481. const vec_u32 shift22 = vec_splats(22U);
  482. const vec_u32 shift10 = vec_splat_u32(10);
  483. int i, j;
  484. // Various permutations
  485. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  486. 0x1, 0x11, 0,
  487. 0x2, 0x12, 0,
  488. 0x3, 0x13, 0,
  489. 0x4, 0x14, 0,
  490. 0x5 };
  491. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  492. 0x6, 0x16, 0,
  493. 0x7, 0x17, 0 };
  494. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  495. 0x3, 0x4, 0x11,
  496. 0x6, 0x7, 0x12,
  497. 0x9, 0xa, 0x13,
  498. 0xc, 0xd, 0x14,
  499. 0xf };
  500. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  501. 0x2, 0x3, 0x16,
  502. 0x5, 0x6, 0x17 };
  503. ad = vec_splats((uint8_t) 255);
  504. for (i = 0; i < lumFilterSize; i++)
  505. vlumFilter[i] = vec_splats(lumFilter[i]);
  506. for (i = 0; i < chrFilterSize; i++)
  507. vchrFilter[i] = vec_splats(chrFilter[i]);
  508. for (i = 0; i < dstW; i += 8) {
  509. vy32_l =
  510. vy32_r = ystart;
  511. vu32_l =
  512. vu32_r =
  513. vv32_l =
  514. vv32_r = uvstart;
  515. for (j = 0; j < lumFilterSize; j++) {
  516. vv = vec_ld(0, &lumSrc[j][i]);
  517. tmp = vec_mule(vv, vlumFilter[j]);
  518. tmp2 = vec_mulo(vv, vlumFilter[j]);
  519. tmp3 = vec_mergeh(tmp, tmp2);
  520. tmp4 = vec_mergel(tmp, tmp2);
  521. vy32_l = vec_adds(vy32_l, tmp3);
  522. vy32_r = vec_adds(vy32_r, tmp4);
  523. }
  524. for (j = 0; j < chrFilterSize; j++) {
  525. vv = vec_ld(0, &chrUSrc[j][i]);
  526. tmp = vec_mule(vv, vchrFilter[j]);
  527. tmp2 = vec_mulo(vv, vchrFilter[j]);
  528. tmp3 = vec_mergeh(tmp, tmp2);
  529. tmp4 = vec_mergel(tmp, tmp2);
  530. vu32_l = vec_adds(vu32_l, tmp3);
  531. vu32_r = vec_adds(vu32_r, tmp4);
  532. vv = vec_ld(0, &chrVSrc[j][i]);
  533. tmp = vec_mule(vv, vchrFilter[j]);
  534. tmp2 = vec_mulo(vv, vchrFilter[j]);
  535. tmp3 = vec_mergeh(tmp, tmp2);
  536. tmp4 = vec_mergel(tmp, tmp2);
  537. vv32_l = vec_adds(vv32_l, tmp3);
  538. vv32_r = vec_adds(vv32_r, tmp4);
  539. }
  540. vy32_l = vec_sra(vy32_l, shift10);
  541. vy32_r = vec_sra(vy32_r, shift10);
  542. vu32_l = vec_sra(vu32_l, shift10);
  543. vu32_r = vec_sra(vu32_r, shift10);
  544. vv32_l = vec_sra(vv32_l, shift10);
  545. vv32_r = vec_sra(vv32_r, shift10);
  546. vy32_l = vec_sub(vy32_l, y_offset);
  547. vy32_r = vec_sub(vy32_r, y_offset);
  548. vy32_l = vec_mul(vy32_l, y_coeff);
  549. vy32_r = vec_mul(vy32_r, y_coeff);
  550. vy32_l = vec_add(vy32_l, y_add);
  551. vy32_r = vec_add(vy32_r, y_add);
  552. R_l = vec_mul(vv32_l, v2r_coeff);
  553. R_l = vec_add(R_l, vy32_l);
  554. R_r = vec_mul(vv32_r, v2r_coeff);
  555. R_r = vec_add(R_r, vy32_r);
  556. G_l = vec_mul(vv32_l, v2g_coeff);
  557. tmp32 = vec_mul(vu32_l, u2g_coeff);
  558. G_l = vec_add(G_l, vy32_l);
  559. G_l = vec_add(G_l, tmp32);
  560. G_r = vec_mul(vv32_r, v2g_coeff);
  561. tmp32 = vec_mul(vu32_r, u2g_coeff);
  562. G_r = vec_add(G_r, vy32_r);
  563. G_r = vec_add(G_r, tmp32);
  564. B_l = vec_mul(vu32_l, u2b_coeff);
  565. B_l = vec_add(B_l, vy32_l);
  566. B_r = vec_mul(vu32_r, u2b_coeff);
  567. B_r = vec_add(B_r, vy32_r);
  568. WRITERGB
  569. }
  570. }
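// SETUP computes buf0[] * alpha1 + buf1[] * alpha for eight 16-bit samples;
// the 32-bit sums for the first and last four samples end up in tmp3 and tmp4.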
  571. #define SETUP(x, buf0, alpha1, buf1, alpha) { \
  572. x = vec_ld(0, buf0); \
  573. tmp = vec_mule(x, alpha1); \
  574. tmp2 = vec_mulo(x, alpha1); \
  575. tmp3 = vec_mergeh(tmp, tmp2); \
  576. tmp4 = vec_mergel(tmp, tmp2); \
  577. \
  578. x = vec_ld(0, buf1); \
  579. tmp = vec_mule(x, alpha); \
  580. tmp2 = vec_mulo(x, alpha); \
  581. tmp5 = vec_mergeh(tmp, tmp2); \
  582. tmp6 = vec_mergel(tmp, tmp2); \
  583. \
  584. tmp3 = vec_add(tmp3, tmp5); \
  585. tmp4 = vec_add(tmp4, tmp6); \
  586. }
  587. static av_always_inline void
  588. yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  589. const int16_t *ubuf[2], const int16_t *vbuf[2],
  590. const int16_t *abuf[2], uint8_t *dest, int dstW,
  591. int yalpha, int uvalpha, int y,
  592. enum AVPixelFormat target, int hasAlpha)
  593. {
  594. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  595. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  596. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  597. *abuf0 = hasAlpha ? abuf[0] : NULL,
  598. *abuf1 = hasAlpha ? abuf[1] : NULL;
  599. const int16_t yalpha1 = 4096 - yalpha;
  600. const int16_t uvalpha1 = 4096 - uvalpha;
  601. vec_s16 vy, vu, vv, A = vec_splat_s16(0);
  602. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  603. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  604. vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  605. vec_u16 rd16, gd16, bd16;
  606. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  607. const vec_s16 vyalpha1 = vec_splats(yalpha1);
  608. const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
  609. const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
  610. const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
  611. const vec_u16 zero16 = vec_splat_u16(0);
  612. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  613. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  614. const vec_s32 y_add = vec_splats(1 << 21);
  615. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  616. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  617. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  618. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  619. const vec_s32 rgbclip = vec_splats(1 << 30);
  620. const vec_s32 zero32 = vec_splat_s32(0);
  621. const vec_u32 shift19 = vec_splats(19U);
  622. const vec_u32 shift22 = vec_splats(22U);
  623. const vec_u32 shift10 = vec_splat_u32(10);
  624. const vec_s32 dec128 = vec_splats(128 << 19);
  625. const vec_s32 add18 = vec_splats(1 << 18);
  626. int i;
  627. // Various permutations
  628. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  629. 0x1, 0x11, 0,
  630. 0x2, 0x12, 0,
  631. 0x3, 0x13, 0,
  632. 0x4, 0x14, 0,
  633. 0x5 };
  634. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  635. 0x6, 0x16, 0,
  636. 0x7, 0x17, 0 };
  637. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  638. 0x3, 0x4, 0x11,
  639. 0x6, 0x7, 0x12,
  640. 0x9, 0xa, 0x13,
  641. 0xc, 0xd, 0x14,
  642. 0xf };
  643. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  644. 0x2, 0x3, 0x16,
  645. 0x5, 0x6, 0x17 };
  646. av_assert2(yalpha <= 4096U);
  647. av_assert2(uvalpha <= 4096U);
  648. for (i = 0; i < dstW; i += 8) {
  649. SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
  650. vy32_l = vec_sra(tmp3, shift10);
  651. vy32_r = vec_sra(tmp4, shift10);
  652. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  653. tmp3 = vec_sub(tmp3, dec128);
  654. tmp4 = vec_sub(tmp4, dec128);
  655. vu32_l = vec_sra(tmp3, shift10);
  656. vu32_r = vec_sra(tmp4, shift10);
  657. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  658. tmp3 = vec_sub(tmp3, dec128);
  659. tmp4 = vec_sub(tmp4, dec128);
  660. vv32_l = vec_sra(tmp3, shift10);
  661. vv32_r = vec_sra(tmp4, shift10);
  662. if (hasAlpha) {
  663. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  664. tmp3 = vec_add(tmp3, add18);
  665. tmp4 = vec_add(tmp4, add18);
  666. tmp3 = vec_sra(tmp3, shift19);
  667. tmp4 = vec_sra(tmp4, shift19);
  668. A = vec_packs(tmp3, tmp4);
  669. ad = vec_packsu(A, (vec_s16) zero16);
  670. } else {
  671. ad = vec_splats((uint8_t) 255);
  672. }
  673. vy32_l = vec_sub(vy32_l, y_offset);
  674. vy32_r = vec_sub(vy32_r, y_offset);
  675. vy32_l = vec_mul(vy32_l, y_coeff);
  676. vy32_r = vec_mul(vy32_r, y_coeff);
  677. vy32_l = vec_add(vy32_l, y_add);
  678. vy32_r = vec_add(vy32_r, y_add);
  679. R_l = vec_mul(vv32_l, v2r_coeff);
  680. R_l = vec_add(R_l, vy32_l);
  681. R_r = vec_mul(vv32_r, v2r_coeff);
  682. R_r = vec_add(R_r, vy32_r);
  683. G_l = vec_mul(vv32_l, v2g_coeff);
  684. tmp32 = vec_mul(vu32_l, u2g_coeff);
  685. G_l = vec_add(G_l, vy32_l);
  686. G_l = vec_add(G_l, tmp32);
  687. G_r = vec_mul(vv32_r, v2g_coeff);
  688. tmp32 = vec_mul(vu32_r, u2g_coeff);
  689. G_r = vec_add(G_r, vy32_r);
  690. G_r = vec_add(G_r, tmp32);
  691. B_l = vec_mul(vu32_l, u2b_coeff);
  692. B_l = vec_add(B_l, vy32_l);
  693. B_r = vec_mul(vu32_r, u2b_coeff);
  694. B_r = vec_add(B_r, vy32_r);
  695. WRITERGB
  696. }
  697. }
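// Non-full variant: chroma is horizontally subsampled, so each 4-element UV
// half is duplicated (doubleleft/doubleright) to cover 16 luma pixels per loop
// iteration, and WRITERGB runs twice per iteration.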
  698. static av_always_inline void
  699. yuv2rgb_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  700. const int16_t *ubuf[2], const int16_t *vbuf[2],
  701. const int16_t *abuf[2], uint8_t *dest, int dstW,
  702. int yalpha, int uvalpha, int y,
  703. enum AVPixelFormat target, int hasAlpha)
  704. {
  705. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  706. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  707. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  708. *abuf0 = hasAlpha ? abuf[0] : NULL,
  709. *abuf1 = hasAlpha ? abuf[1] : NULL;
  710. const int16_t yalpha1 = 4096 - yalpha;
  711. const int16_t uvalpha1 = 4096 - uvalpha;
  712. vec_s16 vy, vu, vv, A = vec_splat_s16(0);
  713. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  714. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r, vud32_l, vud32_r, vvd32_l, vvd32_r;
  715. vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  716. vec_u16 rd16, gd16, bd16;
  717. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  718. const vec_s16 vyalpha1 = vec_splats(yalpha1);
  719. const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
  720. const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
  721. const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
  722. const vec_u16 zero16 = vec_splat_u16(0);
  723. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  724. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  725. const vec_s32 y_add = vec_splats(1 << 21);
  726. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  727. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  728. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  729. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  730. const vec_s32 rgbclip = vec_splats(1 << 30);
  731. const vec_s32 zero32 = vec_splat_s32(0);
  732. const vec_u32 shift19 = vec_splats(19U);
  733. const vec_u32 shift22 = vec_splats(22U);
  734. const vec_u32 shift10 = vec_splat_u32(10);
  735. const vec_s32 dec128 = vec_splats(128 << 19);
  736. const vec_s32 add18 = vec_splats(1 << 18);
  737. int i;
  738. // Various permutations
  739. const vec_u8 doubleleft = (vec_u8) {0, 1, 2, 3,
  740. 0, 1, 2, 3,
  741. 4, 5, 6, 7,
  742. 4, 5, 6, 7 };
  743. const vec_u8 doubleright = (vec_u8) {8, 9, 10, 11,
  744. 8, 9, 10, 11,
  745. 12, 13, 14, 15,
  746. 12, 13, 14, 15 };
  747. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  748. 0x1, 0x11, 0,
  749. 0x2, 0x12, 0,
  750. 0x3, 0x13, 0,
  751. 0x4, 0x14, 0,
  752. 0x5 };
  753. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  754. 0x6, 0x16, 0,
  755. 0x7, 0x17, 0 };
  756. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  757. 0x3, 0x4, 0x11,
  758. 0x6, 0x7, 0x12,
  759. 0x9, 0xa, 0x13,
  760. 0xc, 0xd, 0x14,
  761. 0xf };
  762. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  763. 0x2, 0x3, 0x16,
  764. 0x5, 0x6, 0x17 };
  765. av_assert2(yalpha <= 4096U);
  766. av_assert2(uvalpha <= 4096U);
  767. for (i = 0; i < (dstW + 1) >> 1; i += 8) {
  768. SETUP(vy, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha);
  769. vy32_l = vec_sra(tmp3, shift10);
  770. vy32_r = vec_sra(tmp4, shift10);
  771. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  772. tmp3 = vec_sub(tmp3, dec128);
  773. tmp4 = vec_sub(tmp4, dec128);
  774. vu32_l = vec_sra(tmp3, shift10);
  775. vu32_r = vec_sra(tmp4, shift10);
  776. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  777. tmp3 = vec_sub(tmp3, dec128);
  778. tmp4 = vec_sub(tmp4, dec128);
  779. vv32_l = vec_sra(tmp3, shift10);
  780. vv32_r = vec_sra(tmp4, shift10);
  781. if (hasAlpha) {
  782. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  783. tmp3 = vec_add(tmp3, add18);
  784. tmp4 = vec_add(tmp4, add18);
  785. tmp3 = vec_sra(tmp3, shift19);
  786. tmp4 = vec_sra(tmp4, shift19);
  787. A = vec_packs(tmp3, tmp4);
  788. ad = vec_packsu(A, (vec_s16) zero16);
  789. } else {
  790. ad = vec_splats((uint8_t) 255);
  791. }
  792. vy32_l = vec_sub(vy32_l, y_offset);
  793. vy32_r = vec_sub(vy32_r, y_offset);
  794. vy32_l = vec_mul(vy32_l, y_coeff);
  795. vy32_r = vec_mul(vy32_r, y_coeff);
  796. vy32_l = vec_add(vy32_l, y_add);
  797. vy32_r = vec_add(vy32_r, y_add);
  798. // Use the first UV half
  799. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  800. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  801. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  802. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  803. R_l = vec_mul(vvd32_l, v2r_coeff);
  804. R_l = vec_add(R_l, vy32_l);
  805. R_r = vec_mul(vvd32_r, v2r_coeff);
  806. R_r = vec_add(R_r, vy32_r);
  807. G_l = vec_mul(vvd32_l, v2g_coeff);
  808. tmp32 = vec_mul(vud32_l, u2g_coeff);
  809. G_l = vec_add(G_l, vy32_l);
  810. G_l = vec_add(G_l, tmp32);
  811. G_r = vec_mul(vvd32_r, v2g_coeff);
  812. tmp32 = vec_mul(vud32_r, u2g_coeff);
  813. G_r = vec_add(G_r, vy32_r);
  814. G_r = vec_add(G_r, tmp32);
  815. B_l = vec_mul(vud32_l, u2b_coeff);
  816. B_l = vec_add(B_l, vy32_l);
  817. B_r = vec_mul(vud32_r, u2b_coeff);
  818. B_r = vec_add(B_r, vy32_r);
  819. WRITERGB
  820. // New Y for the second half
  821. SETUP(vy, &buf0[i * 2 + 8], vyalpha1, &buf1[i * 2 + 8], vyalpha);
  822. vy32_l = vec_sra(tmp3, shift10);
  823. vy32_r = vec_sra(tmp4, shift10);
  824. vy32_l = vec_sub(vy32_l, y_offset);
  825. vy32_r = vec_sub(vy32_r, y_offset);
  826. vy32_l = vec_mul(vy32_l, y_coeff);
  827. vy32_r = vec_mul(vy32_r, y_coeff);
  828. vy32_l = vec_add(vy32_l, y_add);
  829. vy32_r = vec_add(vy32_r, y_add);
  830. // Second UV half
  831. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  832. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  833. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  834. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  835. R_l = vec_mul(vvd32_l, v2r_coeff);
  836. R_l = vec_add(R_l, vy32_l);
  837. R_r = vec_mul(vvd32_r, v2r_coeff);
  838. R_r = vec_add(R_r, vy32_r);
  839. G_l = vec_mul(vvd32_l, v2g_coeff);
  840. tmp32 = vec_mul(vud32_l, u2g_coeff);
  841. G_l = vec_add(G_l, vy32_l);
  842. G_l = vec_add(G_l, tmp32);
  843. G_r = vec_mul(vvd32_r, v2g_coeff);
  844. tmp32 = vec_mul(vud32_r, u2g_coeff);
  845. G_r = vec_add(G_r, vy32_r);
  846. G_r = vec_add(G_r, tmp32);
  847. B_l = vec_mul(vud32_l, u2b_coeff);
  848. B_l = vec_add(B_l, vy32_l);
  849. B_r = vec_mul(vud32_r, u2b_coeff);
  850. B_r = vec_add(B_r, vy32_r);
  851. WRITERGB
  852. }
  853. }
  854. #undef SETUP
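// Single-input (unscaled vertical) RGB variants: when uvalpha < 2048 only the
// first chroma line is used, otherwise both chroma lines are combined before
// the conversion.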
  855. static av_always_inline void
  856. yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
  857. const int16_t *ubuf[2], const int16_t *vbuf[2],
  858. const int16_t *abuf0, uint8_t *dest, int dstW,
  859. int uvalpha, int y, enum AVPixelFormat target,
  860. int hasAlpha)
  861. {
  862. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  863. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  864. vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
  865. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  866. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  867. vec_u16 rd16, gd16, bd16;
  868. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  869. const vec_u16 zero16 = vec_splat_u16(0);
  870. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  871. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  872. const vec_s32 y_add = vec_splats(1 << 21);
  873. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  874. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  875. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  876. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  877. const vec_s32 rgbclip = vec_splats(1 << 30);
  878. const vec_s32 zero32 = vec_splat_s32(0);
  879. const vec_u32 shift2 = vec_splat_u32(2);
  880. const vec_u32 shift22 = vec_splats(22U);
  881. const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
  882. const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
  883. const vec_s16 mul4 = vec_splat_s16(4);
  884. const vec_s16 mul8 = vec_splat_s16(8);
  885. const vec_s16 add64 = vec_splat_s16(64);
  886. const vec_u16 shift7 = vec_splat_u16(7);
  887. const vec_s16 max255 = vec_splat_s16(255);
  888. int i;
  889. // Various permutations
  890. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  891. 0x1, 0x11, 0,
  892. 0x2, 0x12, 0,
  893. 0x3, 0x13, 0,
  894. 0x4, 0x14, 0,
  895. 0x5 };
  896. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  897. 0x6, 0x16, 0,
  898. 0x7, 0x17, 0 };
  899. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  900. 0x3, 0x4, 0x11,
  901. 0x6, 0x7, 0x12,
  902. 0x9, 0xa, 0x13,
  903. 0xc, 0xd, 0x14,
  904. 0xf };
  905. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  906. 0x2, 0x3, 0x16,
  907. 0x5, 0x6, 0x17 };
  908. for (i = 0; i < dstW; i += 8) { // The x86 asm also overwrites padding bytes.
  909. vy = vec_ld(0, &buf0[i]);
  910. vy32_l = vec_unpackh(vy);
  911. vy32_r = vec_unpackl(vy);
  912. vy32_l = vec_sl(vy32_l, shift2);
  913. vy32_r = vec_sl(vy32_r, shift2);
  914. vu = vec_ld(0, &ubuf0[i]);
  915. vv = vec_ld(0, &vbuf0[i]);
  916. if (uvalpha < 2048) {
  917. vu = (vec_s16) vec_sub((vec_u16) vu, sub7);
  918. vv = (vec_s16) vec_sub((vec_u16) vv, sub7);
  919. tmp32 = vec_mule(vu, mul4);
  920. tmp32_2 = vec_mulo(vu, mul4);
  921. vu32_l = vec_mergeh(tmp32, tmp32_2);
  922. vu32_r = vec_mergel(tmp32, tmp32_2);
  923. tmp32 = vec_mule(vv, mul4);
  924. tmp32_2 = vec_mulo(vv, mul4);
  925. vv32_l = vec_mergeh(tmp32, tmp32_2);
  926. vv32_r = vec_mergel(tmp32, tmp32_2);
  927. } else {
  928. tmp16 = vec_ld(0, &ubuf1[i]);
  929. vu = vec_add(vu, tmp16);
  930. vu = (vec_s16) vec_sub((vec_u16) vu, sub8);
  931. tmp16 = vec_ld(0, &vbuf1[i]);
  932. vv = vec_add(vv, tmp16);
  933. vv = (vec_s16) vec_sub((vec_u16) vv, sub8);
  934. vu32_l = vec_mule(vu, mul8);
  935. vu32_r = vec_mulo(vu, mul8);
  936. vv32_l = vec_mule(vv, mul8);
  937. vv32_r = vec_mulo(vv, mul8);
  938. }
  939. if (hasAlpha) {
  940. A = vec_ld(0, &abuf0[i]);
  941. A = vec_add(A, add64);
  942. A = vec_sr(A, shift7);
  943. A = vec_max(A, max255);
  944. ad = vec_packsu(A, (vec_s16) zero16);
  945. } else {
  946. ad = vec_splats((uint8_t) 255);
  947. }
  948. vy32_l = vec_sub(vy32_l, y_offset);
  949. vy32_r = vec_sub(vy32_r, y_offset);
  950. vy32_l = vec_mul(vy32_l, y_coeff);
  951. vy32_r = vec_mul(vy32_r, y_coeff);
  952. vy32_l = vec_add(vy32_l, y_add);
  953. vy32_r = vec_add(vy32_r, y_add);
  954. R_l = vec_mul(vv32_l, v2r_coeff);
  955. R_l = vec_add(R_l, vy32_l);
  956. R_r = vec_mul(vv32_r, v2r_coeff);
  957. R_r = vec_add(R_r, vy32_r);
  958. G_l = vec_mul(vv32_l, v2g_coeff);
  959. tmp32 = vec_mul(vu32_l, u2g_coeff);
  960. G_l = vec_add(G_l, vy32_l);
  961. G_l = vec_add(G_l, tmp32);
  962. G_r = vec_mul(vv32_r, v2g_coeff);
  963. tmp32 = vec_mul(vu32_r, u2g_coeff);
  964. G_r = vec_add(G_r, vy32_r);
  965. G_r = vec_add(G_r, tmp32);
  966. B_l = vec_mul(vu32_l, u2b_coeff);
  967. B_l = vec_add(B_l, vy32_l);
  968. B_r = vec_mul(vu32_r, u2b_coeff);
  969. B_r = vec_add(B_r, vy32_r);
  970. WRITERGB
  971. }
  972. }
  973. static av_always_inline void
  974. yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
  975. const int16_t *ubuf[2], const int16_t *vbuf[2],
  976. const int16_t *abuf0, uint8_t *dest, int dstW,
  977. int uvalpha, int y, enum AVPixelFormat target,
  978. int hasAlpha)
  979. {
  980. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  981. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  982. vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
  983. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  984. vec_s32 vud32_l, vud32_r, vvd32_l, vvd32_r;
  985. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  986. vec_u16 rd16, gd16, bd16;
  987. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  988. const vec_u16 zero16 = vec_splat_u16(0);
  989. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  990. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  991. const vec_s32 y_add = vec_splats(1 << 21);
  992. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  993. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  994. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  995. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  996. const vec_s32 rgbclip = vec_splats(1 << 30);
  997. const vec_s32 zero32 = vec_splat_s32(0);
  998. const vec_u32 shift2 = vec_splat_u32(2);
  999. const vec_u32 shift22 = vec_splats(22U);
  1000. const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
  1001. const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
  1002. const vec_s16 mul4 = vec_splat_s16(4);
  1003. const vec_s16 mul8 = vec_splat_s16(8);
  1004. const vec_s16 add64 = vec_splat_s16(64);
  1005. const vec_u16 shift7 = vec_splat_u16(7);
  1006. const vec_s16 max255 = vec_splat_s16(255);
  1007. int i;
  1008. // Various permutations
  1009. const vec_u8 doubleleft = (vec_u8) {0, 1, 2, 3,
  1010. 0, 1, 2, 3,
  1011. 4, 5, 6, 7,
  1012. 4, 5, 6, 7 };
  1013. const vec_u8 doubleright = (vec_u8) {8, 9, 10, 11,
  1014. 8, 9, 10, 11,
  1015. 12, 13, 14, 15,
  1016. 12, 13, 14, 15 };
  1017. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  1018. 0x1, 0x11, 0,
  1019. 0x2, 0x12, 0,
  1020. 0x3, 0x13, 0,
  1021. 0x4, 0x14, 0,
  1022. 0x5 };
  1023. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  1024. 0x6, 0x16, 0,
  1025. 0x7, 0x17, 0 };
  1026. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  1027. 0x3, 0x4, 0x11,
  1028. 0x6, 0x7, 0x12,
  1029. 0x9, 0xa, 0x13,
  1030. 0xc, 0xd, 0x14,
  1031. 0xf };
  1032. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  1033. 0x2, 0x3, 0x16,
  1034. 0x5, 0x6, 0x17 };
  1035. for (i = 0; i < (dstW + 1) >> 1; i += 8) { // The x86 asm also overwrites padding bytes.
  1036. vy = vec_ld(0, &buf0[i * 2]);
  1037. vy32_l = vec_unpackh(vy);
  1038. vy32_r = vec_unpackl(vy);
  1039. vy32_l = vec_sl(vy32_l, shift2);
  1040. vy32_r = vec_sl(vy32_r, shift2);
  1041. vu = vec_ld(0, &ubuf0[i]);
  1042. vv = vec_ld(0, &vbuf0[i]);
  1043. if (uvalpha < 2048) {
  1044. vu = (vec_s16) vec_sub((vec_u16) vu, sub7);
  1045. vv = (vec_s16) vec_sub((vec_u16) vv, sub7);
  1046. tmp32 = vec_mule(vu, mul4);
  1047. tmp32_2 = vec_mulo(vu, mul4);
  1048. vu32_l = vec_mergeh(tmp32, tmp32_2);
  1049. vu32_r = vec_mergel(tmp32, tmp32_2);
  1050. tmp32 = vec_mule(vv, mul4);
  1051. tmp32_2 = vec_mulo(vv, mul4);
  1052. vv32_l = vec_mergeh(tmp32, tmp32_2);
  1053. vv32_r = vec_mergel(tmp32, tmp32_2);
  1054. } else {
  1055. tmp16 = vec_ld(0, &ubuf1[i]);
  1056. vu = vec_add(vu, tmp16);
  1057. vu = (vec_s16) vec_sub((vec_u16) vu, sub8);
  1058. tmp16 = vec_ld(0, &vbuf1[i]);
  1059. vv = vec_add(vv, tmp16);
  1060. vv = (vec_s16) vec_sub((vec_u16) vv, sub8);
  1061. vu32_l = vec_mule(vu, mul8);
  1062. vu32_r = vec_mulo(vu, mul8);
  1063. vv32_l = vec_mule(vv, mul8);
  1064. vv32_r = vec_mulo(vv, mul8);
  1065. }
  1066. if (hasAlpha) {
  1067. A = vec_ld(0, &abuf0[i]);
  1068. A = vec_add(A, add64);
  1069. A = vec_sr(A, shift7);
  1070. A = vec_max(A, max255);
  1071. ad = vec_packsu(A, (vec_s16) zero16);
  1072. } else {
  1073. ad = vec_splats((uint8_t) 255);
  1074. }
  1075. vy32_l = vec_sub(vy32_l, y_offset);
  1076. vy32_r = vec_sub(vy32_r, y_offset);
  1077. vy32_l = vec_mul(vy32_l, y_coeff);
  1078. vy32_r = vec_mul(vy32_r, y_coeff);
  1079. vy32_l = vec_add(vy32_l, y_add);
  1080. vy32_r = vec_add(vy32_r, y_add);
  1081. // Use the first UV half
  1082. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  1083. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  1084. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  1085. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  1086. R_l = vec_mul(vvd32_l, v2r_coeff);
  1087. R_l = vec_add(R_l, vy32_l);
  1088. R_r = vec_mul(vvd32_r, v2r_coeff);
  1089. R_r = vec_add(R_r, vy32_r);
  1090. G_l = vec_mul(vvd32_l, v2g_coeff);
  1091. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1092. G_l = vec_add(G_l, vy32_l);
  1093. G_l = vec_add(G_l, tmp32);
  1094. G_r = vec_mul(vvd32_r, v2g_coeff);
  1095. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1096. G_r = vec_add(G_r, vy32_r);
  1097. G_r = vec_add(G_r, tmp32);
  1098. B_l = vec_mul(vud32_l, u2b_coeff);
  1099. B_l = vec_add(B_l, vy32_l);
  1100. B_r = vec_mul(vud32_r, u2b_coeff);
  1101. B_r = vec_add(B_r, vy32_r);
  1102. WRITERGB
  1103. // New Y for the second half
  1104. vy = vec_ld(16, &buf0[i * 2]);
  1105. vy32_l = vec_unpackh(vy);
  1106. vy32_r = vec_unpackl(vy);
  1107. vy32_l = vec_sl(vy32_l, shift2);
  1108. vy32_r = vec_sl(vy32_r, shift2);
  1109. vy32_l = vec_sub(vy32_l, y_offset);
  1110. vy32_r = vec_sub(vy32_r, y_offset);
  1111. vy32_l = vec_mul(vy32_l, y_coeff);
  1112. vy32_r = vec_mul(vy32_r, y_coeff);
  1113. vy32_l = vec_add(vy32_l, y_add);
  1114. vy32_r = vec_add(vy32_r, y_add);
  1115. // Second UV half
  1116. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  1117. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  1118. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  1119. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  1120. R_l = vec_mul(vvd32_l, v2r_coeff);
  1121. R_l = vec_add(R_l, vy32_l);
  1122. R_r = vec_mul(vvd32_r, v2r_coeff);
  1123. R_r = vec_add(R_r, vy32_r);
  1124. G_l = vec_mul(vvd32_l, v2g_coeff);
  1125. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1126. G_l = vec_add(G_l, vy32_l);
  1127. G_l = vec_add(G_l, tmp32);
  1128. G_r = vec_mul(vvd32_r, v2g_coeff);
  1129. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1130. G_r = vec_add(G_r, vy32_r);
  1131. G_r = vec_add(G_r, tmp32);
  1132. B_l = vec_mul(vud32_l, u2b_coeff);
  1133. B_l = vec_add(B_l, vy32_l);
  1134. B_r = vec_mul(vud32_r, u2b_coeff);
  1135. B_r = vec_add(B_r, vy32_r);
  1136. WRITERGB
  1137. }
  1138. }
  1139. #undef WRITERGB
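// Bind each template to a concrete pixel format: _1 = one input line,
// _2 = two-line blend, _X = full multi-tap vertical filter. For example,
// YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0) expands to
// yuv2rgb24_1_vsx(), which forwards to yuv2rgb_1_vsx_template() with
// target = AV_PIX_FMT_RGB24 and hasAlpha = 0.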
  1140. #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
  1141. static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
  1142. const int16_t **lumSrc, int lumFilterSize, \
  1143. const int16_t *chrFilter, const int16_t **chrUSrc, \
  1144. const int16_t **chrVSrc, int chrFilterSize, \
  1145. const int16_t **alpSrc, uint8_t *dest, int dstW, \
  1146. int y) \
  1147. { \
  1148. name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
  1149. chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
  1150. alpSrc, dest, dstW, y, fmt, hasAlpha); \
  1151. }
  1152. #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
  1153. static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
  1154. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1155. const int16_t *abuf[2], uint8_t *dest, int dstW, \
  1156. int yalpha, int uvalpha, int y) \
  1157. { \
  1158. name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
  1159. dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
  1160. }
  1161. #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
  1162. static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
  1163. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1164. const int16_t *abuf0, uint8_t *dest, int dstW, \
  1165. int uvalpha, int y) \
  1166. { \
  1167. name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
  1168. dstW, uvalpha, y, fmt, hasAlpha); \
  1169. }
  1170. YUV2RGBWRAPPER(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
  1171. YUV2RGBWRAPPER(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
  1172. YUV2RGBWRAPPER(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
  1173. YUV2RGBWRAPPER(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
  1174. YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
  1175. YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
  1176. YUV2RGBWRAPPERX2(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
  1177. YUV2RGBWRAPPERX2(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
  1178. YUV2RGBWRAPPERX2(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
  1179. YUV2RGBWRAPPERX2(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
  1180. YUV2RGBWRAPPERX2(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
  1181. YUV2RGBWRAPPERX2(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
  1182. YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1183. YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1184. YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1185. YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1186. YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1187. YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
  1188. YUV2RGBWRAPPERX2(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1189. YUV2RGBWRAPPERX2(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1190. YUV2RGBWRAPPERX2(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1191. YUV2RGBWRAPPERX2(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1192. YUV2RGBWRAPPERX2(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1193. YUV2RGBWRAPPERX2(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
  1194. YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1195. YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1196. YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1197. YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1198. YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1199. YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
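// Packed 4:2:2 output: write422 packs two Y vectors and the U/V vectors to
// bytes and permutes them into YUYV, YVYU or UYVY byte order (32 output bytes
// per call).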
  1200. static av_always_inline void
  1201. write422(const vec_s16 vy1, const vec_s16 vy2,
  1202. const vec_s16 vu, const vec_s16 vv,
  1203. uint8_t *dest, const enum AVPixelFormat target)
  1204. {
  1205. vec_u8 vd1, vd2, tmp;
  1206. const vec_u8 yuyv1 = (vec_u8) {
  1207. 0x0, 0x10, 0x1, 0x18,
  1208. 0x2, 0x11, 0x3, 0x19,
  1209. 0x4, 0x12, 0x5, 0x1a,
  1210. 0x6, 0x13, 0x7, 0x1b };
  1211. const vec_u8 yuyv2 = (vec_u8) {
  1212. 0x8, 0x14, 0x9, 0x1c,
  1213. 0xa, 0x15, 0xb, 0x1d,
  1214. 0xc, 0x16, 0xd, 0x1e,
  1215. 0xe, 0x17, 0xf, 0x1f };
  1216. const vec_u8 yvyu1 = (vec_u8) {
  1217. 0x0, 0x18, 0x1, 0x10,
  1218. 0x2, 0x19, 0x3, 0x11,
  1219. 0x4, 0x1a, 0x5, 0x12,
  1220. 0x6, 0x1b, 0x7, 0x13 };
  1221. const vec_u8 yvyu2 = (vec_u8) {
  1222. 0x8, 0x1c, 0x9, 0x14,
  1223. 0xa, 0x1d, 0xb, 0x15,
  1224. 0xc, 0x1e, 0xd, 0x16,
  1225. 0xe, 0x1f, 0xf, 0x17 };
  1226. const vec_u8 uyvy1 = (vec_u8) {
  1227. 0x10, 0x0, 0x18, 0x1,
  1228. 0x11, 0x2, 0x19, 0x3,
  1229. 0x12, 0x4, 0x1a, 0x5,
  1230. 0x13, 0x6, 0x1b, 0x7 };
  1231. const vec_u8 uyvy2 = (vec_u8) {
  1232. 0x14, 0x8, 0x1c, 0x9,
  1233. 0x15, 0xa, 0x1d, 0xb,
  1234. 0x16, 0xc, 0x1e, 0xd,
  1235. 0x17, 0xe, 0x1f, 0xf };
  1236. vd1 = vec_packsu(vy1, vy2);
  1237. vd2 = vec_packsu(vu, vv);
  1238. switch (target) {
  1239. case AV_PIX_FMT_YUYV422:
  1240. tmp = vec_perm(vd1, vd2, yuyv1);
  1241. vec_st(tmp, 0, dest);
  1242. tmp = vec_perm(vd1, vd2, yuyv2);
  1243. vec_st(tmp, 16, dest);
  1244. break;
  1245. case AV_PIX_FMT_YVYU422:
  1246. tmp = vec_perm(vd1, vd2, yvyu1);
  1247. vec_st(tmp, 0, dest);
  1248. tmp = vec_perm(vd1, vd2, yvyu2);
  1249. vec_st(tmp, 16, dest);
  1250. break;
  1251. case AV_PIX_FMT_UYVY422:
  1252. tmp = vec_perm(vd1, vd2, uyvy1);
  1253. vec_st(tmp, 0, dest);
  1254. tmp = vec_perm(vd1, vd2, uyvy2);
  1255. vec_st(tmp, 16, dest);
  1256. break;
  1257. }
  1258. }
  1259. static av_always_inline void
  1260. yuv2422_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
  1261. const int16_t **lumSrc, int lumFilterSize,
  1262. const int16_t *chrFilter, const int16_t **chrUSrc,
  1263. const int16_t **chrVSrc, int chrFilterSize,
  1264. const int16_t **alpSrc, uint8_t *dest, int dstW,
  1265. int y, enum AVPixelFormat target)
  1266. {
  1267. int i, j;
  1268. vec_s16 vy1, vy2, vu, vv;
  1269. vec_s32 vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4;
  1270. vec_s16 vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
  1271. const vec_s32 start = vec_splats(1 << 18);
  1272. const vec_u32 shift19 = vec_splats(19U);
  1273. for (i = 0; i < lumFilterSize; i++)
  1274. vlumFilter[i] = vec_splats(lumFilter[i]);
  1275. for (i = 0; i < chrFilterSize; i++)
  1276. vchrFilter[i] = vec_splats(chrFilter[i]);
  1277. for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
  1278. vy32[0] =
  1279. vy32[1] =
  1280. vy32[2] =
  1281. vy32[3] =
  1282. vu32[0] =
  1283. vu32[1] =
  1284. vv32[0] =
  1285. vv32[1] = start;
  1286. for (j = 0; j < lumFilterSize; j++) {
  1287. vv = vec_ld(0, &lumSrc[j][i * 2]);
  1288. tmp = vec_mule(vv, vlumFilter[j]);
  1289. tmp2 = vec_mulo(vv, vlumFilter[j]);
  1290. tmp3 = vec_mergeh(tmp, tmp2);
  1291. tmp4 = vec_mergel(tmp, tmp2);
  1292. vy32[0] = vec_adds(vy32[0], tmp3);
  1293. vy32[1] = vec_adds(vy32[1], tmp4);
  1294. vv = vec_ld(0, &lumSrc[j][(i + 4) * 2]);
  1295. tmp = vec_mule(vv, vlumFilter[j]);
  1296. tmp2 = vec_mulo(vv, vlumFilter[j]);
  1297. tmp3 = vec_mergeh(tmp, tmp2);
  1298. tmp4 = vec_mergel(tmp, tmp2);
  1299. vy32[2] = vec_adds(vy32[2], tmp3);
  1300. vy32[3] = vec_adds(vy32[3], tmp4);
  1301. }
  1302. for (j = 0; j < chrFilterSize; j++) {
  1303. vv = vec_ld(0, &chrUSrc[j][i]);
  1304. tmp = vec_mule(vv, vchrFilter[j]);
  1305. tmp2 = vec_mulo(vv, vchrFilter[j]);
  1306. tmp3 = vec_mergeh(tmp, tmp2);
  1307. tmp4 = vec_mergel(tmp, tmp2);
  1308. vu32[0] = vec_adds(vu32[0], tmp3);
  1309. vu32[1] = vec_adds(vu32[1], tmp4);
  1310. vv = vec_ld(0, &chrVSrc[j][i]);
  1311. tmp = vec_mule(vv, vchrFilter[j]);
  1312. tmp2 = vec_mulo(vv, vchrFilter[j]);
  1313. tmp3 = vec_mergeh(tmp, tmp2);
  1314. tmp4 = vec_mergel(tmp, tmp2);
  1315. vv32[0] = vec_adds(vv32[0], tmp3);
  1316. vv32[1] = vec_adds(vv32[1], tmp4);
  1317. }
  1318. for (j = 0; j < 4; j++) {
  1319. vy32[j] = vec_sra(vy32[j], shift19);
  1320. }
  1321. for (j = 0; j < 2; j++) {
  1322. vu32[j] = vec_sra(vu32[j], shift19);
  1323. vv32[j] = vec_sra(vv32[j], shift19);
  1324. }
  1325. vy1 = vec_packs(vy32[0], vy32[1]);
  1326. vy2 = vec_packs(vy32[2], vy32[3]);
  1327. vu = vec_packs(vu32[0], vu32[1]);
  1328. vv = vec_packs(vv32[0], vv32[1]);
  1329. write422(vy1, vy2, vu, vv, &dest[i * 4], target);
  1330. }
  1331. }
  1332. #define SETUP(x, buf0, buf1, alpha) { \
  1333. x = vec_ld(0, buf0); \
  1334. tmp = vec_mule(x, alpha); \
  1335. tmp2 = vec_mulo(x, alpha); \
  1336. tmp3 = vec_mergeh(tmp, tmp2); \
  1337. tmp4 = vec_mergel(tmp, tmp2); \
  1338. \
  1339. x = vec_ld(0, buf1); \
  1340. tmp = vec_mule(x, alpha); \
  1341. tmp2 = vec_mulo(x, alpha); \
  1342. tmp5 = vec_mergeh(tmp, tmp2); \
  1343. tmp6 = vec_mergel(tmp, tmp2); \
  1344. \
  1345. tmp3 = vec_add(tmp3, tmp5); \
  1346. tmp4 = vec_add(tmp4, tmp6); \
  1347. \
  1348. tmp3 = vec_sra(tmp3, shift19); \
  1349. tmp4 = vec_sra(tmp4, shift19); \
  1350. x = vec_packs(tmp3, tmp4); \
  1351. }
  1352. static av_always_inline void
  1353. yuv2422_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  1354. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1355. const int16_t *abuf[2], uint8_t *dest, int dstW,
  1356. int yalpha, int uvalpha, int y,
  1357. enum AVPixelFormat target)
  1358. {
  1359. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  1360. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  1361. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
  1362. const int16_t yalpha1 = 4096 - yalpha;
  1363. const int16_t uvalpha1 = 4096 - uvalpha;
  1364. vec_s16 vy1, vy2, vu, vv;
  1365. vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  1366. const vec_s16 vyalpha1 = vec_splats(yalpha1);
  1367. const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
  1368. const vec_u32 shift19 = vec_splats(19U);
  1369. int i;
  1370. av_assert2(yalpha <= 4096U);
  1371. av_assert2(uvalpha <= 4096U);
  1372. for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
  1373. SETUP(vy1, &buf0[i * 2], &buf1[i * 2], vyalpha1)
  1374. SETUP(vy2, &buf0[(i + 4) * 2], &buf1[(i + 4) * 2], vyalpha1)
  1375. SETUP(vu, &ubuf0[i], &ubuf1[i], vuvalpha1)
  1376. SETUP(vv, &vbuf0[i], &vbuf1[i], vuvalpha1)
  1377. write422(vy1, vy2, vu, vv, &dest[i * 4], target);
  1378. }
  1379. }
  1380. #undef SETUP
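// Unscaled 4:2:2 path: round (+64) and shift the intermediates down by 7 bits;
// when uvalpha >= 2048 the two chroma lines are summed and shifted by 8 bits
// instead (i.e. averaged).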
  1381. static av_always_inline void
  1382. yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
  1383. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1384. const int16_t *abuf0, uint8_t *dest, int dstW,
  1385. int uvalpha, int y, enum AVPixelFormat target)
  1386. {
  1387. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  1388. vec_s16 vy1, vy2, vu, vv, tmp;
  1389. const vec_s16 add64 = vec_splats((int16_t) 64);
  1390. const vec_s16 add128 = vec_splats((int16_t) 128);
  1391. const vec_u16 shift7 = vec_splat_u16(7);
  1392. const vec_u16 shift8 = vec_splat_u16(8);
  1393. int i;
  1394. if (uvalpha < 2048) {
  1395. for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
  1396. vy1 = vec_ld(0, &buf0[i * 2]);
  1397. vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
  1398. vu = vec_ld(0, &ubuf0[i]);
  1399. vv = vec_ld(0, &vbuf0[i]);
  1400. vy1 = vec_add(vy1, add64);
  1401. vy2 = vec_add(vy2, add64);
  1402. vu = vec_add(vu, add64);
  1403. vv = vec_add(vv, add64);
  1404. vy1 = vec_sra(vy1, shift7);
  1405. vy2 = vec_sra(vy2, shift7);
  1406. vu = vec_sra(vu, shift7);
  1407. vv = vec_sra(vv, shift7);
  1408. write422(vy1, vy2, vu, vv, &dest[i * 4], target);
  1409. }
  1410. } else {
  1411. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  1412. for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
  1413. vy1 = vec_ld(0, &buf0[i * 2]);
  1414. vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
  1415. vu = vec_ld(0, &ubuf0[i]);
  1416. tmp = vec_ld(0, &ubuf1[i]);
  1417. vu = vec_adds(vu, tmp);
  1418. vv = vec_ld(0, &vbuf0[i]);
  1419. tmp = vec_ld(0, &vbuf1[i]);
  1420. vv = vec_adds(vv, tmp);
  1421. vy1 = vec_add(vy1, add64);
  1422. vy2 = vec_add(vy2, add64);
  1423. vu = vec_adds(vu, add128);
  1424. vv = vec_adds(vv, add128);
  1425. vy1 = vec_sra(vy1, shift7);
  1426. vy2 = vec_sra(vy2, shift7);
  1427. vu = vec_sra(vu, shift8);
  1428. vv = vec_sra(vv, shift8);
  1429. write422(vy1, vy2, vu, vv, &dest[i * 4], target);
  1430. }
  1431. }
  1432. }
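/*
 * The wrapper macros below stamp out the three entry points swscale expects
 * per packed format: *_X_vsx (arbitrary vertical filter), *_2_vsx (two-row
 * blend) and *_1_vsx (single row), each forwarding to the shared template
 * with the matching AV_PIX_FMT_* value.
 */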
  1433. #define YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
  1434. static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
  1435. const int16_t **lumSrc, int lumFilterSize, \
  1436. const int16_t *chrFilter, const int16_t **chrUSrc, \
  1437. const int16_t **chrVSrc, int chrFilterSize, \
  1438. const int16_t **alpSrc, uint8_t *dest, int dstW, \
  1439. int y) \
  1440. { \
  1441. name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
  1442. chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
  1443. alpSrc, dest, dstW, y, fmt); \
  1444. }
  1445. #define YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
  1446. YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
  1447. static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
  1448. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1449. const int16_t *abuf[2], uint8_t *dest, int dstW, \
  1450. int yalpha, int uvalpha, int y) \
  1451. { \
  1452. name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
  1453. dest, dstW, yalpha, uvalpha, y, fmt); \
  1454. }
  1455. #define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
  1456. YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
  1457. static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
  1458. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1459. const int16_t *abuf0, uint8_t *dest, int dstW, \
  1460. int uvalpha, int y) \
  1461. { \
  1462. name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
  1463. abuf0, dest, dstW, uvalpha, \
  1464. y, fmt); \
  1465. }
  1466. YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
  1467. YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
  1468. YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
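/*
 * Fast bilinear horizontal luma scaler. xpos walks the source in 16.16 fixed
 * point; per output sample this approximates the scalar
 *   xx     = xpos >> 16;
 *   xalpha = (xpos & 0xffff) >> 9;                      // 7-bit weight
 *   dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha;
 * done 16 samples at a time: vperm gathers src[xx] relative to the block
 * start, valpha holds the packed 7-bit weights, and the trailing loop
 * replicates the last source pixel for outputs that would read past srcW.
 */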
  1469. static void hyscale_fast_vsx(SwsContext *c, int16_t *dst, int dstWidth,
  1470. const uint8_t *src, int srcW, int xInc)
  1471. {
  1472. int i;
  1473. unsigned int xpos = 0, xx;
  1474. vec_u8 vin, vin2, vperm;
  1475. vec_s8 vmul, valpha;
  1476. vec_s16 vtmp, vtmp2, vtmp3, vtmp4;
  1477. vec_u16 vd_l, vd_r, vcoord16[2];
  1478. vec_u32 vcoord[4];
  1479. const vec_u32 vadd = (vec_u32) {
  1480. 0,
  1481. xInc * 1,
  1482. xInc * 2,
  1483. xInc * 3,
  1484. };
1485. const vec_u16 vadd16 = (vec_u16) { // Entries truncate to 16 bits, i.e. (k * xInc) & 0xffff
  1486. 0,
  1487. xInc * 1,
  1488. xInc * 2,
  1489. xInc * 3,
  1490. xInc * 4,
  1491. xInc * 5,
  1492. xInc * 6,
  1493. xInc * 7,
  1494. };
  1495. const vec_u32 vshift16 = vec_splats((uint32_t) 16);
  1496. const vec_u16 vshift9 = vec_splat_u16(9);
  1497. const vec_u8 vzero = vec_splat_u8(0);
  1498. const vec_u16 vshift = vec_splat_u16(7);
  1499. for (i = 0; i < dstWidth; i += 16) {
  1500. vcoord16[0] = vec_splats((uint16_t) xpos);
  1501. vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));
  1502. vcoord16[0] = vec_add(vcoord16[0], vadd16);
  1503. vcoord16[1] = vec_add(vcoord16[1], vadd16);
  1504. vcoord16[0] = vec_sr(vcoord16[0], vshift9);
  1505. vcoord16[1] = vec_sr(vcoord16[1], vshift9);
  1506. valpha = (vec_s8) vec_pack(vcoord16[0], vcoord16[1]);
  1507. xx = xpos >> 16;
  1508. vin = vec_vsx_ld(0, &src[xx]);
  1509. vcoord[0] = vec_splats(xpos & 0xffff);
  1510. vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
  1511. vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
  1512. vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);
  1513. vcoord[0] = vec_add(vcoord[0], vadd);
  1514. vcoord[1] = vec_add(vcoord[1], vadd);
  1515. vcoord[2] = vec_add(vcoord[2], vadd);
  1516. vcoord[3] = vec_add(vcoord[3], vadd);
  1517. vcoord[0] = vec_sr(vcoord[0], vshift16);
  1518. vcoord[1] = vec_sr(vcoord[1], vshift16);
  1519. vcoord[2] = vec_sr(vcoord[2], vshift16);
  1520. vcoord[3] = vec_sr(vcoord[3], vshift16);
  1521. vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
  1522. vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
  1523. vperm = vec_pack(vcoord16[0], vcoord16[1]);
  1524. vin = vec_perm(vin, vin, vperm);
  1525. vin2 = vec_vsx_ld(1, &src[xx]);
  1526. vin2 = vec_perm(vin2, vin2, vperm);
  1527. vmul = (vec_s8) vec_sub(vin2, vin);
  1528. vtmp = vec_mule(vmul, valpha);
  1529. vtmp2 = vec_mulo(vmul, valpha);
  1530. vtmp3 = vec_mergeh(vtmp, vtmp2);
  1531. vtmp4 = vec_mergel(vtmp, vtmp2);
  1532. vd_l = (vec_u16) vec_mergeh(vin, vzero);
  1533. vd_r = (vec_u16) vec_mergel(vin, vzero);
  1534. vd_l = vec_sl(vd_l, vshift);
  1535. vd_r = vec_sl(vd_r, vshift);
  1536. vd_l = vec_add(vd_l, (vec_u16) vtmp3);
  1537. vd_r = vec_add(vd_r, (vec_u16) vtmp4);
  1538. vec_st((vec_s16) vd_l, 0, &dst[i]);
  1539. vec_st((vec_s16) vd_r, 0, &dst[i + 8]);
  1540. xpos += xInc * 16;
  1541. }
1542. for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
1543. dst[i] = src[srcW - 1] * 128;
  1544. }
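/*
 * HCSCALE does the same gather-and-blend for one chroma plane; with the 7-bit
 * weight in valpha and its complement in valphaxor (alpha ^ 127 == 127 - alpha
 * for 7-bit values) the per-sample result approximates
 *   dst[i] = in[xx] * (127 - xalpha) + in[xx + 1] * xalpha;
 */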
  1545. #define HCSCALE(in, out) \
  1546. vin = vec_vsx_ld(0, &in[xx]); \
  1547. vin = vec_perm(vin, vin, vperm); \
  1548. \
  1549. vin2 = vec_vsx_ld(1, &in[xx]); \
  1550. vin2 = vec_perm(vin2, vin2, vperm); \
  1551. \
  1552. vtmp = vec_mule(vin, valphaxor); \
  1553. vtmp2 = vec_mulo(vin, valphaxor); \
  1554. vtmp3 = vec_mergeh(vtmp, vtmp2); \
  1555. vtmp4 = vec_mergel(vtmp, vtmp2); \
  1556. \
  1557. vtmp = vec_mule(vin2, valpha); \
  1558. vtmp2 = vec_mulo(vin2, valpha); \
  1559. vd_l = vec_mergeh(vtmp, vtmp2); \
  1560. vd_r = vec_mergel(vtmp, vtmp2); \
  1561. \
  1562. vd_l = vec_add(vd_l, vtmp3); \
  1563. vd_r = vec_add(vd_r, vtmp4); \
  1564. \
  1565. vec_st((vec_s16) vd_l, 0, &out[i]); \
  1566. vec_st((vec_s16) vd_r, 0, &out[i + 8])
  1567. static void hcscale_fast_vsx(SwsContext *c, int16_t *dst1, int16_t *dst2,
  1568. int dstWidth, const uint8_t *src1,
  1569. const uint8_t *src2, int srcW, int xInc)
  1570. {
  1571. int i;
  1572. unsigned int xpos = 0, xx;
  1573. vec_u8 vin, vin2, vperm;
  1574. vec_u8 valpha, valphaxor;
  1575. vec_u16 vtmp, vtmp2, vtmp3, vtmp4;
  1576. vec_u16 vd_l, vd_r, vcoord16[2];
  1577. vec_u32 vcoord[4];
  1578. const vec_u8 vxor = vec_splats((uint8_t) 127);
  1579. const vec_u32 vadd = (vec_u32) {
  1580. 0,
  1581. xInc * 1,
  1582. xInc * 2,
  1583. xInc * 3,
  1584. };
1585. const vec_u16 vadd16 = (vec_u16) { // Entries truncate to 16 bits, i.e. (k * xInc) & 0xffff
  1586. 0,
  1587. xInc * 1,
  1588. xInc * 2,
  1589. xInc * 3,
  1590. xInc * 4,
  1591. xInc * 5,
  1592. xInc * 6,
  1593. xInc * 7,
  1594. };
  1595. const vec_u32 vshift16 = vec_splats((uint32_t) 16);
  1596. const vec_u16 vshift9 = vec_splat_u16(9);
  1597. for (i = 0; i < dstWidth; i += 16) {
  1598. vcoord16[0] = vec_splats((uint16_t) xpos);
  1599. vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));
  1600. vcoord16[0] = vec_add(vcoord16[0], vadd16);
  1601. vcoord16[1] = vec_add(vcoord16[1], vadd16);
  1602. vcoord16[0] = vec_sr(vcoord16[0], vshift9);
  1603. vcoord16[1] = vec_sr(vcoord16[1], vshift9);
  1604. valpha = vec_pack(vcoord16[0], vcoord16[1]);
  1605. valphaxor = vec_xor(valpha, vxor);
  1606. xx = xpos >> 16;
  1607. vcoord[0] = vec_splats(xpos & 0xffff);
  1608. vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
  1609. vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
  1610. vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);
  1611. vcoord[0] = vec_add(vcoord[0], vadd);
  1612. vcoord[1] = vec_add(vcoord[1], vadd);
  1613. vcoord[2] = vec_add(vcoord[2], vadd);
  1614. vcoord[3] = vec_add(vcoord[3], vadd);
  1615. vcoord[0] = vec_sr(vcoord[0], vshift16);
  1616. vcoord[1] = vec_sr(vcoord[1], vshift16);
  1617. vcoord[2] = vec_sr(vcoord[2], vshift16);
  1618. vcoord[3] = vec_sr(vcoord[3], vshift16);
  1619. vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
  1620. vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
  1621. vperm = vec_pack(vcoord16[0], vcoord16[1]);
  1622. HCSCALE(src1, dst1);
  1623. HCSCALE(src2, dst2);
  1624. xpos += xInc * 16;
  1625. }
1626. for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
1627. dst1[i] = src1[srcW - 1] * 128;
1628. dst2[i] = src2[srcW - 1] * 128;
1629. }
  1630. }
  1631. #undef HCSCALE
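/*
 * General horizontal filter, 8-bit input to 19-bit output. Taps are consumed
 * 8 at a time: the bytes are zero-extended to int16, multiply-accumulated
 * with vec_msums, and vec_sums folds the partial sums at the end. vunusedtab
 * holds permute masks that blank the tail elements when filterSize is not a
 * multiple of 8 (index 0x10 selects from the zero vector).
 */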
  1632. static void hScale8To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
  1633. const uint8_t *src, const int16_t *filter,
  1634. const int32_t *filterPos, int filterSize)
  1635. {
  1636. int i, j;
  1637. int32_t *dst = (int32_t *) _dst;
  1638. vec_s16 vfilter, vin;
  1639. vec_u8 vin8;
  1640. vec_s32 vout;
  1641. const vec_u8 vzero = vec_splat_u8(0);
  1642. const vec_u8 vunusedtab[8] = {
  1643. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1644. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
  1645. (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
  1646. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1647. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
  1648. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1649. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
  1650. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1651. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1652. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1653. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1654. 0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1655. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1656. 0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
  1657. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1658. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
  1659. };
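/* Entry k of the table keeps the first k int16 elements and zeroes the rest. */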
  1660. const vec_u8 vunused = vunusedtab[filterSize % 8];
  1661. if (filterSize == 1) {
  1662. for (i = 0; i < dstW; i++) {
  1663. int srcPos = filterPos[i];
  1664. int val = 0;
  1665. for (j = 0; j < filterSize; j++) {
  1666. val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
  1667. }
  1668. dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
  1669. }
  1670. } else {
  1671. for (i = 0; i < dstW; i++) {
  1672. const int srcPos = filterPos[i];
  1673. vout = vec_splat_s32(0);
  1674. for (j = 0; j < filterSize; j += 8) {
  1675. vin8 = vec_vsx_ld(0, &src[srcPos + j]);
  1676. vin = (vec_s16) vec_mergeh(vin8, vzero);
1677. if (j + 8 > filterSize) // Zero out the unused taps in the last iteration
  1678. vin = vec_perm(vin, (vec_s16) vzero, vunused);
  1679. vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
  1680. vout = vec_msums(vin, vfilter, vout);
  1681. }
  1682. vout = vec_sums(vout, (vec_s32) vzero);
  1683. dst[i] = FFMIN(vout[3] >> 3, (1 << 19) - 1);
  1684. }
  1685. }
  1686. }
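/*
 * 16-bit input variant of the 19-bit horizontal filter: the int16 taps are
 * widened to 32 bits with vec_unpackh/vec_unpackl so the products do not
 * overflow, and the final shift depends on the source depth (forced to 9 for
 * RGB/PAL8 sources and to 11 for float input, which is handled as 16 bpc).
 */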
  1687. static void hScale16To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
  1688. const uint8_t *_src, const int16_t *filter,
  1689. const int32_t *filterPos, int filterSize)
  1690. {
  1691. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
  1692. int i, j;
  1693. int32_t *dst = (int32_t *) _dst;
  1694. const uint16_t *src = (const uint16_t *) _src;
  1695. int bits = desc->comp[0].depth - 1;
  1696. int sh = bits - 4;
  1697. vec_s16 vfilter, vin;
  1698. vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
  1699. const vec_u8 vzero = vec_splat_u8(0);
  1700. const vec_u8 vunusedtab[8] = {
  1701. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1702. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
  1703. (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
  1704. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1705. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
  1706. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1707. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
  1708. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1709. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1710. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1711. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1712. 0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1713. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1714. 0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
  1715. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1716. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
  1717. };
  1718. const vec_u8 vunused = vunusedtab[filterSize % 8];
1719. if ((isAnyRGB(c->srcFormat) || c->srcFormat == AV_PIX_FMT_PAL8) && desc->comp[0].depth < 16) {
1720. sh = 9;
1721. } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like 16 bpc uint */
1722. sh = 16 - 1 - 4;
1723. }
  1724. if (filterSize == 1) {
  1725. for (i = 0; i < dstW; i++) {
  1726. int srcPos = filterPos[i];
  1727. int val = 0;
  1728. for (j = 0; j < filterSize; j++) {
  1729. val += src[srcPos + j] * filter[filterSize * i + j];
  1730. }
1731. // filter=14 bit, input=16 bit, output=30 bit, >> sh (11 for 16-bit input) makes 19 bit
  1732. dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
  1733. }
  1734. } else {
  1735. for (i = 0; i < dstW; i++) {
  1736. const int srcPos = filterPos[i];
  1737. vout = vec_splat_s32(0);
  1738. for (j = 0; j < filterSize; j += 8) {
  1739. vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
1740. if (j + 8 > filterSize) // Zero out the unused taps in the last iteration
  1741. vin = vec_perm(vin, (vec_s16) vzero, vunused);
  1742. vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
  1743. vfilter32_l = vec_unpackh(vfilter);
  1744. vfilter32_r = vec_unpackl(vfilter);
  1745. vtmp = (vec_s32) vec_mergeh(vin, (vec_s16) vzero);
  1746. vtmp2 = (vec_s32) vec_mergel(vin, (vec_s16) vzero);
  1747. vtmp = vec_mul(vtmp, vfilter32_l);
  1748. vtmp2 = vec_mul(vtmp2, vfilter32_r);
  1749. vout = vec_adds(vout, vtmp);
  1750. vout = vec_adds(vout, vtmp2);
  1751. }
  1752. vout = vec_sums(vout, (vec_s32) vzero);
  1753. dst[i] = FFMIN(vout[3] >> sh, (1 << 19) - 1);
  1754. }
  1755. }
  1756. }
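/*
 * Same scheme as hScale16To19_vsx but producing 15-bit output; sh is the
 * source depth minus one (13 for RGB/PAL8 sources, 15 for float input).
 */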
  1757. static void hScale16To15_vsx(SwsContext *c, int16_t *dst, int dstW,
  1758. const uint8_t *_src, const int16_t *filter,
  1759. const int32_t *filterPos, int filterSize)
  1760. {
  1761. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
  1762. int i, j;
  1763. const uint16_t *src = (const uint16_t *) _src;
  1764. int sh = desc->comp[0].depth - 1;
  1765. vec_s16 vfilter, vin;
  1766. vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
  1767. const vec_u8 vzero = vec_splat_u8(0);
  1768. const vec_u8 vunusedtab[8] = {
  1769. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1770. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
  1771. (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
  1772. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1773. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
  1774. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1775. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
  1776. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1777. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1778. 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1779. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1780. 0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
  1781. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1782. 0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
  1783. (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
  1784. 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
  1785. };
  1786. const vec_u8 vunused = vunusedtab[filterSize % 8];
1787. if (sh < 15) {
1788. sh = isAnyRGB(c->srcFormat) || c->srcFormat == AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
1789. } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like 16 bpc uint */
1790. sh = 16 - 1;
1791. }
  1792. if (filterSize == 1) {
  1793. for (i = 0; i < dstW; i++) {
  1794. int srcPos = filterPos[i];
  1795. int val = 0;
  1796. for (j = 0; j < filterSize; j++) {
  1797. val += src[srcPos + j] * filter[filterSize * i + j];
  1798. }
1799. // filter=14 bit, input=16 bit, output=30 bit, >> sh (15 for 16-bit input) makes 15 bit
  1800. dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
  1801. }
  1802. } else {
  1803. for (i = 0; i < dstW; i++) {
  1804. const int srcPos = filterPos[i];
  1805. vout = vec_splat_s32(0);
  1806. for (j = 0; j < filterSize; j += 8) {
  1807. vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
1808. if (j + 8 > filterSize) // Zero out the unused taps in the last iteration
  1809. vin = vec_perm(vin, (vec_s16) vzero, vunused);
  1810. vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
  1811. vfilter32_l = vec_unpackh(vfilter);
  1812. vfilter32_r = vec_unpackl(vfilter);
  1813. vtmp = (vec_s32) vec_mergeh(vin, (vec_s16) vzero);
  1814. vtmp2 = (vec_s32) vec_mergel(vin, (vec_s16) vzero);
  1815. vtmp = vec_mul(vtmp, vfilter32_l);
  1816. vtmp2 = vec_mul(vtmp2, vfilter32_r);
  1817. vout = vec_adds(vout, vtmp);
  1818. vout = vec_adds(vout, vtmp2);
  1819. }
  1820. vout = vec_sums(vout, (vec_s32) vzero);
  1821. dst[i] = FFMIN(vout[3] >> sh, (1 << 15) - 1);
  1822. }
  1823. }
  1824. }
  1825. #endif /* !HAVE_BIGENDIAN */
  1826. #endif /* HAVE_VSX */
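/*
 * Runtime init: install the VSX code paths when the CPU reports VSX support.
 * Most of them exist only for little-endian builds, and the RGB output and
 * 16-bit horizontal scalers additionally require POWER8.
 */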
  1827. av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
  1828. {
  1829. #if HAVE_VSX
  1830. enum AVPixelFormat dstFormat = c->dstFormat;
  1831. const int cpu_flags = av_get_cpu_flags();
1832. const unsigned char power8 = HAVE_POWER8 && (cpu_flags & AV_CPU_FLAG_POWER8);
  1833. if (!(cpu_flags & AV_CPU_FLAG_VSX))
  1834. return;
  1835. #if !HAVE_BIGENDIAN
  1836. if (c->srcBpc == 8) {
  1837. if (c->dstBpc <= 14) {
  1838. c->hyScale = c->hcScale = hScale_real_vsx;
  1839. if (c->flags & SWS_FAST_BILINEAR && c->dstW >= c->srcW && c->chrDstW >= c->chrSrcW) {
  1840. c->hyscale_fast = hyscale_fast_vsx;
  1841. c->hcscale_fast = hcscale_fast_vsx;
  1842. }
  1843. } else {
  1844. c->hyScale = c->hcScale = hScale8To19_vsx;
  1845. }
  1846. } else {
  1847. if (power8) {
  1848. c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_vsx
  1849. : hScale16To15_vsx;
  1850. }
  1851. }
  1852. if (!is16BPS(dstFormat) && !isNBPS(dstFormat) && !isSemiPlanarYUV(dstFormat) &&
  1853. dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE &&
  1854. !c->needAlpha) {
  1855. c->yuv2planeX = yuv2planeX_vsx;
  1856. }
  1857. #endif
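/* Planar output writers, selected by destination bit depth; the 9-16 bit
 * BE/LE variants are compiled only on little-endian hosts. */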
  1858. if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->needAlpha) {
  1859. switch (c->dstBpc) {
  1860. case 8:
  1861. c->yuv2plane1 = yuv2plane1_8_vsx;
  1862. break;
  1863. #if !HAVE_BIGENDIAN
  1864. case 9:
  1865. c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx : yuv2plane1_9LE_vsx;
  1866. c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx : yuv2planeX_9LE_vsx;
  1867. break;
  1868. case 10:
  1869. c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
  1870. c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
  1871. break;
  1872. case 12:
  1873. c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
  1874. c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
  1875. break;
  1876. case 14:
  1877. c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
  1878. c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
  1879. break;
  1880. case 16:
  1881. c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
  1882. #if HAVE_POWER8
  1883. if (cpu_flags & AV_CPU_FLAG_POWER8) {
  1884. c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
  1885. }
  1886. #endif /* HAVE_POWER8 */
  1887. break;
  1888. #endif /* !HAVE_BIGENDIAN */
  1889. }
  1890. }
  1891. if (c->flags & SWS_BITEXACT)
  1892. return;
  1893. #if !HAVE_BIGENDIAN
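/* Packed output writers: the 4:2:2 writers need only VSX, while the RGB/BGR
 * paths (both full-chroma and ordinary) additionally require POWER8. */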
  1894. if (c->flags & SWS_FULL_CHR_H_INT) {
  1895. switch (dstFormat) {
  1896. case AV_PIX_FMT_RGB24:
  1897. if (power8) {
  1898. c->yuv2packed1 = yuv2rgb24_full_1_vsx;
  1899. c->yuv2packed2 = yuv2rgb24_full_2_vsx;
  1900. c->yuv2packedX = yuv2rgb24_full_X_vsx;
  1901. }
  1902. break;
  1903. case AV_PIX_FMT_BGR24:
  1904. if (power8) {
  1905. c->yuv2packed1 = yuv2bgr24_full_1_vsx;
  1906. c->yuv2packed2 = yuv2bgr24_full_2_vsx;
  1907. c->yuv2packedX = yuv2bgr24_full_X_vsx;
  1908. }
  1909. break;
  1910. case AV_PIX_FMT_BGRA:
  1911. if (power8) {
  1912. if (!c->needAlpha) {
  1913. c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
  1914. c->yuv2packed2 = yuv2bgrx32_full_2_vsx;
  1915. c->yuv2packedX = yuv2bgrx32_full_X_vsx;
  1916. }
  1917. }
  1918. break;
  1919. case AV_PIX_FMT_RGBA:
  1920. if (power8) {
  1921. if (!c->needAlpha) {
  1922. c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
  1923. c->yuv2packed2 = yuv2rgbx32_full_2_vsx;
  1924. c->yuv2packedX = yuv2rgbx32_full_X_vsx;
  1925. }
  1926. }
  1927. break;
  1928. case AV_PIX_FMT_ARGB:
  1929. if (power8) {
  1930. if (!c->needAlpha) {
  1931. c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
  1932. c->yuv2packed2 = yuv2xrgb32_full_2_vsx;
  1933. c->yuv2packedX = yuv2xrgb32_full_X_vsx;
  1934. }
  1935. }
  1936. break;
  1937. case AV_PIX_FMT_ABGR:
  1938. if (power8) {
  1939. if (!c->needAlpha) {
  1940. c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
  1941. c->yuv2packed2 = yuv2xbgr32_full_2_vsx;
  1942. c->yuv2packedX = yuv2xbgr32_full_X_vsx;
  1943. }
  1944. }
  1945. break;
  1946. }
  1947. } else { /* !SWS_FULL_CHR_H_INT */
  1948. switch (dstFormat) {
  1949. case AV_PIX_FMT_YUYV422:
  1950. c->yuv2packed1 = yuv2yuyv422_1_vsx;
  1951. c->yuv2packed2 = yuv2yuyv422_2_vsx;
  1952. c->yuv2packedX = yuv2yuyv422_X_vsx;
  1953. break;
  1954. case AV_PIX_FMT_YVYU422:
  1955. c->yuv2packed1 = yuv2yvyu422_1_vsx;
  1956. c->yuv2packed2 = yuv2yvyu422_2_vsx;
  1957. c->yuv2packedX = yuv2yvyu422_X_vsx;
  1958. break;
  1959. case AV_PIX_FMT_UYVY422:
  1960. c->yuv2packed1 = yuv2uyvy422_1_vsx;
  1961. c->yuv2packed2 = yuv2uyvy422_2_vsx;
  1962. c->yuv2packedX = yuv2uyvy422_X_vsx;
  1963. break;
  1964. case AV_PIX_FMT_BGRA:
  1965. if (power8) {
  1966. if (!c->needAlpha) {
  1967. c->yuv2packed1 = yuv2bgrx32_1_vsx;
  1968. c->yuv2packed2 = yuv2bgrx32_2_vsx;
  1969. }
  1970. }
  1971. break;
  1972. case AV_PIX_FMT_RGBA:
  1973. if (power8) {
  1974. if (!c->needAlpha) {
  1975. c->yuv2packed1 = yuv2rgbx32_1_vsx;
  1976. c->yuv2packed2 = yuv2rgbx32_2_vsx;
  1977. }
  1978. }
  1979. break;
  1980. case AV_PIX_FMT_ARGB:
  1981. if (power8) {
  1982. if (!c->needAlpha) {
  1983. c->yuv2packed1 = yuv2xrgb32_1_vsx;
  1984. c->yuv2packed2 = yuv2xrgb32_2_vsx;
  1985. }
  1986. }
  1987. break;
  1988. case AV_PIX_FMT_ABGR:
  1989. if (power8) {
  1990. if (!c->needAlpha) {
  1991. c->yuv2packed1 = yuv2xbgr32_1_vsx;
  1992. c->yuv2packed2 = yuv2xbgr32_2_vsx;
  1993. }
  1994. }
  1995. break;
  1996. case AV_PIX_FMT_RGB24:
  1997. if (power8) {
  1998. c->yuv2packed1 = yuv2rgb24_1_vsx;
  1999. c->yuv2packed2 = yuv2rgb24_2_vsx;
  2000. }
  2001. break;
  2002. case AV_PIX_FMT_BGR24:
  2003. if (power8) {
  2004. c->yuv2packed1 = yuv2bgr24_1_vsx;
  2005. c->yuv2packed2 = yuv2bgr24_2_vsx;
  2006. }
  2007. break;
  2008. }
  2009. }
  2010. #endif /* !HAVE_BIGENDIAN */
  2011. #endif /* HAVE_VSX */
  2012. }