/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#if HAVE_ALTIVEC

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)              \
    /* 1st stage */                                              \
    vz0 = vec_add(vb0,vb2);     /* temp[0] = Y[0] + Y[2] */      \
    vz1 = vec_sub(vb0,vb2);     /* temp[1] = Y[0] - Y[2] */      \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                         \
    vz2 = vec_sub(vz2,vb3);     /* temp[2] = Y[1].1/2 - Y[3] */  \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                         \
    vz3 = vec_add(vb1,vz3);     /* temp[3] = Y[1] + Y[3].1/2 */  \
    /* 2nd stage: output */                                      \
    va0 = vec_add(vz0,vz3);     /* x[0] = temp[0] + temp[3] */   \
    va1 = vec_add(vz1,vz2);     /* x[1] = temp[1] + temp[2] */   \
    va2 = vec_sub(vz1,vz2);     /* x[2] = temp[1] - temp[2] */   \
    va3 = vec_sub(vz0,vz3)      /* x[3] = temp[0] - temp[3] */

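/* For reference, a rough scalar sketch of the same 1-D inverse transform as
 * described by the comments above (names are illustrative only, not part of
 * the build):
 *
 *     int t0 = y[0] + y[2];
 *     int t1 = y[0] - y[2];
 *     int t2 = (y[1] >> 1) - y[3];
 *     int t3 = y[1] + (y[3] >> 1);
 *     x[0] = t0 + t3;
 *     x[1] = t1 + t2;
 *     x[2] = t1 - t2;
 *     x[3] = t0 - t3;
 */
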
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);  \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_packsu(va, zero_s16v);               \
    va_u32 = vec_splat((vec_u32)va_u8, 0);           \
    vec_ste(va_u32, element, (uint32_t*)dst);

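/* Reconstruct one 4x4 block: apply the inverse transform to the 16 residual
 * coefficients in block[], scale with rounding, add the result to the
 * prediction already in dst, and clip to 0..255 (the clip is implicit in
 * vec_packsu above). Adding 32 to block[0] before the transform is what
 * implements the "+ 32" rounding term. A rough scalar model (sketch only,
 * av_clip_uint8 as in libavutil):
 *
 *     r = idct_4x4(block);                           // two 1-D passes
 *     dst[i] = av_clip_uint8(dst[i] + ((r[i] + 32) >> 6));
 *
 * The coefficient block is cleared afterwards, as the memset below shows. */
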
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

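/* Store helper for the 8x8 path: loads 8 destination pixels through an
 * unaligned load, adds the >>6-scaled IDCT output with saturation, packs
 * back to unsigned bytes, and merges the result into the two aligned
 * 16-byte vectors covered by the (possibly unaligned) 8-byte store,
 * leaving the bytes selected out by 'sel' untouched. */
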
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                               \
    vec_u8 hv = vec_ld( 0, dest );                                     \
    vec_u8 lv = vec_ld( 7, dest );                                     \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );                \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                           \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);               \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);              \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);                  \
    vec_u8 edgehv;                                                     \
    /* unaligned store */                                              \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );           \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );               \
    lv = vec_sel( lv, bodyv, edgelv );                                 \
    vec_st( lv, 7, dest );                                             \
    hv = vec_ld( 0, dest );                                            \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );                      \
    hv = vec_sel( hv, bodyv, edgehv );                                 \
    vec_st( hv, 0, dest );                                             \
}

static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

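/* DC-only reconstruction: when a block has a single non-zero (DC) coefficient,
 * the full transform collapses to adding one constant to every pixel. The
 * scalar equivalent is roughly
 *
 *     dc = (block[0] + 32) >> 6;
 *     dst[i] = av_clip_uint8(dst[i] + dc);
 *
 * The vector version below adds max(dc, 0) and subtracts max(-dc, 0) with
 * saturating byte arithmetic (dcplus/dcminus), which gives the same clipped
 * result without widening each pixel to 16 bits. */
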
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

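/* The add16/add16intra/add8/add4 entry points below only dispatch: they walk
 * the macroblock's sub-blocks, index the non-zero-coefficient table nnzc[]
 * through scan8[], and pick either the full IDCT-add or the cheaper DC-only
 * path per block (nnz == 1 with a non-zero block[i*16] means "DC only"). */
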
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

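/* The helpers below service h264_h_loop_filter_luma_altivec, where the six
 * samples across the edge lie along each row and must be transposed into the
 * p2..q2 vectors first: readAndTranspose16x6 gathers them for 16 rows, the
 * filter then runs exactly as in the vertical case, and transpose4x16 plus
 * write16x4 put the four modified lines (p1, p0, q0, q1) back. */
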
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** @brief reads 16 rows of 6 bytes from src and transposes them into the six
    16-byte vectors r8..r13
    @todo FIXME: see if we can't spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);             \
    register vec_u8 r1  = unaligned_load(   src_stride, src);             \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);             \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);             \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);             \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);             \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);             \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);             \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);             \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);             \
                                                                          \
    r8  = unaligned_load( 8*src_stride, src);                             \
    r9  = unaligned_load( 9*src_stride, src);                             \
    r10 = unaligned_load(10*src_stride, src);                             \
    r11 = unaligned_load(11*src_stride, src);                             \
    r12 = unaligned_load(12*src_stride, src);                             \
    r13 = unaligned_load(13*src_stride, src);                             \
                                                                          \
    /* Merge first pairs */                                               \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                  \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                  \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                  \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                  \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                  \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                  \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                  \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                  \
                                                                          \
    /* Merge second pairs */                                              \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                       \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                       \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                       \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                       \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                       \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                       \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                       \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                       \
                                                                          \
    /* Third merge */                                                     \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/              \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/              \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/              \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/              \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/              \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/              \
    /* Don't need to compute 3 and 7 */                                   \
                                                                          \
    /* Final merge */                                                     \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                             \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                             \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                             \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                             \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                             \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                             \
    /* Don't need to compute 14 and 15 */                                 \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {
    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

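/* Per-pixel filter condition for the luma deblocking filter. For each of the
 * 16 lanes the mask is all-ones when the edge should be filtered, i.e. when
 *
 *     |p0 - q0| < alpha  &&  |p1 - p0| < beta  &&  |q1 - q0| < beta
 *
 * which is exactly what the three diff_lt_altivec() calls below compute. */
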
static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {
    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {
    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /* (p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /* (p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

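/* Core p0/q0 update of the normal (tc0-clipped) luma filter. The macro below
 * evaluates, entirely in saturating unsigned byte arithmetic, what the
 * reference decoder expresses (roughly) as
 *
 *     delta = av_clip((((q0 - p0) * 4) + (p1 - q1) + 4) >> 3, -tc, tc);
 *     p0 = av_clip_uint8(p0 + delta);
 *     q0 = av_clip_uint8(q0 - delta);
 *
 * The constant 160 (A0v, built as 10 << 4) acts as the zero point of the
 * biased intermediate; the positive and negative parts of delta are recovered
 * by the two vec_subs() against it and clamped to tc0 with vec_min(). */
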
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
                                                        \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
                                                        \
    register vec_u8 pq0bit = vec_xor(p0,q0); \
    register vec_u8 q1minus; \
    register vec_u8 p0minus; \
    register vec_u8 stage1; \
    register vec_u8 stage2; \
    register vec_u8 vec160; \
    register vec_u8 delta; \
    register vec_u8 deltaneg; \
                              \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */ \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */ \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */ \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */ \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160 = vec_ld(0, &A0v); \
    deltaneg = vec_subs(vec160, stage2);      /* -d */ \
    delta = vec_subs(stage2, vec160);         /* d */ \
    deltaneg = vec_min(tc0masked, deltaneg); \
    delta = vec_min(tc0masked, delta); \
    p0 = vec_subs(p0, deltaneg); \
    q0 = vec_subs(q0, delta); \
    p0 = vec_adds(p0, delta); \
    q0 = vec_adds(q0, deltaneg); \
}

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED(16, unsigned char, temp)[16]; \
    register vec_u8 alphavec; \
    register vec_u8 betavec; \
    register vec_u8 mask; \
    register vec_u8 p1mask; \
    register vec_u8 q1mask; \
    register vector signed char tc0vec; \
    register vec_u8 finaltc0; \
    register vec_u8 tc0masked; \
    register vec_u8 newp1; \
    register vec_u8 newq1; \
                           \
    temp[0] = alpha; \
    temp[1] = beta; \
    alphavec = vec_ld(0, temp); \
    betavec = vec_splat(alphavec, 0x1); \
    alphavec = vec_splat(alphavec, 0x0); \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */ \
                                                                                   \
    AV_COPY32(temp, tc0); \
    tc0vec = vec_ld(0, (signed char*)temp); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                  /* tc = tc0 */       \
                                                                                    \
    p1mask = diff_lt_altivec(p2, p0, betavec); \
    p1mask = vec_and(p1mask, mask);                /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, p1mask);          /* tc++ */ \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
    /* end if */ \
                 \
    q1mask = diff_lt_altivec(q2, q0, betavec); \
    q1mask = vec_and(q1mask, mask);                /* if ( |q2 - q0| < beta ) */ \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, q1mask);          /* tc++ */ \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
    /* end if */ \
                 \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
    p1 = newp1; \
    q1 = newq1; \
}

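/* Luma deblocking entry points. The bitwise AND of the four tc0 values is
 * only negative when all of them are negative (in practice -1, i.e. filtering
 * disabled for the whole edge), so it is just a cheap early-out; individual
 * tc0[i] < 0 lanes are masked off inside h264_loop_filter_luma_altivec via
 * vec_cmpgt(tc0vec, -1). */
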
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

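/* Explicit weighted prediction (single reference). Folding the rounding term
 * into 'offset' up front makes the per-pixel work a single multiply-add and
 * shift; the scalar model is roughly
 *
 *     if (log2_denom)
 *         pix = ((pix * weight + (1 << (log2_denom - 1))) >> log2_denom) + offset;
 *     else
 *         pix = pix * weight + offset;
 *     pix = av_clip_uint8(pix);
 *
 * The 'aligned' test decides which half of the loaded 16-byte vector actually
 * holds the 8-pixel block when w == 8. */
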
static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

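/* Bidirectional weighted prediction: each output pixel is a weighted sum of
 * the two reference blocks. Matching the setup below, the per-pixel operation
 * is roughly
 *
 *     out = (dst * weightd + src * weights + (((offset + 1) | 1) << log2_denom))
 *           >> (log2_denom + 1);
 *     dst = av_clip_uint8(out);
 *
 * When w == 8, v2/v3 are duplicated so that whichever half of the source
 * vector actually holds the 8 pixels is the one used, regardless of the
 * destination's alignment. */
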
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)
#endif /* HAVE_ALTIVEC */

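/* Runtime dispatch: the AltiVec versions are only hooked up when the CPU
 * reports AltiVec support and the decoder runs at 8-bit depth. The
 * chroma_format_idc <= 1 (4:0:0 / 4:2:0) check guards h264_idct_add8_altivec,
 * which assumes the 4:2:0 chroma block layout. */
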
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16 = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = h264_idct8_dc_add_altivec;
        c->h264_idct8_add = h264_idct8_add_altivec;
        c->h264_idct8_add4 = h264_idct8_add4_altivec;

        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0] = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1] = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
#endif /* HAVE_ALTIVEC */
}