/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"
#include "gcc_fixes.h"
#include "dsputil_altivec.h"
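
/* AltiVec has no full 32-bit vector multiply, so every coefficient
 * multiplication below is decomposed into shifts and adds (e.g. STEP8
 * computes 12*x as t = x << 2, then (t << 1) + t); the constants are the
 * VC-1 transform coefficients (12, 16, 15, 9, 6, 4 and 17, 22, 10). */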

// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    /* even part: t0/t1 = 12*(s0 +/- s4) + rnd, t2 = 16*s2 + 6*s6, t3 = 6*s2 - 16*s6 */ \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    /* odd part: combinations of s1, s3, s5, s7 with coefficients 16, 15, 9, 4 */ \
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    /* output butterfly */ \
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)
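
/* Per-pass rounding shifts: the horizontal (first) pass scales by >> 3,
 * with its rounding constant already added inside STEP8 via vec_rnd; the
 * vertical (second) pass scales by >> 7 and adds an extra 1 to the last
 * four outputs (rows 4-7), matching VC-1's second-pass rounding rule. */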

#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

/* main steps of 4x4 transform (coefficients 17, 22, 10) */
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); /* 17 * s0 */ \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); /* 17 * s2 */ \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); /* 10 * (s3 - s1) */ \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); /* 22*s1 + 10*s3 */ \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); /* 22*s3 - 10*s1 */ \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)
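
/* 4-point counterparts of the shift macros; note that, unlike
 * SHIFT_VERT8, the 4-point vertical shift applies no extra bias. */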

#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)
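
/* Both transforms below follow the same pipeline: load the block,
 * transpose, run the row pass in 32-bit precision, transpose back, run
 * the column pass and store. The transposes let each 1-D pass operate
 * on whole vectors at a time. */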

/** Perform the inverse transform on an 8x8 block. */
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // 64 is outside the -16..15 immediate range of vec_splat_s32(),
    // so build it as 4 << 4
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    // load the block and transpose it so each vector holds one column
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // sign-extend to 32 bits: s0-s7 take the low halves, s8-sF the high halves
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // first (horizontal) pass: rounding constant 4, scaled by >> 3
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    // narrow back to 16 bits and transpose again for the column pass
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    // second (vertical) pass: rounding constant 64, scaled by >> 7
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);

    // narrow and store the result
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}
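
/* The 8x4 variant runs the 8-point row pass over all eight rows (the
 * 8-wide transpose makes this the simplest layout, even though only four
 * rows are kept), then applies the 4-point column pass to the half
 * selected by n. */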

/** Perform the inverse transform on an 8x4 part of the block.
 * @param n 0 transforms the upper half of the block, nonzero the lower half
 */
static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); // 4 << 4 == 64
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    // load the full block and transpose it so each vector holds one column
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // sign-extend and run the 8-point horizontal pass on all eight rows
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);

    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);

    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    if (!n) { // upper half of block
        s0 = vec_unpackh(src0);
        s1 = vec_unpackh(src1);
        s2 = vec_unpackh(src2);
        s3 = vec_unpackh(src3);
        s8 = vec_unpackl(src0);
        s9 = vec_unpackl(src1);
        sA = vec_unpackl(src2);
        sB = vec_unpackl(src3);

        // 4-point vertical pass: rounding constant 64, scaled by >> 7
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);

        src0 = vec_pack(s0, s8);
        src1 = vec_pack(s1, s9);
        src2 = vec_pack(s2, sA);
        src3 = vec_pack(s3, sB);

        vec_st(src0,  0, block);
        vec_st(src1, 16, block);
        vec_st(src2, 32, block);
        vec_st(src3, 48, block);
    } else { // lower half of block
        s0 = vec_unpackh(src4);
        s1 = vec_unpackh(src5);
        s2 = vec_unpackh(src6);
        s3 = vec_unpackh(src7);
        s8 = vec_unpackl(src4);
        s9 = vec_unpackl(src5);
        sA = vec_unpackl(src6);
        sB = vec_unpackl(src7);

        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);

        src4 = vec_pack(s0, s8);
        src5 = vec_pack(s1, s9);
        src6 = vec_pack(s2, sA);
        src7 = vec_pack(s3, sB);

        vec_st(src4,  64, block);
        vec_st(src5,  80, block);
        vec_st(src6,  96, block);
        vec_st(src7, 112, block);
    }
}
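
/* Hook the AltiVec implementations into the DSP context; presumably
 * called from the PPC DSP initialization once AltiVec support has been
 * detected at runtime. */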

void vc1dsp_init_altivec(DSPContext *dsp, AVCodecContext *avctx)
{
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
}