/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "../dsputil.h"
#include "gcc_fixes.h"
#include "dsputil_altivec.h"

// Transpose 8x8 matrix of 16-bit elements. Borrowed from mpegvideo_altivec.c
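// Three rounds of vec_mergeh/vec_mergel interleave the 16-bit lanes;
// after log2(8) = 3 merge passes each output vector holds one column of
// the input, so the whole transpose stays in registers with no scalar
// loads or stores.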
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
    vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
\
    A1 = vec_mergeh (a, e); \
    B1 = vec_mergel (a, e); \
    C1 = vec_mergeh (b, f); \
    D1 = vec_mergel (b, f); \
    E1 = vec_mergeh (c, g); \
    F1 = vec_mergel (c, g); \
    G1 = vec_mergeh (d, h); \
    H1 = vec_mergel (d, h); \
\
    A2 = vec_mergeh (A1, E1); \
    B2 = vec_mergel (A1, E1); \
    C2 = vec_mergeh (B1, F1); \
    D2 = vec_mergel (B1, F1); \
    E2 = vec_mergeh (C1, G1); \
    F2 = vec_mergel (C1, G1); \
    G2 = vec_mergeh (D1, H1); \
    H2 = vec_mergel (D1, H1); \
\
    a = vec_mergeh (A2, E2); \
    b = vec_mergel (A2, E2); \
    c = vec_mergeh (B2, F2); \
    d = vec_mergel (B2, F2); \
    e = vec_mergeh (C2, G2); \
    f = vec_mergel (C2, G2); \
    g = vec_mergeh (D2, H2); \
    h = vec_mergel (D2, H2); \
} while (0)

// main steps of 8x8 transform
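// STEP8 evaluates one pass of the VC-1 8-point inverse transform using
// shifts and adds only.  The even half computes 12*(s0 +/- s4) (12*x as
// ((x << 2) << 1) + (x << 2)) combined with 16*s2 + 6*s6 and
// 6*s2 - 16*s6; the odd half builds the coefficient rows {16,15,9,4},
// {15,-4,-16,-9}, {9,-16,4,15} and {4,-9,15,-16}, e.g.
//   t0 = 16*(s1+s3) + 8*s5 + 4*s7 + (s5-s3) = 16*s1 + 15*s3 + 9*s5 + 4*s7
// vec_rnd carries the rounding bias: 4 for the row pass, 64 for the
// column pass.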
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

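// Per-pass scaling: the horizontal (row) pass is scaled by >> 3, the
// vertical (column) pass by >> 7.  In the vertical pass the lower four
// outputs get an extra +1 before the shift, which appears to match the
// asymmetric rounding the VC-1 column transform specifies for rows 4-7.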
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

/* main steps of 4x4 transform */
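/* STEP4 uses the same shift-and-add scheme for the 4-point transform:
 * 17*x is (x << 4) + x, and the odd pair falls out of 10*(s3 - s1) as
 *   t2 = 10*(s3 - s1) + 32*s1 = 22*s1 + 10*s3
 *   t3 = 10*(s3 - s1) + 12*s3 = 22*s3 - 10*s1
 * i.e. the VC-1 4-point coefficients {17, 22, 10}. */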
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

/* wrapped in do { } while (0) so the multi-statement macros expand
 * safely inside unbraced if/else bodies */
#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)

/** Do inverse transform on 8x8 block
 */
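// Two passes over the block: load the eight rows, transpose, run the
// 8-point row transform on 32-bit lanes (vec_unpackh/vec_unpackl widen
// each row, since the intermediates overflow 16 bits), pack and
// transpose back, then repeat for the columns with the +64 bias and
// >> 7 scaling, and store.  vec_pack(s8, s0) reassembles each row with
// the vec_unpackh half first.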
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // 4 << 4 = 64; vec_splat_s32() alone only reaches -16..15
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // row pass
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    // column pass
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}

/** Do inverse transform on 8x4 part of block
 */
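// Same row pass as the 8x8 transform, followed by a 4-point column
// transform applied to one half of the block only: n == 0 selects the
// top four rows (src0-src3), nonzero n the bottom four (src4-src7).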
static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // 4 << 4 = 64; vec_splat_s32() alone only reaches -16..15
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // 8-point row pass over the whole block
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // 4-point column pass on the selected half
    if (!n) { // upper half of block
        s0 = vec_unpackh(src0);
        s1 = vec_unpackh(src1);
        s2 = vec_unpackh(src2);
        s3 = vec_unpackh(src3);
        s8 = vec_unpackl(src0);
        s9 = vec_unpackl(src1);
        sA = vec_unpackl(src2);
        sB = vec_unpackl(src3);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src0 = vec_pack(s0, s8);
        src1 = vec_pack(s1, s9);
        src2 = vec_pack(s2, sA);
        src3 = vec_pack(s3, sB);
        vec_st(src0,  0, block);
        vec_st(src1, 16, block);
        vec_st(src2, 32, block);
        vec_st(src3, 48, block);
    } else { // lower half of block
        s0 = vec_unpackh(src4);
        s1 = vec_unpackh(src5);
        s2 = vec_unpackh(src6);
        s3 = vec_unpackh(src7);
        s8 = vec_unpackl(src4);
        s9 = vec_unpackl(src5);
        sA = vec_unpackl(src6);
        sB = vec_unpackl(src7);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src4 = vec_pack(s0, s8);
        src5 = vec_pack(s1, s9);
        src6 = vec_pack(s2, sA);
        src7 = vec_pack(s3, sB);
        vec_st(src4,  64, block);
        vec_st(src5,  80, block);
        vec_st(src6,  96, block);
        vec_st(src7, 112, block);
    }
}

void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx)
{
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
}