/*
 * FFT/MDCT transform with SSE optimizations
 * Copyright (c) 2002 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
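
/* Sign-bit masks: each 1 << 31 sets the IEEE-754 sign bit of one float lane,
 * so xorps with these constants negates the corresponding elements. The
 * names spell out the per-lane signs, e.g. p1p1p1m1 = +1 +1 +1 -1. */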
static const int p1p1p1m1[4] __attribute__((aligned(16))) =
    { 0, 0, 0, 1 << 31 };
static const int p1p1m1p1[4] __attribute__((aligned(16))) =
    { 0, 0, 1 << 31, 0 };
static const int p1p1m1m1[4] __attribute__((aligned(16))) =
    { 0, 0, 1 << 31, 1 << 31 };
static const int p1m1p1m1[4] __attribute__((aligned(16))) =
    { 0, 1 << 31, 0, 1 << 31 };
static const int m1m1m1m1[4] __attribute__((aligned(16))) =
    { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };

#if 0
static void print_v4sf(const char *str, __m128 a)
{
    float *p = (float *)&a;
    printf("%s: %f %f %f %f\n",
           str, p[0], p[1], p[2], p[3]);
}
#endif

/* XXX: handle reverse case */
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
    int ln = s->nbits;
    x86_reg i;
    long j;
    long nblocks, nloops;
    FFTComplex *p, *cptr;

    asm volatile(
        "movaps %0, %%xmm4 \n\t"
        "movaps %1, %%xmm5 \n\t"
        ::"m"(*p1p1m1m1),
          "m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1))
    );

    i = 8 << ln;
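
    /* passes 0 and 1: radix-4 butterflies without twiddle factors,
     * 4 complex values (32 bytes) per iteration; i counts down from
     * the buffer size in bytes */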
    asm volatile(
        "1: \n\t"
        "sub $32, %0 \n\t"
        /* do the pass 0 butterfly */
        "movaps (%0,%1), %%xmm0 \n\t"
        "movaps %%xmm0, %%xmm1 \n\t"
        "shufps $0x4E, %%xmm0, %%xmm0 \n\t"
        "xorps %%xmm4, %%xmm1 \n\t"
        "addps %%xmm1, %%xmm0 \n\t"
        "movaps 16(%0,%1), %%xmm2 \n\t"
        "movaps %%xmm2, %%xmm3 \n\t"
        "shufps $0x4E, %%xmm2, %%xmm2 \n\t"
        "xorps %%xmm4, %%xmm3 \n\t"
        "addps %%xmm3, %%xmm2 \n\t"
        /* multiply third by -i */
        /* by toggling the sign bit */
        "shufps $0xB4, %%xmm2, %%xmm2 \n\t"
        "xorps %%xmm5, %%xmm2 \n\t"
        /* do the pass 1 butterfly */
        "movaps %%xmm0, %%xmm1 \n\t"
        "addps %%xmm2, %%xmm0 \n\t"
        "subps %%xmm2, %%xmm1 \n\t"
        "movaps %%xmm0, (%0,%1) \n\t"
        "movaps %%xmm1, 16(%0,%1) \n\t"
        "jg 1b \n\t"
        :"+r"(i)
        :"r"(z)
    );

    /* pass 2 .. ln-1 */
    nblocks = 1 << (ln-3);
    nloops = 1 << 2;
    cptr = s->exptab1;
    do {
        p = z;
        j = nblocks;
        do {
            i = nloops*8;
            asm volatile(
                "1: \n\t"
                "sub $32, %0 \n\t"
                "movaps (%2,%0), %%xmm1 \n\t"
                "movaps (%1,%0), %%xmm0 \n\t"
                "movaps 16(%2,%0), %%xmm5 \n\t"
                "movaps 16(%1,%0), %%xmm4 \n\t"
                "movaps %%xmm1, %%xmm2 \n\t"
                "movaps %%xmm5, %%xmm6 \n\t"
                "shufps $0xA0, %%xmm1, %%xmm1 \n\t"
                "shufps $0xF5, %%xmm2, %%xmm2 \n\t"
                "shufps $0xA0, %%xmm5, %%xmm5 \n\t"
                "shufps $0xF5, %%xmm6, %%xmm6 \n\t"
                "mulps (%3,%0,2), %%xmm1 \n\t"   //  cre*re cim*re
                "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im
                "mulps 32(%3,%0,2), %%xmm5 \n\t" //  cre*re cim*re
                "mulps 48(%3,%0,2), %%xmm6 \n\t" // -cim*im cre*im
                "addps %%xmm2, %%xmm1 \n\t"
                "addps %%xmm6, %%xmm5 \n\t"
                "movaps %%xmm0, %%xmm3 \n\t"
                "movaps %%xmm4, %%xmm7 \n\t"
                "addps %%xmm1, %%xmm0 \n\t"
                "subps %%xmm1, %%xmm3 \n\t"
                "addps %%xmm5, %%xmm4 \n\t"
                "subps %%xmm5, %%xmm7 \n\t"
                "movaps %%xmm0, (%1,%0) \n\t"
                "movaps %%xmm3, (%2,%0) \n\t"
                "movaps %%xmm4, 16(%1,%0) \n\t"
                "movaps %%xmm7, 16(%2,%0) \n\t"
                "jg 1b \n\t"
                :"+r"(i)
                :"r"(p), "r"(p + nloops), "r"(cptr)
            );
            p += nloops*2;
        } while (--j);
        cptr += nloops*2;
        nblocks >>= 1;
        nloops <<= 1;
    } while (nblocks != 0);
}

void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp)
{
    x86_reg k;
    long n8, n4, n2, n;
    const uint16_t *revtab = s->fft.revtab;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;
    const FFTSample *in1, *in2;
    FFTComplex *z = (FFTComplex *)tmp;

    n = 1 << s->nbits;
    n2 = n >> 1;
    n4 = n >> 2;
    n8 = n >> 3;
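
    /* On x86-64 the extra xmm8 register holds the sign mask for the whole
     * function; on x86-32 it is passed to each asm block as a memory
     * operand instead. */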
#ifdef ARCH_X86_64
    asm volatile ("movaps %0, %%xmm8\n\t"::"m"(*p1m1p1m1));
#define P1M1P1M1 "%%xmm8"
#else
#define P1M1P1M1 "%4"
#endif

    /* pre rotation */
    in1 = input;
    in2 = input + n2 - 4;

    /* Complex multiplication */
    for (k = 0; k < n4; k += 4) {
        asm volatile (
            "movaps %0, %%xmm0 \n\t"          // xmm0 = r0 X r1 X : in2
            "movaps %1, %%xmm3 \n\t"          // xmm3 = X i1 X i0: in1
            "movaps -16+1*%0, %%xmm4 \n\t"    // xmm4 = r0 X r1 X : in2
            "movaps 16+1*%1, %%xmm7 \n\t"     // xmm7 = X i1 X i0: in1
            "movlps %2, %%xmm1 \n\t"          // xmm1 = X X R1 R0: tcos
            "movlps %3, %%xmm2 \n\t"          // xmm2 = X X I1 I0: tsin
            "movlps 8+1*%2, %%xmm5 \n\t"      // xmm5 = X X R1 R0: tcos
            "movlps 8+1*%3, %%xmm6 \n\t"      // xmm6 = X X I1 I0: tsin
            "shufps $95, %%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0
            "shufps $160,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0
            "shufps $95, %%xmm4, %%xmm4 \n\t" // xmm4 = r1 r1 r0 r0
            "shufps $160,%%xmm7, %%xmm7 \n\t" // xmm7 = i1 i1 i0 i0
            "unpcklps %%xmm2, %%xmm1 \n\t"    // xmm1 = I1 R1 I0 R0
            "unpcklps %%xmm6, %%xmm5 \n\t"    // xmm5 = I1 R1 I0 R0
            "movaps %%xmm1, %%xmm2 \n\t"      // xmm2 = I1 R1 I0 R0
            "movaps %%xmm5, %%xmm6 \n\t"      // xmm6 = I1 R1 I0 R0
            "xorps "P1M1P1M1", %%xmm2 \n\t"   // xmm2 = -I1 R1 -I0 R0
            "xorps "P1M1P1M1", %%xmm6 \n\t"   // xmm6 = -I1 R1 -I0 R0
            "mulps %%xmm1, %%xmm0 \n\t"       // xmm0 = rI rR rI rR
            "mulps %%xmm5, %%xmm4 \n\t"       // xmm4 = rI rR rI rR
            "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0
            "shufps $177,%%xmm6, %%xmm6 \n\t" // xmm6 = R1 -I1 R0 -I0
            "mulps %%xmm2, %%xmm3 \n\t"       // xmm3 = Ri -Ii Ri -Ii
            "mulps %%xmm6, %%xmm7 \n\t"       // xmm7 = Ri -Ii Ri -Ii
            "addps %%xmm3, %%xmm0 \n\t"       // xmm0 = result
            "addps %%xmm7, %%xmm4 \n\t"       // xmm4 = result
            ::"m"(in2[-2*k]), "m"(in1[2*k]),
              "m"(tcos[k]), "m"(tsin[k])
#ifndef ARCH_X86_64
            ,"m"(*p1m1p1m1)
#endif
        );
        /* Should be in the same block, hack for gcc2.95 & gcc3 */
        asm (
            "movlps %%xmm0, %0 \n\t"
            "movhps %%xmm0, %1 \n\t"
            "movlps %%xmm4, %2 \n\t"
            "movhps %%xmm4, %3 \n\t"
            :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]]),
             "=m"(z[revtab[k + 2]]), "=m"(z[revtab[k + 3]])
        );
    }

    ff_fft_calc_sse(&s->fft, z);

#ifndef ARCH_X86_64
#undef P1M1P1M1
#define P1M1P1M1 "%3"
#endif

    /* post rotation + reordering */
    for (k = 0; k < n4; k += 4) {
        asm (
            "movaps %0, %%xmm0 \n\t"          // xmm0 = i1 r1 i0 r0: z
            "movaps 16+1*%0, %%xmm4 \n\t"     // xmm4 = i1 r1 i0 r0: z
            "movlps %1, %%xmm1 \n\t"          // xmm1 = X X R1 R0: tcos
            "movlps 8+1*%1, %%xmm5 \n\t"      // xmm5 = X X R1 R0: tcos
            "movaps %%xmm0, %%xmm3 \n\t"      // xmm3 = i1 r1 i0 r0
            "movaps %%xmm4, %%xmm7 \n\t"      // xmm7 = i1 r1 i0 r0
            "movlps %2, %%xmm2 \n\t"          // xmm2 = X X I1 I0: tsin
            "movlps 8+1*%2, %%xmm6 \n\t"      // xmm6 = X X I1 I0: tsin
            "shufps $160,%%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0
            "shufps $245,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0
            "shufps $160,%%xmm4, %%xmm4 \n\t" // xmm4 = r1 r1 r0 r0
            "shufps $245,%%xmm7, %%xmm7 \n\t" // xmm7 = i1 i1 i0 i0
            "unpcklps %%xmm2, %%xmm1 \n\t"    // xmm1 = I1 R1 I0 R0
            "unpcklps %%xmm6, %%xmm5 \n\t"    // xmm5 = I1 R1 I0 R0
            "movaps %%xmm1, %%xmm2 \n\t"      // xmm2 = I1 R1 I0 R0
            "movaps %%xmm5, %%xmm6 \n\t"      // xmm6 = I1 R1 I0 R0
            "xorps "P1M1P1M1", %%xmm2 \n\t"   // xmm2 = -I1 R1 -I0 R0
            "mulps %%xmm1, %%xmm0 \n\t"       // xmm0 = rI rR rI rR
            "xorps "P1M1P1M1", %%xmm6 \n\t"   // xmm6 = -I1 R1 -I0 R0
            "mulps %%xmm5, %%xmm4 \n\t"       // xmm4 = rI rR rI rR
            "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0
            "shufps $177,%%xmm6, %%xmm6 \n\t" // xmm6 = R1 -I1 R0 -I0
            "mulps %%xmm2, %%xmm3 \n\t"       // xmm3 = Ri -Ii Ri -Ii
            "mulps %%xmm6, %%xmm7 \n\t"       // xmm7 = Ri -Ii Ri -Ii
            "addps %%xmm3, %%xmm0 \n\t"       // xmm0 = result
            "addps %%xmm7, %%xmm4 \n\t"       // xmm4 = result
            "movaps %%xmm0, %0 \n\t"
            "movaps %%xmm4, 16+1*%0\n\t"
            :"+m"(z[k])
            :"m"(tcos[k]), "m"(tsin[k])
#ifndef ARCH_X86_64
            ,"m"(*p1m1p1m1)
#endif
        );
    }

    /*
       Mnemonics:
       0 = z[k].re
       1 = z[k].im
       2 = z[k + 1].re
       3 = z[k + 1].im
       4 = z[-k - 2].re
       5 = z[-k - 2].im
       6 = z[-k - 1].re
       7 = z[-k - 1].im
    */
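
    /* Final reordering: each iteration loads four complex values around
     * z + n8 and stores them, with sign flips, forward from output and
     * output + n2 and backward from output + n2 and output + n (see the
     * per-store comments below). */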
    k = 16-n;
    asm volatile("movaps %0, %%xmm7 \n\t"::"m"(*m1m1m1m1));
    asm volatile(
        "1: \n\t"
        "movaps -16(%4,%0), %%xmm1 \n\t"  // xmm1 = 4 5 6 7 = z[-2-k]
        "neg %0 \n\t"
        "movaps (%4,%0), %%xmm0 \n\t"     // xmm0 = 0 1 2 3 = z[k]
        "xorps %%xmm7, %%xmm0 \n\t"       // xmm0 = -0 -1 -2 -3
        "movaps %%xmm0, %%xmm2 \n\t"      // xmm2 = -0 -1 -2 -3
        "shufps $141,%%xmm1, %%xmm0 \n\t" // xmm0 = -1 -3 4 6
        "shufps $216,%%xmm1, %%xmm2 \n\t" // xmm2 = -0 -2 5 7
        "shufps $156,%%xmm0, %%xmm0 \n\t" // xmm0 = -1 6 -3 4 !
        "shufps $156,%%xmm2, %%xmm2 \n\t" // xmm2 = -0 7 -2 5 !
        "movaps %%xmm0, (%1,%0) \n\t"     // output[2*k]
        "movaps %%xmm2, (%2,%0) \n\t"     // output[n2+2*k]
        "neg %0 \n\t"
        "shufps $27, %%xmm0, %%xmm0 \n\t" // xmm0 = 4 -3 6 -1
        "xorps %%xmm7, %%xmm0 \n\t"       // xmm0 = -4 3 -6 1 !
        "shufps $27, %%xmm2, %%xmm2 \n\t" // xmm2 = 5 -2 7 -0 !
        "movaps %%xmm0, -16(%2,%0) \n\t"  // output[n2-4-2*k]
        "movaps %%xmm2, -16(%3,%0) \n\t"  // output[n-4-2*k]
        "add $16, %0 \n\t"
        "jle 1b \n\t"
        :"+r"(k)
        :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8)
        :"memory"
    );
}