You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

269 lines
10KB

  1. /*
  2. * SIMD-optimized MP3 decoding functions
  3. * Copyright (c) 2010 Vitor Sessak
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/attributes.h"
  22. #include "libavutil/cpu.h"
  23. #include "libavutil/internal.h"
  24. #include "libavutil/x86/asm.h"
  25. #include "libavutil/x86/cpu.h"
  26. #include "libavcodec/mpegaudiodsp.h"
/* IMDCT36 kernels implemented in external x86 assembly; one 18-sample
 * IMDCT per call, with one entry point per instruction-set level. The
 * best available variant is selected at init time in ff_mpadsp_init_x86(). */
void ff_imdct36_float_sse(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_sse2(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_sse3(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_ssse3(float *out, float *buf, float *in, float *win);
void ff_imdct36_float_avx(float *out, float *buf, float *in, float *win);
/* Four IMDCT36s processed at once on interleaved data; `win` must point
 * at one of the pre-interleaved mdct_win_sse tables below, and `tmpbuf`
 * provides aligned scratch space for the kernel. */
void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
float *tmpbuf);
void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
float *tmpbuf);
/* Window tables with 4 coefficients interleaved per tap for the
 * four-at-a-time kernels: [switch_point][block_type][4 * 40 taps].
 * Filled in ff_mpadsp_init_x86(). */
DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];
  37. #if HAVE_SSE2_INLINE
/* Scalar multiply-accumulate / multiply-subtract helpers, matching the
 * reference C DSP code: rt += ra*rb and rt -= ra*rb respectively. */
#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
/* Apply `op` (MACS or MLSS) to 8 window/sample pairs spaced 64 floats
 * apart — the polyphase filterbank tap layout — accumulating into `sum`. */
#define SUM8(op, sum, w, p) \
{ \
op(sum, (w)[0 * 64], (p)[0 * 64]); \
op(sum, (w)[1 * 64], (p)[1 * 64]); \
op(sum, (w)[2 * 64], (p)[2 * 64]); \
op(sum, (w)[3 * 64], (p)[3 * 64]); \
op(sum, (w)[4 * 64], (p)[4 * 64]); \
op(sum, (w)[5 * 64], (p)[5 * 64]); \
op(sum, (w)[6 * 64], (p)[6 * 64]); \
op(sum, (w)[7 * 64], (p)[7 * 64]); \
}
/*
 * Dual windowed dot-product helper for the MP3 synthesis filterbank.
 *
 * For each group of 4 output positions it accumulates 8 products of
 * window and buffer samples and stores the NEGATED sums:
 *   sum1[i] -= sum of win1 taps * buf taps  (taps 256 bytes = 64 floats apart)
 *   sum2[i] -= sum of win2 taps * buf taps  (taps  64 bytes = 16 floats apart)
 * NOTE(review): the exact tap offsets are encoded in the byte offsets of
 * the MULT() invocations below — confirm against the scalar reference
 * implementation before touching them.
 *
 * All pointers must be 16-byte aligned (aligned movaps) and `len` a
 * multiple of 4: the loop consumes 16 bytes per iteration.
 */
static void apply_window(const float *buf, const float *win1,
const float *win2, float *sum1, float *sum2, int len)
{
/* Counting a negative byte offset up toward zero lets the loop use a
 * single add+jl as both pointer advance and termination test. */
x86_reg count = - 4*len;
const float *win1a = win1+len;
const float *win2a = win2+len;
const float *bufa = buf+len;
float *sum1a = sum1+len;
float *sum2a = sum2+len;
/* One tap pair: `a` is the byte offset into win1a (%1) and bufa (%3),
 * `b` the byte offset into win2a (%2).
 *   xmm0 -= win1[a] * buf[a];   xmm4 -= win2[b] * buf[a]
 * (AT&T syntax: "subps %%xmm1, %%xmm0" means xmm0 -= xmm1). */
#define MULT(a, b) \
"movaps " #a "(%1,%0), %%xmm1 \n\t" \
"movaps " #a "(%3,%0), %%xmm2 \n\t" \
"mulps %%xmm2, %%xmm1 \n\t" \
"subps %%xmm1, %%xmm0 \n\t" \
"mulps " #b "(%2,%0), %%xmm2 \n\t" \
"subps %%xmm2, %%xmm4 \n\t" \
/* Per iteration: zero both accumulators, apply the 8 taps, store 4
 * results each to sum1a (%4) and sum2a (%5), advance 16 bytes.
 * NOTE(review): the asm stores through %4/%5 but declares no "memory"
 * clobber — it relies on the compiler not caching sum1a/sum2a contents
 * across the statement; verify this holds with current compilers. */
__asm__ volatile(
"1: \n\t"
"xorps %%xmm0, %%xmm0 \n\t"
"xorps %%xmm4, %%xmm4 \n\t"
MULT( 0, 0)
MULT( 256, 64)
MULT( 512, 128)
MULT( 768, 192)
MULT(1024, 256)
MULT(1280, 320)
MULT(1536, 384)
MULT(1792, 448)
"movaps %%xmm0, (%4,%0) \n\t"
"movaps %%xmm4, (%5,%0) \n\t"
"add $16, %0 \n\t"
"jl 1b \n\t"
:"+&r"(count)
:"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
);
#undef MULT
}
/*
 * SSE2 synthesis-window function for float MP3 decoding; installed as
 * MPADSPContext.apply_window_float.
 *
 * in:     pointer into the synthesis ring buffer
 * win:    synthesis window coefficients
 * unused: state slot used by other variants of this callback; ignored here
 * out:    output samples, written with stride `incr`
 * incr:   output stride; incr == 1 takes the vectorized fast path
 */
static void apply_window_mp3(float *in, float *win, int *unused, float *out,
ptrdiff_t incr)
{
LOCAL_ALIGNED_16(float, suma, [17]);
LOCAL_ALIGNED_16(float, sumb, [17]);
LOCAL_ALIGNED_16(float, sumc, [17]);
LOCAL_ALIGNED_16(float, sumd, [17]);
float sum;
/* copy to avoid wrap */
/* Duplicate the first 32 floats (8 aligned 16-byte loads/stores) to
 * in+512 so the windowing below can read linearly past the ring-buffer
 * end without wraparound checks. */
__asm__ volatile(
"movaps 0(%0), %%xmm0 \n\t" \
"movaps 16(%0), %%xmm1 \n\t" \
"movaps 32(%0), %%xmm2 \n\t" \
"movaps 48(%0), %%xmm3 \n\t" \
"movaps %%xmm0, 0(%1) \n\t" \
"movaps %%xmm1, 16(%1) \n\t" \
"movaps %%xmm2, 32(%1) \n\t" \
"movaps %%xmm3, 48(%1) \n\t" \
"movaps 64(%0), %%xmm0 \n\t" \
"movaps 80(%0), %%xmm1 \n\t" \
"movaps 96(%0), %%xmm2 \n\t" \
"movaps 112(%0), %%xmm3 \n\t" \
"movaps %%xmm0, 64(%1) \n\t" \
"movaps %%xmm1, 80(%1) \n\t" \
"movaps %%xmm2, 96(%1) \n\t" \
"movaps %%xmm3, 112(%1) \n\t"
::"r"(in), "r"(in+512)
:"memory"
);
/* Four partial sum vectors for the output butterfly below. */
apply_window(in + 16, win , win + 512, suma, sumc, 16);
apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);
/* Center sample gets 8 extra scalar taps folded into suma[0]. */
SUM8(MACS, suma[0], win + 32, in + 48);
/* Zero the boundary terms so the butterfly's edge lanes are unaffected. */
sumc[ 0] = 0;
sumb[16] = 0;
sumd[16] = 0;
/* Combine partial sums into two output vectors:
 *   out[out1..] = reverse(sumd[..]) - suma[..]
 *   out[out2..] = reverse(sumc[..]) + sumb[..]
 * shufps $0x1b reverses the 4 floats in a register; movups is used on
 * the sum arrays because the +4/+20/... byte offsets are unaligned. */
#define SUMS(suma, sumb, sumc, sumd, out1, out2) \
"movups " #sumd "(%4), %%xmm0 \n\t" \
"shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
"subps " #suma "(%1), %%xmm0 \n\t" \
"movaps %%xmm0," #out1 "(%0) \n\t" \
\
"movups " #sumc "(%3), %%xmm0 \n\t" \
"shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
"addps " #sumb "(%2), %%xmm0 \n\t" \
"movaps %%xmm0," #out2 "(%0) \n\t"
if (incr == 1) {
/* Contiguous output: 8 aligned vector stores cover samples 0..15 and
 * 16..31 (byte offsets 0..48 and 64..112 from `out`). */
__asm__ volatile(
SUMS( 0, 48, 4, 52, 0, 112)
SUMS(16, 32, 20, 36, 16, 96)
SUMS(32, 16, 36, 20, 32, 80)
SUMS(48, 0, 52, 4, 48, 64)
:"+&r"(out)
:"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
:"memory"
);
/* Leave `out` pointing at sample 16 for the final store below. */
out += 16*incr;
} else {
/* Strided output: same butterfly done scalar, walking out/out2 toward
 * each other from both ends. */
int j;
float *out2 = out + 32 * incr;
out[0 ] = -suma[ 0];
out += incr;
out2 -= incr;
for(j=1;j<16;j++) {
*out = -suma[ j] + sumd[16-j];
*out2 = sumb[16-j] + sumc[ j];
out += incr;
out2 -= incr;
}
}
/* Middle output sample: 8 scalar multiply-subtract taps. */
sum = 0;
SUM8(MLSS, sum, win + 16 + 32, in + 32);
*out = sum;
}
  161. #endif /* HAVE_SSE2_INLINE */
  162. #if HAVE_X86ASM
/*
 * Generate imdct36_blocks_<CPU1>(): run `count` 36-point IMDCTs.
 *
 * Blocks are processed four at a time via the interleaved
 * ff_four_imdct36_float_<CPU2>() kernel using the pre-interleaved
 * mdct_win_sse tables, then the remainder (count % 4) one at a time via
 * ff_imdct36_float_<CPU1>() using the plain ff_mdct_win_float tables.
 *
 * switch_point selects alternate windows for the first two blocks; the
 * four-at-a-time path tests `j < 4` while the scalar path tests `j < 2`
 * because the mdct_win_sse[1] tables already encode the two switch-point
 * windows in their first two interleaved lanes (see ff_mpadsp_init_x86()).
 * `4 & -(j & 1)` adds 4 on odd j to pick the odd-phase window variant.
 */
#define DECL_IMDCT_BLOCKS(CPU1, CPU2) \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in, \
int count, int switch_point, int block_type) \
{ \
int align_end = count - (count & 3); \
int j; \
for (j = 0; j < align_end; j+= 4) { \
LOCAL_ALIGNED_16(float, tmpbuf, [1024]); \
float *win = mdct_win_sse[switch_point && j < 4][block_type]; \
/* apply window & overlap with previous buffer */ \
\
/* select window */ \
ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf); \
in += 4*18; \
buf += 4*18; \
out += 4; \
} \
for (; j < count; j++) { \
/* apply window & overlap with previous buffer */ \
\
/* select window */ \
int win_idx = (switch_point && j < 2) ? 0 : block_type; \
float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))]; \
\
ff_imdct36_float_ ## CPU1(out, buf, in, win); \
\
in += 18; \
buf++; \
out++; \
} \
}
DECL_IMDCT_BLOCKS(sse,sse)
DECL_IMDCT_BLOCKS(sse2,sse)
DECL_IMDCT_BLOCKS(sse3,sse)
DECL_IMDCT_BLOCKS(ssse3,sse)
DECL_IMDCT_BLOCKS(avx,avx)
  199. #endif /* HAVE_X86ASM */
  200. av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
  201. {
  202. int cpu_flags = av_get_cpu_flags();
  203. int i, j;
  204. for (j = 0; j < 4; j++) {
  205. for (i = 0; i < 40; i ++) {
  206. mdct_win_sse[0][j][4*i ] = ff_mdct_win_float[j ][i];
  207. mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
  208. mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j ][i];
  209. mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
  210. mdct_win_sse[1][j][4*i ] = ff_mdct_win_float[0 ][i];
  211. mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4 ][i];
  212. mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j ][i];
  213. mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
  214. }
  215. }
  216. #if HAVE_SSE2_INLINE
  217. if (INLINE_SSE2(cpu_flags)) {
  218. s->apply_window_float = apply_window_mp3;
  219. }
  220. #endif /* HAVE_SSE2_INLINE */
  221. #if HAVE_X86ASM
  222. if (EXTERNAL_SSE(cpu_flags)) {
  223. s->imdct36_blocks_float = imdct36_blocks_sse;
  224. }
  225. if (EXTERNAL_SSE2(cpu_flags)) {
  226. s->imdct36_blocks_float = imdct36_blocks_sse2;
  227. }
  228. if (EXTERNAL_SSE3(cpu_flags)) {
  229. s->imdct36_blocks_float = imdct36_blocks_sse3;
  230. }
  231. if (EXTERNAL_SSSE3(cpu_flags)) {
  232. s->imdct36_blocks_float = imdct36_blocks_ssse3;
  233. }
  234. if (EXTERNAL_AVX(cpu_flags)) {
  235. s->imdct36_blocks_float = imdct36_blocks_avx;
  236. }
  237. #endif /* HAVE_X86ASM */
  238. }