You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

274 lines
10KB

  1. /*
  2. * MMX optimized MP3 decoding functions
  3. * Copyright (c) 2010 Vitor Sessak
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/attributes.h"
  22. #include "libavutil/cpu.h"
  23. #include "libavutil/x86/asm.h"
  24. #include "libavutil/x86/cpu.h"
  25. #include "libavcodec/dsputil.h"
  26. #include "libavcodec/mpegaudiodsp.h"
  27. #define DECL(CPU)\
  28. static void imdct36_blocks_ ## CPU(float *out, float *buf, float *in, int count, int switch_point, int block_type);\
  29. void ff_imdct36_float_ ## CPU(float *out, float *buf, float *in, float *win);
  30. DECL(sse)
  31. DECL(sse2)
  32. DECL(sse3)
  33. DECL(ssse3)
  34. DECL(avx)
  35. void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
  36. float *tmpbuf);
  37. void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
  38. float *tmpbuf);
  39. DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];
  40. #if HAVE_SSE2_INLINE
/* Scalar multiply-accumulate / multiply-subtract helpers for the
 * non-vectorized tail computations below. */
#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)

/* Apply op(sum, w[k*64], p[k*64]) for eight taps spaced 64 floats apart —
 * the stride used by the synthesis-window layout in this file. */
#define SUM8(op, sum, w, p)               \
{                                         \
    op(sum, (w)[0 * 64], (p)[0 * 64]);    \
    op(sum, (w)[1 * 64], (p)[1 * 64]);    \
    op(sum, (w)[2 * 64], (p)[2 * 64]);    \
    op(sum, (w)[3 * 64], (p)[3 * 64]);    \
    op(sum, (w)[4 * 64], (p)[4 * 64]);    \
    op(sum, (w)[5 * 64], (p)[5 * 64]);    \
    op(sum, (w)[6 * 64], (p)[6 * 64]);    \
    op(sum, (w)[7 * 64], (p)[7 * 64]);    \
}
  54. static void apply_window(const float *buf, const float *win1,
  55. const float *win2, float *sum1, float *sum2, int len)
  56. {
  57. x86_reg count = - 4*len;
  58. const float *win1a = win1+len;
  59. const float *win2a = win2+len;
  60. const float *bufa = buf+len;
  61. float *sum1a = sum1+len;
  62. float *sum2a = sum2+len;
  63. #define MULT(a, b) \
  64. "movaps " #a "(%1,%0), %%xmm1 \n\t" \
  65. "movaps " #a "(%3,%0), %%xmm2 \n\t" \
  66. "mulps %%xmm2, %%xmm1 \n\t" \
  67. "subps %%xmm1, %%xmm0 \n\t" \
  68. "mulps " #b "(%2,%0), %%xmm2 \n\t" \
  69. "subps %%xmm2, %%xmm4 \n\t" \
  70. __asm__ volatile(
  71. "1: \n\t"
  72. "xorps %%xmm0, %%xmm0 \n\t"
  73. "xorps %%xmm4, %%xmm4 \n\t"
  74. MULT( 0, 0)
  75. MULT( 256, 64)
  76. MULT( 512, 128)
  77. MULT( 768, 192)
  78. MULT(1024, 256)
  79. MULT(1280, 320)
  80. MULT(1536, 384)
  81. MULT(1792, 448)
  82. "movaps %%xmm0, (%4,%0) \n\t"
  83. "movaps %%xmm4, (%5,%0) \n\t"
  84. "add $16, %0 \n\t"
  85. "jl 1b \n\t"
  86. :"+&r"(count)
  87. :"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
  88. );
  89. #undef MULT
  90. }
/**
 * SSE2 synthesis windowing for one MP3 granule
 * (installed as MPADSPContext.apply_window_float).
 *
 * Produces 32 output samples spaced "incr" floats apart in out[] from the
 * synthesis buffer in[] and window win[]. "unused" only keeps the common
 * apply_window_float prototype. in[] must have room for the 32-float
 * duplicate written at in+512 below — NOTE(review): presumably the ring
 * buffer is allocated with that headroom; confirm against the caller.
 */
static void apply_window_mp3(float *in, float *win, int *unused, float *out,
                             int incr)
{
    LOCAL_ALIGNED_16(float, suma, [17]);
    LOCAL_ALIGNED_16(float, sumb, [17]);
    LOCAL_ALIGNED_16(float, sumc, [17]);
    LOCAL_ALIGNED_16(float, sumd, [17]);

    float sum;

    /* copy to avoid wrap: duplicate in[0..31] to in[512..543] so the
     * 64-float-strided reads below never have to wrap the ring buffer */
    __asm__ volatile(
            "movaps    0(%0), %%xmm0   \n\t" \
            "movaps   16(%0), %%xmm1   \n\t" \
            "movaps   32(%0), %%xmm2   \n\t" \
            "movaps   48(%0), %%xmm3   \n\t" \
            "movaps %%xmm0,    0(%1)   \n\t" \
            "movaps %%xmm1,   16(%1)   \n\t" \
            "movaps %%xmm2,   32(%1)   \n\t" \
            "movaps %%xmm3,   48(%1)   \n\t" \
            "movaps   64(%0), %%xmm0   \n\t" \
            "movaps   80(%0), %%xmm1   \n\t" \
            "movaps   96(%0), %%xmm2   \n\t" \
            "movaps  112(%0), %%xmm3   \n\t" \
            "movaps %%xmm0,   64(%1)   \n\t" \
            "movaps %%xmm1,   80(%1)   \n\t" \
            "movaps %%xmm2,   96(%1)   \n\t" \
            "movaps %%xmm3,  112(%1)   \n\t"
            ::"r"(in), "r"(in+512)
            :"memory"
            );

    /* Four sets of 16 partial windowed sums (negated, see apply_window). */
    apply_window(in + 16, win     , win + 512, suma, sumc, 16);
    apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);

    /* Extra tap for output sample 0 (the combine loop starts at j == 1). */
    SUM8(MACS, suma[0], win + 32, in + 48);

    /* Zero the partials that fall outside the combine range. */
    sumc[ 0] = 0;
    sumb[16] = 0;
    sumd[16] = 0;

/* Combine 4+4 outputs: sumd/sumc are read reversed (shufps $0x1b reverses
 * the four floats in the register), matching the scalar [16-j] indexing in
 * the fallback loop below. Arguments are byte offsets into the arrays/out. */
#define SUMS(suma, sumb, sumc, sumd, out1, out2)          \
            "movups " #sumd "(%4),       %%xmm0 \n\t"     \
            "shufps         $0x1b,       %%xmm0, %%xmm0 \n\t" \
            "subps  " #suma "(%1),       %%xmm0 \n\t"     \
            "movaps        %%xmm0," #out1 "(%0) \n\t"     \
   \
            "movups " #sumc "(%3),       %%xmm0 \n\t"     \
            "shufps         $0x1b,       %%xmm0, %%xmm0 \n\t" \
            "addps  " #sumb "(%2),       %%xmm0 \n\t"     \
            "movaps        %%xmm0," #out2 "(%0) \n\t"

    if (incr == 1) {
        /* Contiguous output: vectorized combine of samples 0..15 and
         * 16..31 in one asm statement (out must be 16-byte aligned). */
        __asm__ volatile(
            SUMS( 0, 48,  4, 52,  0, 112)
            SUMS(16, 32, 20, 36, 16,  96)
            SUMS(32, 16, 36, 20, 32,  80)
            SUMS(48,  0, 52,  4, 48,  64)

            :"+&r"(out)
            :"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
            :"memory"
            );
        out += 16*incr;
    } else {
        /* Strided output: scalar combine, writing from both ends inward. */
        int j;
        float *out2 = out + 32 * incr;
        out[0 ]  = -suma[ 0];
        out += incr;
        out2 -= incr;
        for(j=1;j<16;j++) {
            *out  = -suma[ j] + sumd[16-j];
            *out2 =  sumb[16-j] + sumc[ j];
            out  += incr;
            out2 -= incr;
        }
    }

    /* Output sample 16: its own negated 8-tap dot product. */
    sum = 0;
    SUM8(MLSS, sum, win + 16 + 32, in + 32);
    *out = sum;
}
  164. #endif /* HAVE_SSE2_INLINE */
  165. #if HAVE_YASM
/* Define imdct36_blocks_<CPU1>(): run the 36-point IMDCT over "count"
 * subband blocks. Groups of four blocks go through the 4-at-a-time SIMD
 * kernel ff_four_imdct36_float_<CPU2>() with the interleaved window table
 * mdct_win_sse; the remaining (count & 3) blocks use the single-block
 * kernel ff_imdct36_float_<CPU1>() with the scalar window table.
 * switch_point/block_type pick the MDCT window per block; "4 & -(j & 1)"
 * adds 4 for odd-indexed blocks, selecting the alternate window half. */
#define DECL_IMDCT_BLOCKS(CPU1, CPU2)                                        \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in,       \
                                    int count, int switch_point, int block_type) \
{                                                                            \
    /* largest multiple of 4 <= count: end of the vectorized loop */         \
    int align_end = count - (count & 3);                                     \
    int j;                                                                   \
    for (j = 0; j < align_end; j+= 4) {                                      \
        LOCAL_ALIGNED_16(float, tmpbuf, [1024]);                             \
        float *win = mdct_win_sse[switch_point && j < 4][block_type];        \
        /* apply window & overlap with previous buffer */                    \
                                                                             \
        /* select window */                                                  \
        ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf);           \
        in  += 4*18;                                                         \
        buf += 4*18;                                                         \
        out += 4;                                                            \
    }                                                                        \
    for (; j < count; j++) {                                                 \
        /* apply window & overlap with previous buffer */                    \
                                                                             \
        /* select window */                                                  \
        int win_idx = (switch_point && j < 2) ? 0 : block_type;              \
        float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))];            \
                                                                             \
        ff_imdct36_float_ ## CPU1(out, buf, in, win);                        \
                                                                             \
        in  += 18;                                                           \
        buf++;                                                               \
        out++;                                                               \
    }                                                                        \
}
  197. #if HAVE_SSE
  198. DECL_IMDCT_BLOCKS(sse,sse)
  199. DECL_IMDCT_BLOCKS(sse2,sse)
  200. DECL_IMDCT_BLOCKS(sse3,sse)
  201. DECL_IMDCT_BLOCKS(ssse3,sse)
  202. #endif
  203. #if HAVE_AVX_EXTERNAL
  204. DECL_IMDCT_BLOCKS(avx,avx)
  205. #endif
  206. #endif /* HAVE_YASM */
  207. av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
  208. {
  209. int mm_flags = av_get_cpu_flags();
  210. int i, j;
  211. for (j = 0; j < 4; j++) {
  212. for (i = 0; i < 40; i ++) {
  213. mdct_win_sse[0][j][4*i ] = ff_mdct_win_float[j ][i];
  214. mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
  215. mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j ][i];
  216. mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
  217. mdct_win_sse[1][j][4*i ] = ff_mdct_win_float[0 ][i];
  218. mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4 ][i];
  219. mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j ][i];
  220. mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
  221. }
  222. }
  223. #if HAVE_SSE2_INLINE
  224. if (mm_flags & AV_CPU_FLAG_SSE2) {
  225. s->apply_window_float = apply_window_mp3;
  226. }
  227. #endif /* HAVE_SSE2_INLINE */
  228. #if HAVE_YASM
  229. if (EXTERNAL_AVX(mm_flags)) {
  230. s->imdct36_blocks_float = imdct36_blocks_avx;
  231. } else if (EXTERNAL_SSSE3(mm_flags)) {
  232. s->imdct36_blocks_float = imdct36_blocks_ssse3;
  233. } else if (EXTERNAL_SSE3(mm_flags)) {
  234. s->imdct36_blocks_float = imdct36_blocks_sse3;
  235. } else if (EXTERNAL_SSE2(mm_flags)) {
  236. s->imdct36_blocks_float = imdct36_blocks_sse2;
  237. } else if (EXTERNAL_SSE(mm_flags)) {
  238. s->imdct36_blocks_float = imdct36_blocks_sse;
  239. }
  240. #endif /* HAVE_YASM */
  241. }