You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

253 lines
12KB

  1. /*
  2. * x86-optimized AC-3 DSP functions
  3. * Copyright (c) 2011 Justin Ruggles
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/attributes.h"
  22. #include "libavutil/mem.h"
  23. #include "libavutil/x86/asm.h"
  24. #include "libavutil/x86/cpu.h"
  25. #include "libavcodec/ac3.h"
  26. #include "libavcodec/ac3dsp.h"
/*
 * Prototypes for the AC-3 DSP routines implemented in x86 assembly.
 * Each suffix (_mmx, _mmxext, _sse2, _ssse3, _3dnow, ...) names the minimum
 * SIMD instruction set that implementation requires; ff_ac3dsp_init_x86()
 * below picks the best variant supported by the running CPU.
 */

/* Per-block minimum of exponents over reused blocks (in place in exp[]). */
void ff_ac3_exponent_min_mmx (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2 (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

/* Maximum MSB of the absolute values in src[0..len-1]. */
int ff_ac3_max_msb_abs_int16_mmx (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);

/* In-place arithmetic shifts of sample buffers. */
void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

/* Convert float samples to 24-bit fixed point. */
void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);

/* Windowing of int16 samples; the _round variants are the non-bit-exact path. */
void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
#if HAVE_SSE_INLINE && HAVE_7REGS

/* Conditional-expansion helpers: IF1(x) emits x, IF0(x) emits nothing.
 * They are passed as the mono/stereo arguments of MIX5 and MIX_MISC to
 * include or drop the instructions specific to a 1- or 2-channel output. */
#define IF1(x) x
#define IF0(x)

/*
 * Fast 5-channel downmix to mono or stereo (exactly one of the two macro
 * arguments must be IF1, the other IF0).  Only three distinct coefficients
 * are loaded and broadcast: matrix[0][0] (byte offset 0), matrix[1][0]
 * (offset 8) and matrix[3][0] (offset 24) -- the caller has already verified
 * the matrix has the symmetric shape this requires.  The loop walks the five
 * sample buffers in 16-byte (4-float) steps using the negative byte offset
 * in 'i' (counts up to 0), multiplying and accumulating in xmm0/xmm2 and
 * storing the result back over samples[0] (and samples[1] for stereo).
 * NOTE(review): relies on surrounding code for 16-byte alignment of the
 * sample buffers and len being a multiple of 4 -- not checked here.
 */
#define MIX5(mono, stereo) \
    __asm__ volatile ( \
        "movss 0(%1), %%xmm5 \n" \
        "movss 8(%1), %%xmm6 \n" \
        "movss 24(%1), %%xmm7 \n" \
        "shufps $0, %%xmm5, %%xmm5 \n" \
        "shufps $0, %%xmm6, %%xmm6 \n" \
        "shufps $0, %%xmm7, %%xmm7 \n" \
        "1: \n" \
        "movaps (%0, %2), %%xmm0 \n" \
        "movaps (%0, %3), %%xmm1 \n" \
        "movaps (%0, %4), %%xmm2 \n" \
        "movaps (%0, %5), %%xmm3 \n" \
        "movaps (%0, %6), %%xmm4 \n" \
        "mulps %%xmm5, %%xmm0 \n" \
        "mulps %%xmm6, %%xmm1 \n" \
        "mulps %%xmm5, %%xmm2 \n" \
        "mulps %%xmm7, %%xmm3 \n" \
        "mulps %%xmm7, %%xmm4 \n" \
        stereo("addps %%xmm1, %%xmm0 \n") \
        "addps %%xmm1, %%xmm2 \n" \
        "addps %%xmm3, %%xmm0 \n" \
        "addps %%xmm4, %%xmm2 \n" \
        mono("addps %%xmm2, %%xmm0 \n") \
        "movaps %%xmm0, (%0, %2) \n" \
        stereo("movaps %%xmm2, (%0, %3) \n") \
        "add $16, %0 \n" \
        "jl 1b \n" \
        : "+&r"(i) \
        : "r"(matrix), \
          "r"(samples[0] + len), \
          "r"(samples[1] + len), \
          "r"(samples[2] + len), \
          "r"(samples[3] + len), \
          "r"(samples[4] + len) \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
          "memory" \
    );
/*
 * Generic downmix of in_ch input channels to mono (stereo = IF0) or stereo
 * (stereo = IF1) for matrices the fast MIX5 path cannot handle.
 *
 * Operand map (from the constraint lists in ac3_downmix_sse):
 *   %0 = i   : negative byte offset into the sample buffers, counts up to 0
 *   %1, %3   : scratch pointers (j, m) loaded from the samp[] pointer table
 *   %2 = k   : channel counter, runs from -4*(in_ch-1) up to 0 in steps of 4
 *   %4       : matrix_simd + in_ch (broadcast coefficient table, built by
 *              the caller; 8 floats / 32 bytes per input channel)
 *   %5       : the constant -4*(in_ch-1) used to (re)initialize k
 *   %6       : samp + in_ch (per-channel sample pointers, biased so that
 *              negative k indexes them)
 *   %c7/%c8  : sizeof(float *) and sizeof(float *)/4, immediates used to
 *              scale the k index into the pointer table on both 32- and
 *              64-bit builds.
 *
 * The outer loop (label 1) starts each 4-sample group from the first input
 * channel; the inner loop (label 2) accumulates the remaining channels into
 * xmm0 (and xmm1 for stereo), then results are stored over the first (and,
 * for stereo, second) channel's buffer.
 * NOTE(review): xmm4/xmm5 are expected to be preloaded by the caller via the
 * matrix_simd setup asm in ac3_downmix_sse -- this macro does not load them.
 */
#define MIX_MISC(stereo) \
    __asm__ volatile ( \
        "mov %5, %2 \n" \
        "1: \n" \
        "mov -%c7(%6, %2, %c8), %3 \n" \
        "movaps (%3, %0), %%xmm0 \n" \
        stereo("movaps %%xmm0, %%xmm1 \n") \
        "mulps %%xmm4, %%xmm0 \n" \
        stereo("mulps %%xmm5, %%xmm1 \n") \
        "2: \n" \
        "mov (%6, %2, %c8), %1 \n" \
        "movaps (%1, %0), %%xmm2 \n" \
        stereo("movaps %%xmm2, %%xmm3 \n") \
        "mulps (%4, %2, 8), %%xmm2 \n" \
        stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
        "addps %%xmm2, %%xmm0 \n" \
        stereo("addps %%xmm3, %%xmm1 \n") \
        "add $4, %2 \n" \
        "jl 2b \n" \
        "mov %5, %2 \n" \
        stereo("mov (%6, %2, %c8), %1 \n") \
        "movaps %%xmm0, (%3, %0) \n" \
        stereo("movaps %%xmm1, (%1, %0) \n") \
        "add $16, %0 \n" \
        "jl 1b \n" \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
        : "r"(matrix_simd + in_ch), \
          "g"((intptr_t) - 4 * (in_ch - 1)), \
          "r"(samp + in_ch), \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4) \
        : "memory" \
    );
/**
 * Downmix in_ch channels of float samples to out_ch channels, in place over
 * the first out_ch sample buffers, using SSE inline assembly.
 *
 * @param samples per-channel pointers to 'len' floats each; results are
 *                written back over samples[0] (and samples[1] for stereo)
 * @param matrix  downmix coefficients, matrix[in][out] with out in {0,1}
 * @param out_ch  number of output channels (1 or 2 are the supported paths)
 * @param in_ch   number of input channels (up to AC3_MAX_CHANNELS)
 * @param len     number of samples per channel
 *
 * NOTE(review): the asm uses aligned loads/stores (movaps) and 16-byte loop
 * steps, so buffers are presumably 16-byte aligned and len a multiple of 4 --
 * enforced by callers, not checked here.
 */
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    /* Reinterpret the float coefficients as raw 32-bit patterns so that
     * exact-zero and exact-equality tests below are cheap integer compares.
     * NOTE(review): this is a type pun via pointer cast (strict-aliasing
     * caveat); kept as-is since the project builds with this assumption. */
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i, j, k, m;

    /* Negative byte offset walked up to 0 by the asm loops. */
    i = -len * sizeof(float);

    /* Fast path: 5->2 downmix where the matrix has the standard symmetric
     * shape (front L/R coefficients mirrored, center/surround pairs equal,
     * cross terms exactly zero). */
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[0][1] | matrix_cmp[2][0] |
          matrix_cmp[3][1] | matrix_cmp[4][0] |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
        MIX5(IF0, IF1);
    /* Fast path: 5->1 downmix with matching L/R and surround coefficients. */
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        MIX5(IF1, IF0);
    } else {
        /* Generic path: broadcast each coefficient into a 4-float lane table
         * (matrix_simd) with a small asm loop, then run MIX_MISC. */
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        float *samp[AC3_MAX_CHANNELS];

        /* Bias sample pointers to the buffer ends; the asm indexes them with
         * the negative offset i. */
        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        /* j counts down over the 2*in_ch source coefficients in bytes. */
        j = 2 * in_ch * sizeof(float);
        __asm__ volatile (
            "1: \n"
            "sub $8, %0 \n"
            "movss (%2, %0), %%xmm4 \n"
            "movss 4(%2, %0), %%xmm5 \n"
            "shufps $0, %%xmm4, %%xmm4 \n"
            "shufps $0, %%xmm5, %%xmm5 \n"
            "movaps %%xmm4, (%1, %0, 4) \n"
            "movaps %%xmm5, 16(%1, %0, 4) \n"
            "jg 1b \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        /* xmm4/xmm5 now hold the first channel's broadcast coefficients,
         * which MIX_MISC uses for its initial multiply. */
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}
#endif /* HAVE_SSE_INLINE && HAVE_7REGS */
  175. av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
  176. {
  177. int cpu_flags = av_get_cpu_flags();
  178. if (EXTERNAL_MMX(cpu_flags)) {
  179. c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
  180. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
  181. c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
  182. c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
  183. }
  184. if (EXTERNAL_AMD3DNOW(cpu_flags)) {
  185. if (!bit_exact) {
  186. c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
  187. }
  188. }
  189. if (EXTERNAL_MMXEXT(cpu_flags)) {
  190. c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
  191. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
  192. if (bit_exact) {
  193. c->apply_window_int16 = ff_apply_window_int16_mmxext;
  194. } else {
  195. c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
  196. }
  197. }
  198. if (EXTERNAL_SSE(cpu_flags)) {
  199. c->float_to_fixed24 = ff_float_to_fixed24_sse;
  200. }
  201. if (EXTERNAL_SSE2(cpu_flags)) {
  202. c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
  203. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
  204. c->float_to_fixed24 = ff_float_to_fixed24_sse2;
  205. c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
  206. c->extract_exponents = ff_ac3_extract_exponents_sse2;
  207. if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
  208. c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
  209. c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
  210. }
  211. if (bit_exact) {
  212. c->apply_window_int16 = ff_apply_window_int16_sse2;
  213. } else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
  214. c->apply_window_int16 = ff_apply_window_int16_round_sse2;
  215. }
  216. }
  217. if (EXTERNAL_SSSE3(cpu_flags)) {
  218. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
  219. if (cpu_flags & AV_CPU_FLAG_ATOM) {
  220. c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
  221. } else {
  222. c->extract_exponents = ff_ac3_extract_exponents_ssse3;
  223. c->apply_window_int16 = ff_apply_window_int16_ssse3;
  224. }
  225. }
  226. #if HAVE_SSE_INLINE && HAVE_7REGS
  227. if (INLINE_SSE(cpu_flags)) {
  228. c->downmix = ac3_downmix_sse;
  229. }
  230. #endif
  231. }