/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float   *dst, const float   *src, int *consumed, int src_size, int dst_size, int update_ctx);
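
/* Rounding bias for the int16 cores: 0x4000 is added to the 32-bit accumulator
 * before the psrad $15 shift, so the fixed-point result is rounded to nearest.
 * The second qword is zero so the upper lanes of the SSE2 accumulator start at 0. */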
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL };
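
/* COMMON_CORE_INT16_MMX2: MMX dot product of the int16 input window against one
 * filter bank.  Scalar sketch, using the names the macro expects in scope:
 *     dst[dst_index] = clip_int16((sum_i src[sample_index + i] * filter[i] + 0x4000) >> 15)
 * pmaddwd accumulates pairs, pshufw/paddd do the horizontal add, and packssdw
 * provides the saturation. */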
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                         \n\t"\
    "movq    (%1, %0), %%mm1    \n\t"\
    "pmaddwd (%2, %0), %%mm1    \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "add $8, %0                 \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "psrad $15, %%mm0           \n\t"\
    "packssdw %%mm0, %%mm0      \n\t"\
    "movd %%mm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);
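
/* LINEAR_CORE_INT16_MMX2: same inner loop, but the input window is multiplied
 * against two filter banks at once (filter and filter + c->filter_alloc); the
 * two 32-bit sums are returned in val and v2, which the caller presumably
 * blends for linear interpolation between adjacent filter phases. */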
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor %%mm0, %%mm0          \n\t"\
    "pxor %%mm2, %%mm2          \n\t"\
    "1:                         \n\t"\
    "movq    (%3, %0), %%mm1    \n\t"\
    "movq %%mm1, %%mm3          \n\t"\
    "pmaddwd (%4, %0), %%mm1    \n\t"\
    "pmaddwd (%5, %0), %%mm3    \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "paddd %%mm3, %%mm2         \n\t"\
    "add $8, %0                 \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "paddd %%mm3, %%mm2         \n\t"\
    "movd %%mm0, %1             \n\t"\
    "movd %%mm2, %2             \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
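
/* COMMON_CORE_INT16_SSE2: the same rounded dot product as the MMX2 core, eight
 * int16 samples per iteration.  The extra pshufd $0x01 step finishes the
 * horizontal add across all four 32-bit lanes before the shift and pack. */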
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "add $16, %0                  \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "psrad $15, %%xmm0            \n\t"\
    "packssdw %%xmm0, %%xmm0      \n\t"\
    "movd %%xmm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);
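
/* LINEAR_CORE_INT16_SSE2: SSE2 version of the two-filter core; the two
 * accumulated dot products are returned in val and v2 for the caller to
 * interpolate. */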
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor %%xmm0, %%xmm0          \n\t"\
    "pxor %%xmm2, %%xmm2          \n\t"\
    "1:                           \n\t"\
    "movdqu  (%3, %0), %%xmm1     \n\t"\
    "movdqa %%xmm1, %%xmm3        \n\t"\
    "pmaddwd (%4, %0), %%xmm1     \n\t"\
    "pmaddwd (%5, %0), %%xmm3     \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "add $16, %0                  \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "movd %%xmm0, %1              \n\t"\
    "movd %%xmm2, %2              \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
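
/* COMMON_CORE_FLT_SSE: single-precision float dot product, four samples per
 * iteration; movhlps adds the upper pair onto the lower pair and the
 * movss/shufps $1/addps sequence adds the remaining element before the scalar
 * store to dst[dst_index]. */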
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps %%xmm0, %%xmm0      \n\t"\
    "1:                        \n\t"\
    "movups (%1, %0), %%xmm1   \n\t"\
    "mulps  (%2, %0), %%xmm1   \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "add $16, %0               \n\t"\
    " js 1b                    \n\t"\
    "movhlps %%xmm0, %%xmm1    \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "movss %%xmm0, %%xmm1      \n\t"\
    "shufps $1, %%xmm0, %%xmm0 \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "movss %%xmm0, (%3)        \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);
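
/* LINEAR_CORE_FLT_SSE: float version of the two-filter core; the two partial
 * sums are reduced the same way and written out through the "=m" memory
 * operands to val and v2. */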
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps %%xmm0, %%xmm0      \n\t"\
    "xorps %%xmm2, %%xmm2      \n\t"\
    "1:                        \n\t"\
    "movups (%3, %0), %%xmm1   \n\t"\
    "movaps %%xmm1, %%xmm3     \n\t"\
    "mulps  (%4, %0), %%xmm1   \n\t"\
    "mulps  (%5, %0), %%xmm3   \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "addps %%xmm3, %%xmm2      \n\t"\
    "add $16, %0               \n\t"\
    " js 1b                    \n\t"\
    "movhlps %%xmm0, %%xmm1    \n\t"\
    "movhlps %%xmm2, %%xmm3    \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "addps %%xmm3, %%xmm2      \n\t"\
    "movss %%xmm0, %%xmm1      \n\t"\
    "movss %%xmm2, %%xmm3      \n\t"\
    "shufps $1, %%xmm0, %%xmm0 \n\t"\
    "shufps $1, %%xmm2, %%xmm2 \n\t"\
    "addps %%xmm1, %%xmm0      \n\t"\
    "addps %%xmm3, %%xmm2      \n\t"\
    "movss %%xmm0, %1          \n\t"\
    "movss %%xmm2, %2          \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);