/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

/* Resampling entry points, one per sample format / instruction-set pair. */
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float   *dst, const float   *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double  *dst, const double  *src, int *consumed, int src_size, int dst_size, int update_ctx);

/* 0x4000 is the rounding bias added before the arithmetic >>15 (Q15) shift in the int16 cores. */
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};

/* Dot product of filter_length int16 samples with the filter using MMX2
 * (pmaddwd + pshufw); the sum is rounded via ff_resample_int16_rounder,
 * shifted right by 15 and stored with saturation at dst[dst_index]. */
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                         \n\t"\
    "movq    (%1, %0), %%mm1    \n\t"\
    "pmaddwd (%2, %0), %%mm1    \n\t"\
    "paddd     %%mm1, %%mm0     \n\t"\
    "add          $8, %0        \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd     %%mm1, %%mm0     \n\t"\
    "psrad       $15, %%mm0     \n\t"\
    "packssdw  %%mm0, %%mm0     \n\t"\
    "movd      %%mm0, (%3)      \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);

/* Like COMMON_CORE_INT16_MMX2, but accumulates two dot products at once:
 * src against the current filter and against the next filter phase
 * (filter + c->filter_alloc). The raw 32-bit sums are returned in val and
 * v2 so the caller can linearly interpolate between the two phases. */
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor      %%mm0, %%mm0     \n\t"\
    "pxor      %%mm2, %%mm2     \n\t"\
    "1:                         \n\t"\
    "movq    (%3, %0), %%mm1    \n\t"\
    "movq       %%mm1, %%mm3    \n\t"\
    "pmaddwd (%4, %0), %%mm1    \n\t"\
    "pmaddwd (%5, %0), %%mm3    \n\t"\
    "paddd      %%mm1, %%mm0    \n\t"\
    "paddd      %%mm3, %%mm2    \n\t"\
    "add           $8, %0       \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd      %%mm1, %%mm0    \n\t"\
    "paddd      %%mm3, %%mm2    \n\t"\
    "movd       %%mm0, %1       \n\t"\
    "movd       %%mm2, %2       \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);

/* SSE2 version of the int16 common core: 8 samples (16 bytes) per iteration
 * with unaligned loads, then a two-step horizontal add of the dword lanes. */
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "add          $16, %0         \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "psrad        $15, %%xmm0     \n\t"\
    "packssdw  %%xmm0, %%xmm0     \n\t"\
    "movd      %%xmm0, (%3)       \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);

/* SSE2 version of the int16 linear core: two parallel accumulators for the
 * current and the next filter phase, returned in val and v2. */
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor      %%xmm0, %%xmm0     \n\t"\
    "pxor      %%xmm2, %%xmm2     \n\t"\
    "1:                           \n\t"\
    "movdqu  (%3, %0), %%xmm1     \n\t"\
    "movdqa    %%xmm1, %%xmm3     \n\t"\
    "pmaddwd (%4, %0), %%xmm1     \n\t"\
    "pmaddwd (%5, %0), %%xmm3     \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "paddd     %%xmm3, %%xmm2     \n\t"\
    "add          $16, %0         \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "paddd     %%xmm3, %%xmm2     \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "paddd     %%xmm3, %%xmm2     \n\t"\
    "movd      %%xmm0, %1         \n\t"\
    "movd      %%xmm2, %2         \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

/* Single-precision SSE common core: 4 floats per iteration, followed by a
 * horizontal add of the 4 lanes; the scalar result goes to dst[dst_index]. */
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps     %%xmm0, %%xmm0   \n\t"\
    "1:                         \n\t"\
    "movups  (%1, %0), %%xmm1   \n\t"\
    "mulps   (%2, %0), %%xmm1   \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "add          $16, %0       \n\t"\
    " js 1b                     \n\t"\
    "movhlps   %%xmm0, %%xmm1   \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "movss     %%xmm0, %%xmm1   \n\t"\
    "shufps $1, %%xmm0, %%xmm0  \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "movss     %%xmm0, (%3)     \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);

/* SSE float linear core: accumulates against the current and the next
 * filter phase; the two horizontal sums are stored to val and v2. */
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
    "xorps     %%xmm0, %%xmm0   \n\t"\
    "xorps     %%xmm2, %%xmm2   \n\t"\
    "1:                         \n\t"\
    "movups  (%3, %0), %%xmm1   \n\t"\
    "movaps    %%xmm1, %%xmm3   \n\t"\
    "mulps   (%4, %0), %%xmm1   \n\t"\
    "mulps   (%5, %0), %%xmm3   \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "addps     %%xmm3, %%xmm2   \n\t"\
    "add          $16, %0       \n\t"\
    " js 1b                     \n\t"\
    "movhlps   %%xmm0, %%xmm1   \n\t"\
    "movhlps   %%xmm2, %%xmm3   \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "addps     %%xmm3, %%xmm2   \n\t"\
    "movss     %%xmm0, %%xmm1   \n\t"\
    "movss     %%xmm2, %%xmm3   \n\t"\
    "shufps $1, %%xmm0, %%xmm0  \n\t"\
    "shufps $1, %%xmm2, %%xmm2  \n\t"\
    "addps     %%xmm1, %%xmm0   \n\t"\
    "addps     %%xmm3, %%xmm2   \n\t"\
    "movss     %%xmm0, %1       \n\t"\
    "movss     %%xmm2, %2       \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

/* Double-precision SSE2 common core: 2 doubles per iteration, then a single
 * horizontal add of the two lanes; the result is stored with movsd. */
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd     %%xmm0, %%xmm0   \n\t"\
    "1:                         \n\t"\
    "movupd  (%1, %0), %%xmm1   \n\t"\
    "mulpd   (%2, %0), %%xmm1   \n\t"\
    "addpd     %%xmm1, %%xmm0   \n\t"\
    "add          $16, %0       \n\t"\
    " js 1b                     \n\t"\
    "movhlps   %%xmm0, %%xmm1   \n\t"\
    "addpd     %%xmm1, %%xmm0   \n\t"\
    "movsd     %%xmm0, (%3)     \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
);

/* SSE2 double linear core: two accumulators for the current and the next
 * filter phase; the horizontal sums are stored to val and v2. */
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd     %%xmm0, %%xmm0   \n\t"\
    "xorpd     %%xmm2, %%xmm2   \n\t"\
    "1:                         \n\t"\
    "movupd  (%3, %0), %%xmm1   \n\t"\
    "movapd    %%xmm1, %%xmm3   \n\t"\
    "mulpd   (%4, %0), %%xmm1   \n\t"\
    "mulpd   (%5, %0), %%xmm3   \n\t"\
    "addpd     %%xmm1, %%xmm0   \n\t"\
    "addpd     %%xmm3, %%xmm2   \n\t"\
    "add          $16, %0       \n\t"\
    " js 1b                     \n\t"\
    "movhlps   %%xmm0, %%xmm1   \n\t"\
    "movhlps   %%xmm2, %%xmm3   \n\t"\
    "addpd     %%xmm1, %%xmm0   \n\t"\
    "addpd     %%xmm3, %%xmm2   \n\t"\
    "movsd     %%xmm0, %1       \n\t"\
    "movsd     %%xmm2, %2       \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
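
For reference, here is a plain-C sketch of what the int16 COMMON_CORE macros above compute for one output sample: a Q15 dot product with rounding and saturation. This is an illustrative reading of the asm, not code from FFmpeg; the function name resample_common_int16_sketch is hypothetical, and the 64-bit accumulator is only a convenience (the asm accumulates in 32-bit dwords).

#include <stdint.h>

/* Illustrative scalar equivalent of COMMON_CORE_INT16_*: dot product of
 * filter_length Q15 coefficients with int16 samples, rounded by the 0x4000
 * bias, shifted right by 15 and saturated to int16 as packssdw does. */
static int16_t resample_common_int16_sketch(const int16_t *src,
                                            const int16_t *filter,
                                            int filter_length)
{
    int64_t acc = 0x4000;                     /* rounding bias before >>15 */
    for (int i = 0; i < filter_length; i++)
        acc += (int32_t)src[i] * filter[i];   /* the products pmaddwd sums */
    acc >>= 15;                               /* Q15 scale, like psrad $15 */
    if (acc > INT16_MAX) acc = INT16_MAX;     /* saturate like packssdw    */
    if (acc < INT16_MIN) acc = INT16_MIN;
    return (int16_t)acc;
}

The LINEAR_CORE variants compute the same kind of sum twice, once against filter and once against filter + c->filter_alloc, and hand both raw sums back in val and v2 so the caller can interpolate between adjacent filter phases.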