/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"
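/* x86-optimized resampling cores for libswresample.  The swri_resample_*
 * prototypes below are the per-format assembly entry points; the *_CORE_*
 * macros further down are inline-asm inner loops that expect the expansion
 * site to provide c, src, dst, filter, sample_index, dst_index and, for the
 * LINEAR variants, val and v2. */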
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float   *dst, const float   *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double  *dst, const double  *src, int *consumed, int src_size, int dst_size, int update_ctx);
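/* Rounding bias for the int16 cores: 0x4000 is added to the 32-bit
 * accumulator so the subsequent arithmetic shift right by 15 rounds the
 * filtered sum to nearest. */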
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
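/* MMX2 int16 common core: accumulates src[sample_index + i] * filter[i]
 * with pmaddwd, 4 samples (8 bytes) per iteration, horizontally sums the
 * two accumulator lanes, then rounds, shifts right by 15, packs with
 * saturation and stores the result at dst + dst_index. */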
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
"movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
"1:                         \n\t"\
"movq    (%1, %0), %%mm1    \n\t"\
"pmaddwd (%2, %0), %%mm1    \n\t"\
"paddd %%mm1, %%mm0         \n\t"\
"add $8, %0                 \n\t"\
" js 1b                     \n\t"\
"pshufw $0x0E, %%mm0, %%mm1 \n\t"\
"paddd %%mm1, %%mm0         \n\t"\
"psrad $15, %%mm0           \n\t"\
"packssdw %%mm0, %%mm0      \n\t"\
"movd %%mm0, (%3)           \n\t"\
: "+r" (len)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (dst+dst_index)\
  NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
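/* MMX2 int16 linear core: runs two dot products in parallel, against the
 * current filter and the one at filter + c->filter_alloc, and returns the
 * two 32-bit sums in val and v2 for the caller's interpolation between the
 * two filter phases. */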
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
"pxor %%mm0, %%mm0          \n\t"\
"pxor %%mm2, %%mm2          \n\t"\
"1:                         \n\t"\
"movq    (%3, %0), %%mm1    \n\t"\
"movq %%mm1, %%mm3          \n\t"\
"pmaddwd (%4, %0), %%mm1    \n\t"\
"pmaddwd (%5, %0), %%mm3    \n\t"\
"paddd %%mm1, %%mm0         \n\t"\
"paddd %%mm3, %%mm2         \n\t"\
"add $8, %0                 \n\t"\
" js 1b                     \n\t"\
"pshufw $0x0E, %%mm0, %%mm1 \n\t"\
"pshufw $0x0E, %%mm2, %%mm3 \n\t"\
"paddd %%mm1, %%mm0         \n\t"\
"paddd %%mm3, %%mm2         \n\t"\
"movd %%mm0, %1             \n\t"\
"movd %%mm2, %2             \n\t"\
: "+r" (len),\
  "=r" (val),\
  "=r" (v2)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
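/* SSE2 int16 common core: same operation as the MMX2 common core but
 * 8 samples (16 bytes) per iteration on xmm registers, with a two-step
 * horizontal add before the round, shift and store. */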
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
"movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
"1:                           \n\t"\
"movdqu  (%1, %0), %%xmm1     \n\t"\
"pmaddwd (%2, %0), %%xmm1     \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"add $16, %0                  \n\t"\
" js 1b                       \n\t"\
"pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"psrad $15, %%xmm0            \n\t"\
"packssdw %%xmm0, %%xmm0      \n\t"\
"movd %%xmm0, (%3)            \n\t"\
: "+r" (len)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (dst+dst_index)\
  NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
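/* SSE2 int16 linear core: SSE2 version of LINEAR_CORE_INT16_MMX2, producing
 * the two accumulated sums in val and v2. */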
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
"pxor %%xmm0, %%xmm0          \n\t"\
"pxor %%xmm2, %%xmm2          \n\t"\
"1:                           \n\t"\
"movdqu  (%3, %0), %%xmm1     \n\t"\
"movdqa %%xmm1, %%xmm3        \n\t"\
"pmaddwd (%4, %0), %%xmm1     \n\t"\
"pmaddwd (%5, %0), %%xmm3     \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"paddd %%xmm3, %%xmm2         \n\t"\
"add $16, %0                  \n\t"\
" js 1b                       \n\t"\
"pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
"pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"paddd %%xmm3, %%xmm2         \n\t"\
"pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
"pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
"paddd %%xmm1, %%xmm0         \n\t"\
"paddd %%xmm3, %%xmm2         \n\t"\
"movd %%xmm0, %1              \n\t"\
"movd %%xmm2, %2              \n\t"\
: "+r" (len),\
  "=r" (val),\
  "=r" (v2)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
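/* SSE float common core: multiply-accumulates 4 floats per iteration and
 * horizontally sums xmm0 (movhlps + shufps) into a single float stored at
 * dst + dst_index. */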
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
"xorps %%xmm0, %%xmm0      \n\t"\
"1:                        \n\t"\
"movups (%1, %0), %%xmm1   \n\t"\
"mulps  (%2, %0), %%xmm1   \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"add $16, %0               \n\t"\
" js 1b                    \n\t"\
"movhlps %%xmm0, %%xmm1    \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"movss %%xmm0, %%xmm1      \n\t"\
"shufps $1, %%xmm0, %%xmm0 \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"movss %%xmm0, (%3)        \n\t"\
: "+r" (len)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (dst+dst_index)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
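/* SSE float linear core: two parallel float dot products against the current
 * and next filter; the scalar results are written to val and v2 through
 * memory ("=m") operands. */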
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
"xorps %%xmm0, %%xmm0      \n\t"\
"xorps %%xmm2, %%xmm2      \n\t"\
"1:                        \n\t"\
"movups (%3, %0), %%xmm1   \n\t"\
"movaps %%xmm1, %%xmm3     \n\t"\
"mulps  (%4, %0), %%xmm1   \n\t"\
"mulps  (%5, %0), %%xmm3   \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"addps %%xmm3, %%xmm2      \n\t"\
"add $16, %0               \n\t"\
" js 1b                    \n\t"\
"movhlps %%xmm0, %%xmm1    \n\t"\
"movhlps %%xmm2, %%xmm3    \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"addps %%xmm3, %%xmm2      \n\t"\
"movss %%xmm0, %%xmm1      \n\t"\
"movss %%xmm2, %%xmm3      \n\t"\
"shufps $1, %%xmm0, %%xmm0 \n\t"\
"shufps $1, %%xmm2, %%xmm2 \n\t"\
"addps %%xmm1, %%xmm0      \n\t"\
"addps %%xmm3, %%xmm2      \n\t"\
"movss %%xmm0, %1          \n\t"\
"movss %%xmm2, %2          \n\t"\
: "+r" (len),\
  "=m" (val),\
  "=m" (v2)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
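/* SSE2 double common core: 2 doubles per iteration; movhlps folds the high
 * double onto the low one and movsd stores the scalar sum at dst + dst_index. */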
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
"xorpd %%xmm0, %%xmm0     \n\t"\
"1:                       \n\t"\
"movupd (%1, %0), %%xmm1  \n\t"\
"mulpd  (%2, %0), %%xmm1  \n\t"\
"addpd %%xmm1, %%xmm0     \n\t"\
"add $16, %0              \n\t"\
" js 1b                   \n\t"\
"movhlps %%xmm0, %%xmm1   \n\t"\
"addpd %%xmm1, %%xmm0     \n\t"\
"movsd %%xmm0, (%3)       \n\t"\
: "+r" (len)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (dst+dst_index)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
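/* SSE2 double linear core: double-precision counterpart of the float linear
 * core, writing the two filter sums to val and v2. */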
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
"xorpd %%xmm0, %%xmm0     \n\t"\
"xorpd %%xmm2, %%xmm2     \n\t"\
"1:                       \n\t"\
"movupd (%3, %0), %%xmm1  \n\t"\
"movapd %%xmm1, %%xmm3    \n\t"\
"mulpd  (%4, %0), %%xmm1  \n\t"\
"mulpd  (%5, %0), %%xmm3  \n\t"\
"addpd %%xmm1, %%xmm0     \n\t"\
"addpd %%xmm3, %%xmm2     \n\t"\
"add $16, %0              \n\t"\
" js 1b                   \n\t"\
"movhlps %%xmm0, %%xmm1   \n\t"\
"movhlps %%xmm2, %%xmm3   \n\t"\
"addpd %%xmm1, %%xmm0     \n\t"\
"addpd %%xmm3, %%xmm2     \n\t"\
"movsd %%xmm0, %1         \n\t"\
"movsd %%xmm2, %2         \n\t"\
: "+r" (len),\
  "=m" (val),\
  "=m" (v2)\
: "r" (((uint8_t*)(src+sample_index))-len),\
  "r" (((uint8_t*)filter)-len),\
  "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
  XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);