/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_sse  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_float_avx  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
int swri_resample_double_sse2(struct ResampleContext *c, double *dst, const double *src, int *consumed, int src_size, int dst_size, int update_ctx);

DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};

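/*
 * The macros below are inline-asm loop bodies expanded inside a C resampler
 * loop that provides c, src, sample_index, filter, dst and dst_index (and,
 * for the LINEAR variants, val and v2) in scope.  As a rough illustration
 * only (this helper is not used anywhere; its name and signature are invented
 * here for clarity), each COMMON_CORE_* variant computes one output sample as
 * a dot product of filter_length taps with the current source window; the
 * int16 variants additionally bias with ff_resample_int16_rounder (0x4000),
 * shift right by 15 and saturate to 16 bits:
 */
static inline int16_t resample_common_int16_scalar_sketch(const int16_t *src,
                                                          const int16_t *filter,
                                                          int filter_length)
{
    int64_t acc = 0x4000;                    /* rounding bias, as in ff_resample_int16_rounder */
    for (int i = 0; i < filter_length; i++)  /* "pmaddwd" + "paddd" accumulation */
        acc += (int32_t)src[i] * filter[i];
    acc >>= 15;                              /* "psrad $15" */
    if (acc > INT16_MAX) acc = INT16_MAX;    /* "packssdw" saturation */
    if (acc < INT16_MIN) acc = INT16_MIN;
    return (int16_t)acc;
}
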
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
        "1: \n\t"\
        "movq (%1, %0), %%mm1 \n\t"\
        "pmaddwd (%2, %0), %%mm1 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "add $8, %0 \n\t"\
        " js 1b \n\t"\
        "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "psrad $15, %%mm0 \n\t"\
        "packssdw %%mm0, %%mm0 \n\t"\
        "movd %%mm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
    );

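/*
 * The LINEAR_CORE_* variants keep two accumulators: the same source window is
 * multiplied both by the current filter phase and by the next phase at
 * filter + c->filter_alloc, and the two sums are returned in val and v2 so
 * the caller can blend them (linear interpolation between filter phases).
 * An illustrative scalar sketch of the int16 case (again, the helper, its
 * name and its signature are invented here for illustration only):
 */
static inline void resample_linear_int16_scalar_sketch(const int16_t *src,
                                                       const int16_t *filter,
                                                       int filter_length,
                                                       int filter_alloc,
                                                       int *val, int *v2)
{
    int sum0 = 0, sum1 = 0;
    for (int i = 0; i < filter_length; i++) {
        sum0 += (int32_t)src[i] * filter[i];                /* accumulator %%mm0 / %%xmm0 */
        sum1 += (int32_t)src[i] * filter[filter_alloc + i]; /* accumulator %%mm2 / %%xmm2 */
    }
    *val = sum0;  /* "movd %%mm0, %1" */
    *v2  = sum1;  /* "movd %%mm2, %2" */
}
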
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0 \n\t"\
        "pxor %%mm2, %%mm2 \n\t"\
        "1: \n\t"\
        "movq (%3, %0), %%mm1 \n\t"\
        "movq %%mm1, %%mm3 \n\t"\
        "pmaddwd (%4, %0), %%mm1 \n\t"\
        "pmaddwd (%5, %0), %%mm3 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "paddd %%mm3, %%mm2 \n\t"\
        "add $8, %0 \n\t"\
        " js 1b \n\t"\
        "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
        "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "paddd %%mm3, %%mm2 \n\t"\
        "movd %%mm0, %1 \n\t"\
        "movd %%mm2, %2 \n\t"\
        : "+r" (len),\
          "=r" (val),\
          "=r" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    );

#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
        "1: \n\t"\
        "movdqu (%1, %0), %%xmm1 \n\t"\
        "pmaddwd (%2, %0), %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "psrad $15, %%xmm0 \n\t"\
        "packssdw %%xmm0, %%xmm0 \n\t"\
        "movd %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );

#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "pxor %%xmm0, %%xmm0 \n\t"\
        "pxor %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movdqu (%3, %0), %%xmm1 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "pmaddwd (%4, %0), %%xmm1 \n\t"\
        "pmaddwd (%5, %0), %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
        "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
        "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "movd %%xmm0, %1 \n\t"\
        "movd %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=r" (val),\
          "=r" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );

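/*
 * The float (SSE/AVX) and double (SSE2) cores below follow the same two
 * patterns as the int16 cores: a multiply-accumulate loop (mulps/addps,
 * vmulps/vaddps, mulpd/addpd) driven by the negative offset counter in %0,
 * a horizontal reduction of the vector accumulator(s) (movhlps/shufps, plus
 * vextractf128 for AVX), and a direct movss/movsd store of the result.
 * Unlike the int16 cores there is no rounding constant and no shift.
 */
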
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "xorps %%xmm0, %%xmm0 \n\t"\
        "1: \n\t"\
        "movups (%1, %0), %%xmm1 \n\t"\
        "mulps (%2, %0), %%xmm1 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "movss %%xmm0, %%xmm1 \n\t"\
        "shufps $1, %%xmm0, %%xmm0 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "movss %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );

#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "xorps %%xmm0, %%xmm0 \n\t"\
        "xorps %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movups (%3, %0), %%xmm1 \n\t"\
        "movaps %%xmm1, %%xmm3 \n\t"\
        "mulps (%4, %0), %%xmm1 \n\t"\
        "mulps (%5, %0), %%xmm3 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "movhlps %%xmm2, %%xmm3 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "movss %%xmm0, %%xmm1 \n\t"\
        "movss %%xmm2, %%xmm3 \n\t"\
        "shufps $1, %%xmm0, %%xmm0 \n\t"\
        "shufps $1, %%xmm2, %%xmm2 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "movss %%xmm0, %1 \n\t"\
        "movss %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );

#define COMMON_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
        "1: \n\t"\
        "vmovups (%1, %0), %%ymm1 \n\t"\
        "vmulps (%2, %0), %%ymm1, %%ymm1 \n\t"\
        "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
        "add $32, %0 \n\t"\
        " js 1b \n\t"\
        "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
        "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vmovss %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );

#define LINEAR_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
        "vxorps %%ymm2, %%ymm2, %%ymm2 \n\t"\
        "1: \n\t"\
        "vmovups (%3, %0), %%ymm1 \n\t"\
        "vmulps (%5, %0), %%ymm1, %%ymm3 \n\t"\
        "vmulps (%4, %0), %%ymm1, %%ymm1 \n\t"\
        "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
        "vaddps %%ymm3, %%ymm2, %%ymm2 \n\t"\
        "add $32, %0 \n\t"\
        " js 1b \n\t"\
        "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
        "vextractf128 $1, %%ymm2, %%xmm3 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
        "vmovhlps %%xmm2, %%xmm3, %%xmm3 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
        "vshufps $1, %%xmm2, %%xmm2, %%xmm3 \n\t"\
        "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddss %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vmovss %%xmm0, %1 \n\t"\
        "vmovss %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );

#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
    __asm__ volatile(\
        "xorpd %%xmm0, %%xmm0 \n\t"\
        "1: \n\t"\
        "movupd (%1, %0), %%xmm1 \n\t"\
        "mulpd (%2, %0), %%xmm1 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "movsd %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );

#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
    __asm__ volatile(\
        "xorpd %%xmm0, %%xmm0 \n\t"\
        "xorpd %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movupd (%3, %0), %%xmm1 \n\t"\
        "movapd %%xmm1, %%xmm3 \n\t"\
        "mulpd (%4, %0), %%xmm1 \n\t"\
        "mulpd (%5, %0), %%xmm3 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "addpd %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "movhlps %%xmm2, %%xmm3 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "addpd %%xmm3, %%xmm2 \n\t"\
        "movsd %%xmm0, %1 \n\t"\
        "movsd %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );