/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"
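
/* Rounding bias for the int16 cores below: 0x4000 is 0.5 in Q15 and is
 * added to the accumulator before the arithmetic right shift by 15. */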
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL };
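
/*
 * COMMON_CORE_*: inner loop for one output sample, a dot product of
 * c->filter_length input samples with one bank of filter coefficients.
 * Roughly equivalent scalar code, as a sketch inferred from the asm
 * (c, src, filter, sample_index, dst and dst_index are expected to be
 * provided by the function that expands the macro):
 *
 *     int acc = 0x4000;                                // rounding bias
 *     for (int i = 0; i < c->filter_length; i++)
 *         acc += src[sample_index + i] * filter[i];    // pmaddwd + paddd
 *     dst[dst_index] = av_clip_int16(acc >> 15);       // psrad + packssdw
 *
 * The loop below counts a negative byte offset (len = -2*filter_length for
 * int16 data) up towards zero, which is why the src and filter pointers
 * passed to the asm are pre-biased by -len.
 */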
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
        "1: \n\t"\
        "movq (%1, %0), %%mm1 \n\t"\
        "pmaddwd (%2, %0), %%mm1 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "add $8, %0 \n\t"\
        " js 1b \n\t"\
        "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "psrad $15, %%mm0 \n\t"\
        "packssdw %%mm0, %%mm0 \n\t"\
        "movd %%mm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
    );
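
/*
 * LINEAR_CORE_*: the same dot product computed against two consecutive
 * filter banks, filter and filter + c->filter_alloc. The two raw sums are
 * returned in val and v2 (also expected to be defined by the caller);
 * rounding, shifting and the blend of the two sums (presumably a linear
 * interpolation between adjacent filter phases, as the name suggests) are
 * left to the code that expands the macro.
 */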
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0 \n\t"\
        "pxor %%mm2, %%mm2 \n\t"\
        "1: \n\t"\
        "movq (%3, %0), %%mm1 \n\t"\
        "movq %%mm1, %%mm3 \n\t"\
        "pmaddwd (%4, %0), %%mm1 \n\t"\
        "pmaddwd (%5, %0), %%mm3 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "paddd %%mm3, %%mm2 \n\t"\
        "add $8, %0 \n\t"\
        " js 1b \n\t"\
        "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
        "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
        "paddd %%mm1, %%mm0 \n\t"\
        "paddd %%mm3, %%mm2 \n\t"\
        "movd %%mm0, %1 \n\t"\
        "movd %%mm2, %2 \n\t"\
        : "+r" (len),\
          "=r" (val),\
          "=r" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    );
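
/* SSE2 variant of the int16 common core: same structure as the MMX2 one,
 * but 8 samples (16 bytes) per iteration and two pshufd/paddd steps for the
 * final horizontal sum. */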
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
        "1: \n\t"\
        "movdqu (%1, %0), %%xmm1 \n\t"\
        "pmaddwd (%2, %0), %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "psrad $15, %%xmm0 \n\t"\
        "packssdw %%xmm0, %%xmm0 \n\t"\
        "movd %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );
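
/* SSE2 variant of the int16 linear core: two accumulators, 8 samples per
 * iteration, raw sums returned in val and v2 via movd. */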
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
    __asm__ volatile(\
        "pxor %%xmm0, %%xmm0 \n\t"\
        "pxor %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movdqu (%3, %0), %%xmm1 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "pmaddwd (%4, %0), %%xmm1 \n\t"\
        "pmaddwd (%5, %0), %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
        "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
        "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
        "paddd %%xmm1, %%xmm0 \n\t"\
        "paddd %%xmm3, %%xmm2 \n\t"\
        "movd %%xmm0, %1 \n\t"\
        "movd %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=r" (val),\
          "=r" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );
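
/* Float cores (SSE): same pattern with 32-bit float data, so len is
 * -4*c->filter_length bytes and 4 floats are processed per iteration.
 * No rounding bias is needed; the final sum is stored with movss. */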
#define COMMON_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "xorps %%xmm0, %%xmm0 \n\t"\
        "1: \n\t"\
        "movups (%1, %0), %%xmm1 \n\t"\
        "mulps (%2, %0), %%xmm1 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "movss %%xmm0, %%xmm1 \n\t"\
        "shufps $1, %%xmm0, %%xmm0 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "movss %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );
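
/* Float linear core (SSE): two running sums against the two filter banks,
 * written to val and v2 through memory ("=m") operands. */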
#define LINEAR_CORE_FLT_SSE \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "xorps %%xmm0, %%xmm0 \n\t"\
        "xorps %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movups (%3, %0), %%xmm1 \n\t"\
        "movaps %%xmm1, %%xmm3 \n\t"\
        "mulps (%4, %0), %%xmm1 \n\t"\
        "mulps (%5, %0), %%xmm3 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "movhlps %%xmm2, %%xmm3 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "movss %%xmm0, %%xmm1 \n\t"\
        "movss %%xmm2, %%xmm3 \n\t"\
        "shufps $1, %%xmm0, %%xmm0 \n\t"\
        "shufps $1, %%xmm2, %%xmm2 \n\t"\
        "addps %%xmm1, %%xmm0 \n\t"\
        "addps %%xmm3, %%xmm2 \n\t"\
        "movss %%xmm0, %1 \n\t"\
        "movss %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );
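
/* AVX variant of the float common core: 8 floats (32 bytes) per iteration;
 * vextractf128 folds the upper 128-bit half into the lower one before the
 * usual movhlps/shufps horizontal sum. */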
#define COMMON_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
        "1: \n\t"\
        "vmovups (%1, %0), %%ymm1 \n\t"\
        "vmulps (%2, %0), %%ymm1, %%ymm1 \n\t"\
        "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
        "add $32, %0 \n\t"\
        " js 1b \n\t"\
        "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
        "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vmovss %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );
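
/* AVX variant of the float linear core: the same 256-bit accumulation and
 * reduction, applied to both sums. */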
#define LINEAR_CORE_FLT_AVX \
    x86_reg len= -4*c->filter_length;\
    __asm__ volatile(\
        "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
        "vxorps %%ymm2, %%ymm2, %%ymm2 \n\t"\
        "1: \n\t"\
        "vmovups (%3, %0), %%ymm1 \n\t"\
        "vmulps (%5, %0), %%ymm1, %%ymm3 \n\t"\
        "vmulps (%4, %0), %%ymm1, %%ymm1 \n\t"\
        "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
        "vaddps %%ymm3, %%ymm2, %%ymm2 \n\t"\
        "add $32, %0 \n\t"\
        " js 1b \n\t"\
        "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
        "vextractf128 $1, %%ymm2, %%xmm3 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
        "vmovhlps %%xmm2, %%xmm3, %%xmm3 \n\t"\
        "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
        "vshufps $1, %%xmm2, %%xmm2, %%xmm3 \n\t"\
        "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
        "vaddss %%xmm3, %%xmm2, %%xmm2 \n\t"\
        "vmovss %%xmm0, %1 \n\t"\
        "vmovss %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );
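
/* Double cores (SSE2): len is -8*c->filter_length bytes, 2 doubles per
 * iteration; a single movhlps/addpd pair completes the horizontal sum. */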
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
    __asm__ volatile(\
        "xorpd %%xmm0, %%xmm0 \n\t"\
        "1: \n\t"\
        "movupd (%1, %0), %%xmm1 \n\t"\
        "mulpd (%2, %0), %%xmm1 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "movsd %%xmm0, (%3) \n\t"\
        : "+r" (len)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (dst+dst_index)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
    );
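
/* Double linear core (SSE2): two accumulators, raw sums stored to val and
 * v2 with movsd. */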
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
    __asm__ volatile(\
        "xorpd %%xmm0, %%xmm0 \n\t"\
        "xorpd %%xmm2, %%xmm2 \n\t"\
        "1: \n\t"\
        "movupd (%3, %0), %%xmm1 \n\t"\
        "movapd %%xmm1, %%xmm3 \n\t"\
        "mulpd (%4, %0), %%xmm1 \n\t"\
        "mulpd (%5, %0), %%xmm3 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "addpd %%xmm3, %%xmm2 \n\t"\
        "add $16, %0 \n\t"\
        " js 1b \n\t"\
        "movhlps %%xmm0, %%xmm1 \n\t"\
        "movhlps %%xmm2, %%xmm3 \n\t"\
        "addpd %%xmm1, %%xmm0 \n\t"\
        "addpd %%xmm3, %%xmm2 \n\t"\
        "movsd %%xmm0, %1 \n\t"\
        "movsd %%xmm2, %2 \n\t"\
        : "+r" (len),\
          "=m" (val),\
          "=m" (v2)\
        : "r" (((uint8_t*)(src+sample_index))-len),\
          "r" (((uint8_t*)filter)-len),\
          "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
          XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
    );