;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova      m0, [src0q + lenq]
    mova      m1, [src0q + lenq + mmsize]
    mulps     m0, m0, [src1q + lenq]
    mulps     m1, m1, [src1q + lenq + mmsize]
    mova      [dstq + lenq], m0
    mova      [dstq + lenq + mmsize], m1

    sub       lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif
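
; Scalar reference for the routine above (a sketch of the intended semantics,
; not FFmpeg's C fallback). Assumes len is a multiple of 2*mmsize/4 floats
; (8 for SSE, 16 for AVX) and that all pointers are mmsize-aligned, matching
; the aligned loads/stores in the loop:
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];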

;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,3, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    mova        xmm0, xmm2
%endif
    shufps      xmm0, xmm0, 0
%if cpuflag(avx)
    vinsertf128   m0, m0, xmm0, 1
%endif
%endif
    lea     lenq, [lend*4-2*mmsize]
.loop:
    mulps     m1, m0, [srcq+lenq       ]
    mulps     m2, m0, [srcq+lenq+mmsize]
    addps     m1, m1, [dstq+lenq       ]
    addps     m2, m2, [dstq+lenq+mmsize]
    mova   [dstq+lenq       ], m1
    mova   [dstq+lenq+mmsize], m2
    sub     lenq, 2*mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
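
; Scalar equivalent (a sketch; assumes len is a multiple of 2*mmsize/4 floats
; and that dst/src are mmsize-aligned):
;
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;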

;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss     m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps    m0, m0, 0
    lea     lenq, [lend*4-mmsize]
.loop:
    mova      m1, [srcq+lenq]
    mulps     m1, m0
    mova   [dstq+lenq], m1
    sub     lenq, mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR
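
; Scalar equivalent (a sketch; assumes len is a multiple of mmsize/4 floats
; and that dst/src are mmsize-aligned):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;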

;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov          lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD m0, mulm
%else
%if WIN64
    movlhps      xmm2, xmm2
%if cpuflag(avx)
    vinsertf128  ymm2, ymm2, xmm2, 1
%endif
    SWAP 0, 2
%else
    movlhps      xmm0, xmm0
%if cpuflag(avx)
    vinsertf128  ymm0, ymm0, xmm0, 1
%endif
%endif
%endif
    lea          lenq, [lend*8-2*mmsize]
.loop:
    mulpd        m1, m0, [srcq+lenq       ]
    mulpd        m2, m0, [srcq+lenq+mmsize]
    mova   [dstq+lenq       ], m1
    mova   [dstq+lenq+mmsize], m2
    sub          lenq, 2*mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif
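
; Scalar equivalent (a sketch; assumes len is a multiple of 2*mmsize/8
; doubles, i.e. 4 for SSE2 and 8 for AVX, and that dst/src are
; mmsize-aligned):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;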

;-----------------------------------------------------------------------------
; void vector_fmul_add(float *dst, const float *src0, const float *src1,
;                      const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova      m0, [src0q + lenq]
    mova      m1, [src0q + lenq + mmsize]
    mulps     m0, m0, [src1q + lenq]
    mulps     m1, m1, [src1q + lenq + mmsize]
    addps     m0, m0, [src2q + lenq]
    addps     m1, m1, [src2q + lenq + mmsize]
    mova      [dstq + lenq], m0
    mova      [dstq + lenq + mmsize], m1

    sub       lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
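
; Scalar equivalent (a sketch; assumes len is a multiple of 2*mmsize/4 floats
; and that all pointers are mmsize-aligned):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i] + src2[i];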

;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
%if cpuflag(avx)
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova        m0, [src1q]
    mova        m1, [src1q + mmsize]
    shufps      m0, m0, q0123
    shufps      m1, m1, q0123
%endif
    mulps       m0, m0, [src0q + lenq + mmsize]
    mulps       m1, m1, [src0q + lenq]
    mova        [dstq + lenq + mmsize], m0
    mova        [dstq + lenq], m1
    add       src1q, 2*mmsize
    sub        lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
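
; Scalar equivalent (a sketch): src1 is walked forward while dst/src0 are
; walked backward, with each block reversed in-register, so the net effect
; is (assuming len is a multiple of 2*mmsize/4 floats and aligned pointers):
;
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[len - 1 - i];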

;-----------------------------------------------------------------------------
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    neg    offsetq
    shl    offsetq, 2
    sub        v1q, offsetq
    sub        v2q, offsetq
    xorps     xmm0, xmm0
.loop:
    movaps    xmm1, [v1q+offsetq]
    mulps     xmm1, [v2q+offsetq]
    addps     xmm0, xmm1
    add    offsetq, 16
    js .loop
    movhlps   xmm1, xmm0
    addps     xmm0, xmm1
    movss     xmm1, xmm0
    shufps    xmm0, xmm0, 1
    addss     xmm0, xmm1
%if ARCH_X86_64 == 0
    movss      r0m, xmm0
    fld dword  r0m
%endif
    RET
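
; Scalar equivalent (a sketch; assumes len is a positive multiple of 4 and
; that both pointers are 16-byte aligned; the movhlps/shufps sequence at the
; end collapses the four partial sums into one float):
;
;     float sum = 0.0f;
;     for (int i = 0; i < len; i++)
;         sum += v1[i] * v2[i];
;     return sum;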

;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
%if ARCH_X86_64
    movsxd    lenq, lend
%endif
    test      lenq, lenq
    jz .end
    shl       lenq, 2
    add      src0q, lenq
    add      src1q, lenq
    neg       lenq
.loop:
    mova        m0, [src0q + lenq]
    mova        m1, [src1q + lenq]
    subps       m2, m0, m1
    addps       m0, m0, m1
    mova [src1q + lenq], m2
    mova [src0q + lenq], m0
    add       lenq, mmsize
    jl .loop
.end:
    REP_RET
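
; Scalar equivalent (a sketch; assumes len is zero or a multiple of mmsize/4
; floats and that both buffers are mmsize-aligned):
;
;     for (int i = 0; i < len; i++) {
;         float t  = src0[i] - src1[i];
;         src0[i] += src1[i];
;         src1[i]  = t;
;     }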