;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
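;
; Roughly equivalent C (illustrative sketch; as written, the loop consumes
; 64 bytes per iteration, so len is assumed to be a multiple of 16 floats and
; the buffers mmsize-aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i];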
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 64]
ALIGN 16
.loop:
%assign a 0
%rep 32/mmsize
    mova      m0,   [src0q + lenq + (a+0)*mmsize]
    mova      m1,   [src0q + lenq + (a+1)*mmsize]
    mulps     m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps     m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova      [dstq + lenq + (a+0)*mmsize], m0
    mova      [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep

    sub       lenq, 64
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
INIT_YMM avx
VECTOR_FMUL

;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
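;
; Roughly equivalent C (illustrative sketch; the loop processes 64 bytes per
; iteration, so len is assumed to be a multiple of 16 floats and the buffers
; mmsize-aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] += src[i] * mul;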
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,3, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
%endif
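; broadcast the scalar to every lane of m0: on x86_32 it is loaded from the
; stack (mulm), on WIN64 it arrives in xmm2, on UNIX64 it is already in xmm0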
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    mova      xmm0, xmm2
%endif
    shufps    xmm0, xmm0, 0
%if cpuflag(avx)
    vinsertf128 m0, m0, xmm0, 1
%endif
%endif
    lea       lenq, [lend*4-64]
.loop:
%assign a 0
%rep 32/mmsize
    mulps     m1, m0, [srcq+lenq+(a+0)*mmsize]
    mulps     m2, m0, [srcq+lenq+(a+1)*mmsize]
    addps     m1, m1, [dstq+lenq+(a+0)*mmsize]
    addps     m2, m2, [dstq+lenq+(a+1)*mmsize]
    mova      [dstq+lenq+(a+0)*mmsize], m1
    mova      [dstq+lenq+(a+1)*mmsize], m2
%assign a a+2
%endrep
    sub       lenq, 64
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
INIT_YMM avx
VECTOR_FMAC_SCALAR

;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
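;
; Roughly equivalent C (illustrative sketch; one xmm register per iteration,
; so len is assumed to be a multiple of 4 floats, buffers 16-byte aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;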
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss     m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps    m0, m0, 0
    lea       lenq, [lend*4-mmsize]
.loop:
    mova      m1, [srcq+lenq]
    mulps     m1, m0
    mova      [dstq+lenq], m1
    sub       lenq, mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR

;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
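;
; Roughly equivalent C (illustrative sketch; the loop handles 2*mmsize bytes
; per iteration, i.e. len is assumed to be a multiple of 4 doubles for SSE2
; and 8 doubles for AVX, buffers mmsize-aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] = src[i] * mul;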
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov       lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD m0, mulm
%else
%if WIN64
    movlhps   xmm2, xmm2
%if cpuflag(avx)
    vinsertf128 ymm2, ymm2, xmm2, 1
%endif
    SWAP 0, 2
%else
    movlhps   xmm0, xmm0
%if cpuflag(avx)
    vinsertf128 ymm0, ymm0, xmm0, 1
%endif
%endif
%endif
    lea       lenq, [lend*8-2*mmsize]
.loop:
    mulpd     m1, m0, [srcq+lenq       ]
    mulpd     m2, m0, [srcq+lenq+mmsize]
    mova      [dstq+lenq       ], m1
    mova      [dstq+lenq+mmsize], m2
    sub       lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif

;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
;                 const float *src2, int len)
;-----------------------------------------------------------------------------
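;
; Roughly equivalent C (illustrative sketch; 2*mmsize bytes per iteration, so
; len is assumed to be a multiple of 8 floats for SSE and 16 for AVX, buffers
; mmsize-aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[i] + src2[i];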
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova      m0,   [src0q + lenq]
    mova      m1,   [src0q + lenq + mmsize]
    mulps     m0, m0, [src1q + lenq]
    mulps     m1, m1, [src1q + lenq + mmsize]
    addps     m0, m0, [src2q + lenq]
    addps     m1, m1, [src2q + lenq + mmsize]
    mova      [dstq + lenq], m0
    mova      [dstq + lenq + mmsize], m1
    sub       lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
INIT_YMM avx
VECTOR_FMUL_ADD

;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
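;
; Roughly equivalent C (illustrative sketch; src1 is read back to front while
; dst and src0 are walked forward, len assumed to be a multiple of 2*mmsize/4
; floats, buffers mmsize-aligned):
;     for (int i = 0; i < len; i++)
;         dst[i] = src0[i] * src1[len - 1 - i];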
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
%if cpuflag(avx)
    vmovaps     xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps     m0, m0, m0, q0123
    vmovaps     xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps     m1, m1, m1, q0123
%else
    mova        m0, [src1q]
    mova        m1, [src1q + mmsize]
    shufps      m0, m0, q0123
    shufps      m1, m1, q0123
%endif
    mulps       m0, m0, [src0q + lenq + mmsize]
    mulps       m1, m1, [src0q + lenq]
    mova        [dstq + lenq + mmsize], m0
    mova        [dstq + lenq], m1
    add         src1q, 2*mmsize
    sub         lenq,  2*mmsize
    jge         .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
INIT_YMM avx
VECTOR_FMUL_REVERSE

; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
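;
; Roughly equivalent C (illustrative sketch; len assumed to be a positive
; multiple of 4, buffers 16-byte aligned):
;     float sum = 0.0f;
;     for (int i = 0; i < len; i++)
;         sum += v1[i] * v2[i];
;     return sum;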
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    neg   offsetq
    shl   offsetq, 2
    sub       v1q, offsetq
    sub       v2q, offsetq
    xorps    xmm0, xmm0
.loop:
    movaps   xmm1, [v1q+offsetq]
    mulps    xmm1, [v2q+offsetq]
    addps    xmm0, xmm1
    add   offsetq, 16
    js .loop
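    ; horizontal sum: fold the four partial sums in xmm0 into a single scalar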
    movhlps  xmm1, xmm0
    addps    xmm0, xmm1
    movss    xmm1, xmm0
    shufps   xmm0, xmm0, 1
    addss    xmm0, xmm1
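    ; on x86_32 a float return value travels on the x87 stack, so spill the
    ; result to the stack slot of the first argument and reload it with fld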
%if ARCH_X86_64 == 0
    movss     r0m,  xmm0
    fld dword r0m
%endif
    RET

;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
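;
; Roughly equivalent C (illustrative sketch; len may be 0, otherwise it is
; assumed to be a multiple of 4 floats, buffers 16-byte aligned):
;     for (int i = 0; i < len; i++) {
;         float t  = src0[i] - src1[i];
;         src0[i] += src1[i];
;         src1[i]  = t;
;     }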
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
%if ARCH_X86_64
    movsxd    lenq, lend
%endif
    test      lenq, lenq
    jz .end
    shl       lenq, 2
    lea      src0q, [src0q + lenq]
    lea      src1q, [src1q + lenq]
    neg       lenq
.loop:
    mova        m0, [src0q + lenq]
    mova        m1, [src1q + lenq]
    subps       m2, m0, m1
    addps       m0, m0, m1
    mova        [src1q + lenq], m2
    mova        [src0q + lenq], m0
    add       lenq, mmsize
    jl .loop
.end:
    REP_RET