;******************************************************************************
;* SSE-optimized functions for the DCA decoder
;* Copyright (C) 2012-2014 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA
pf_inv16:  times 4 dd 0x3D800000 ; 1/16
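; 0x3D800000 is the IEEE-754 single-precision encoding of 1/16 (0.0625):
; sign 0, biased exponent 0x7B (i.e. 2^-4), zero mantissa.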

SECTION .text

; %1 = v0/v1, %2 = in1, %3 = in2
%macro FIR_LOOP 2-3
.loop%1:
%define va          m1
%define vb          m2
%if %1
%define OFFSET      0
%else
%define OFFSET      NUM_COEF*count
%endif
; for v0, incrementing and for v1, decrementing
    mova        va, [cf0q + OFFSET]
    mova        vb, [cf0q + OFFSET + 4*NUM_COEF]
%if %0 == 3
    mova        m4, [cf0q + OFFSET + mmsize]
    mova        m0, [cf0q + OFFSET + 4*NUM_COEF + mmsize]
%endif
    mulps       va, %2
    mulps       vb, %2
%if %0 == 3
%if cpuflag(fma3)
    fmaddps     va, m4, %3, va
    fmaddps     vb, m0, %3, vb
%else
    mulps       m4, %3
    mulps       m0, %3
    addps       va, m4
    addps       vb, m0
%endif
%endif
    ; va = va1 va2 va3 va4
    ; vb = vb1 vb2 vb3 vb4
%if %1
    SWAP        va, vb
%endif
    mova        m4, va
    unpcklps    va, vb ; va1 vb1 va2 vb2
    unpckhps    m4, vb ; va3 vb3 va4 vb4
    addps       m4, va ; va1+3 vb1+3 va2+4 vb2+4
    movhlps     vb, m4 ; va2+4 vb2+4
    addps       vb, m4 ; va1..4 vb1..4
    movlps      [outq + count], vb
%if %1
    sub         cf0q, 8*NUM_COEF
%endif
    add         count, 8
    jl .loop%1
%endmacro

; void dca_lfe_fir(float *out, float *in, float *coefs)
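; Rough C model of one FIR_LOOP pass (an illustrative sketch derived from the
; asm above, not FFmpeg's C reference; in_rev and dot are hypothetical names).
; Each iteration stores two adjacent outputs via movlps, each an NUM_COEF-tap
; dot product of the reversed input samples (shufps q0123 reverses the input
; vectors) against consecutive rows of the coefficient table:
;~ for (k = 0; k < NUM_OUT; k += 2) {
;~     out[k]     = dot(in_rev, coefs + (k    ) * NUM_COEF, NUM_COEF);
;~     out[k + 1] = dot(in_rev, coefs + (k + 1) * NUM_COEF, NUM_COEF);
;~ }
; The %1 macro argument selects the variant: %1 == 0 gives NUM_COEF = 8 taps
; and 2*32 outputs, %1 == 1 gives NUM_COEF = 4 taps and 2*64 outputs (see the
; NUM_COEF/NUM_OUT defines below). The second FIR_LOOP pass walks the
; coefficient table backwards to produce the mirrored second output half.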
%macro DCA_LFE_FIR 1
cglobal dca_lfe_fir%1, 3,3,6-%1, out, in, cf0
%define IN1       m3
%define IN2       m5
%define count     inq
%define NUM_COEF  4*(2-%1)
%define NUM_OUT   32*(%1+1)

    movu        IN1, [inq + 4 - 1*mmsize]
    shufps      IN1, IN1, q0123
%if %1 == 0
    movu        IN2, [inq + 4 - 2*mmsize]
    shufps      IN2, IN2, q0123
%endif

    mov         count, -4*NUM_OUT
    add         cf0q, 4*NUM_COEF*NUM_OUT
    add         outq, 4*NUM_OUT
    ; compute v0 first
%if %1 == 0
    FIR_LOOP    0, IN1, IN2
%else
    FIR_LOOP    0, IN1
%endif
    shufps      IN1, IN1, q0123
    mov         count, -4*NUM_OUT
    ; cf1 already correctly positioned
    add         outq, 4*NUM_OUT ; outq now at out2
    sub         cf0q, 8*NUM_COEF
%if %1 == 0
    shufps      IN2, IN2, q0123
    FIR_LOOP    1, IN2, IN1
%else
    FIR_LOOP    1, IN1
%endif
    RET
%endmacro

INIT_XMM sse
DCA_LFE_FIR 0
DCA_LFE_FIR 1

%if HAVE_FMA3_EXTERNAL
INIT_XMM fma3
DCA_LFE_FIR 0
%endif
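; With FFmpeg's "ff" private prefix, the expansions above should emit
; ff_dca_lfe_fir0_sse, ff_dca_lfe_fir1_sse and, when FMA3 code can be
; assembled, ff_dca_lfe_fir0_fma3: x86inc's cglobal appends the INIT_*
; cpuflag suffix to each symbol name.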

%macro SETZERO 1
%if cpuflag(sse2) && notcpuflag(avx)
    pxor        %1, %1
%else
    xorps       %1, %1, %1
%endif
%endmacro
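; SETZERO clears a register: the integer pxor idiom on SSE2, otherwise the
; three-operand xorps form (emulated by x86inc on plain SSE; native VEX
; encoding on AVX, where it also zeroes the high ymm lane).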

%macro SHUF 3
%if cpuflag(avx)
    mova        %3, [%2 - 16]
    vperm2f128  %1, %3, %3, 1
    vshufps     %1, %1, %1, q0123
%elif cpuflag(sse2)
    pshufd      %1, [%2], q0123
%else
    mova        %1, [%2]
    shufps      %1, %1, q0123
%endif
%endmacro
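; SHUF loads one vector with its elements reversed: pshufd/shufps with q0123
; reverses 4 floats; the AVX path reverses a full 8-float ymm by swapping the
; two 128-bit lanes (vperm2f128) and then reversing within each lane.
; %3 is a scratch register for the AVX load.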

%macro INNER_LOOP 1
    ; reading backwards: ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
    ;~ a += window[i + j]      * (-synth_buf[15 - i + j])
    ;~ b += window[i + j + 16] * (synth_buf[i + j])
    SHUF        m5, ptr2 + j + (15 - 3) * 4, m6
    mova        m6, [ptr1 + j]
%if ARCH_X86_64
    SHUF        m11, ptr2 + j + (15 - 3) * 4 - mmsize, m12
    mova        m12, [ptr1 + j + mmsize]
%endif
%if cpuflag(fma3)
    fmaddps     m2, m6, [win + %1 + j + 16 * 4], m2
    fnmaddps    m1, m5, [win + %1 + j], m1
%if ARCH_X86_64
    fmaddps     m8, m12, [win + %1 + j + mmsize + 16 * 4], m8
    fnmaddps    m7, m11, [win + %1 + j + mmsize], m7
%endif
%else ; non-FMA
    mulps       m6, m6, [win + %1 + j + 16 * 4]
    mulps       m5, m5, [win + %1 + j]
%if ARCH_X86_64
    mulps       m12, m12, [win + %1 + j + mmsize + 16 * 4]
    mulps       m11, m11, [win + %1 + j + mmsize]
%endif
    addps       m2, m2, m6
    subps       m1, m1, m5
%if ARCH_X86_64
    addps       m8, m8, m12
    subps       m7, m7, m11
%endif
%endif ; cpuflag(fma3)
    ;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
    ;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
    SHUF        m6, ptr2 + j + (31 - 3) * 4, m5
    mova        m5, [ptr1 + j + 16 * 4]
%if ARCH_X86_64
    SHUF        m12, ptr2 + j + (31 - 3) * 4 - mmsize, m11
    mova        m11, [ptr1 + j + mmsize + 16 * 4]
%endif
%if cpuflag(fma3)
    fmaddps     m3, m5, [win + %1 + j + 32 * 4], m3
    fmaddps     m4, m6, [win + %1 + j + 48 * 4], m4
%if ARCH_X86_64
    fmaddps     m9, m11, [win + %1 + j + mmsize + 32 * 4], m9
    fmaddps     m10, m12, [win + %1 + j + mmsize + 48 * 4], m10
%endif
%else ; non-FMA
    mulps       m5, m5, [win + %1 + j + 32 * 4]
    mulps       m6, m6, [win + %1 + j + 48 * 4]
%if ARCH_X86_64
    mulps       m11, m11, [win + %1 + j + mmsize + 32 * 4]
    mulps       m12, m12, [win + %1 + j + mmsize + 48 * 4]
%endif
    addps       m3, m3, m5
    addps       m4, m4, m6
%if ARCH_X86_64
    addps       m9, m9, m11
    addps       m10, m10, m12
%endif
%endif ; cpuflag(fma3)
    sub         j, 64 * 4
%endmacro

; void ff_synth_filter_inner_<opt>(float *synth_buf, float synth_buf2[32],
;                                  const float window[512], float out[32],
;                                  intptr_t offset, float scale)
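; Rough C model of the whole computation, reconstructed from the ;~
; annotations in INNER_LOOP (an illustrative sketch, not FFmpeg's C
; reference). .loop1 covers the part of the circular synth_buf before the
; wrap; .loop2 repeats the same accumulation with the window advanced by
; 64 floats for the wrapped remainder:
;~ for (i = 0; i < 16; i++) {
;~     float a = synth_buf2[i], b = synth_buf2[i + 16], c = 0, d = 0;
;~     for (j = 0; j < 512 - offset; j += 64) {
;~         a += window[i + j]      * -synth_buf[15 - i + j];
;~         b += window[i + j + 16] *  synth_buf[i + j];
;~         c += window[i + j + 32] *  synth_buf[16 + i + j];
;~         d += window[i + j + 48] *  synth_buf[31 - i + j];
;~     }
;~     /* second pass over the wrapped part of synth_buf, j up to 512 */
;~     out[i]             = a * scale;
;~     out[i + 16]        = b * scale;
;~     synth_buf2[i]      = c;
;~     synth_buf2[i + 16] = d;
;~ }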
%macro SYNTH_FILTER 0
cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
                            synth_buf, synth_buf2, window, out, off, scale
%define scale m0
%if ARCH_X86_32 || WIN64
%if cpuflag(sse2) && notcpuflag(avx)
    movd        scale, scalem
    SPLATD      m0
%else
    VBROADCASTSS m0, scalem
%endif
; Make sure offset is in a register and not on the stack
%define OFFQ  r4q
%else
    SPLATD      xmm0
%if cpuflag(avx)
    vinsertf128 m0, m0, xmm0, 1
%endif
%define OFFQ  offq
%endif
    ; prepare the inner counter limit for .loop1
    mov         r5q, 480
    sub         r5q, offmp
    and         r5q, -64
    shl         r5q, 2
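    ; r5q now holds 4 * ((480 - offset) rounded down to a multiple of 64),
    ; i.e. the byte offset at which the descending inner-loop counter j
    ; starts for .loop1.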
%if ARCH_X86_32 || notcpuflag(avx)
    mov         OFFQ, r5q
%define i r5q
    mov         i, 16 * 4 - (ARCH_X86_64 + 1) * mmsize ; main loop counter
%else
%define i 0
%define OFFQ r5q
%endif

%define buf2 synth_buf2q
%if ARCH_X86_32
    mov         buf2, synth_buf2mp
%endif
.mainloop:
    ; m1 = a  m2 = b  m3 = c  m4 = d
    SETZERO     m3
    SETZERO     m4
    mova        m1, [buf2 + i]
    mova        m2, [buf2 + i + 16 * 4]
%if ARCH_X86_32
%define ptr1 r0q
%define ptr2 r1q
%define win  r2q
%define j    r3q
    mov         win, windowm
    mov         ptr1, synth_bufm
%if ARCH_X86_32 || notcpuflag(avx)
    add         win, i
    add         ptr1, i
%endif
%else ; ARCH_X86_64
%define ptr1 r6q
%define ptr2 r7q ; must be loaded
%define win  r8q
%define j    r9q
    SETZERO     m9
    SETZERO     m10
    mova        m7, [buf2 + i + mmsize]
    mova        m8, [buf2 + i + mmsize + 16 * 4]
    lea         win, [windowq + i]
    lea         ptr1, [synth_bufq + i]
%endif
    mov         ptr2, synth_bufmp
    ; prepare the inner loop counter
    mov         j, OFFQ
%if ARCH_X86_32 || notcpuflag(avx)
    sub         ptr2, i
%endif
.loop1:
    INNER_LOOP  0
    jge         .loop1

    mov         j, 448 * 4
    sub         j, OFFQ
    jz          .end
    sub         ptr1, j
    sub         ptr2, j
    add         win, OFFQ ; now at j-64, so define OFFSET
    sub         j, 64 * 4
.loop2:
    INNER_LOOP  64 * 4
    jge         .loop2

.end:
%if ARCH_X86_32
    mov         buf2, synth_buf2m ; needed for next iteration anyway
    mov         outq, outmp       ; j, which will be set again during it
%endif
    ;~ out[i]      = a * scale;
    ;~ out[i + 16] = b * scale;
    mulps       m1, m1, scale
    mulps       m2, m2, scale
%if ARCH_X86_64
    mulps       m7, m7, scale
    mulps       m8, m8, scale
%endif
    ;~ synth_buf2[i]      = c;
    ;~ synth_buf2[i + 16] = d;
    mova        [buf2 + i +  0 * 4], m3
    mova        [buf2 + i + 16 * 4], m4
%if ARCH_X86_64
    mova        [buf2 + i +  0 * 4 + mmsize], m9
    mova        [buf2 + i + 16 * 4 + mmsize], m10
%endif
    ;~ out[i]      = a;
    ;~ out[i + 16] = b;
    mova        [outq + i +  0 * 4], m1
    mova        [outq + i + 16 * 4], m2
%if ARCH_X86_64
    mova        [outq + i +  0 * 4 + mmsize], m7
    mova        [outq + i + 16 * 4 + mmsize], m8
%endif
%if ARCH_X86_32 || notcpuflag(avx)
    sub         i, (ARCH_X86_64 + 1) * mmsize
    jge         .mainloop
%endif
    RET
%endmacro

%if ARCH_X86_32
INIT_XMM sse
SYNTH_FILTER
%endif
INIT_XMM sse2
SYNTH_FILTER
INIT_YMM avx
SYNTH_FILTER
INIT_YMM fma3
SYNTH_FILTER
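; The plain-SSE variant is only built for x86_32: x86_64 guarantees SSE2, so
; SSE2 is the 64-bit baseline. The AVX and FMA3 instantiations use ymm
; registers (INIT_YMM, mmsize == 32), processing 8 floats per operation; on
; x86_64 a single .mainloop pass then covers all 16 output lanes.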