;******************************************************************************
;* FFT transform with SSE/3DNow optimizations
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2011 Vitor Sessak
;*
;* This algorithm (though not any of the implementation details) is
;* based on libdjbfft by D. J. Bernstein.
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

; These functions are not individually interchangeable with the C versions.
; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
; in blocks as convenient to the vector size.
; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
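;
; A hedged C-level sketch of that block-split layout for the SSE case
; (illustrative only; the 3DNow code uses the same scheme with 2-wide
; blocks):
;
;   // AoS view used by the C FFT:
;   //     FFTComplex z[n];                    // interleaved {re, im} pairs
;   // block-split view left by the SIMD passes:
;   //     struct { float re[4], im[4]; } blk[n/4];
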
%include "x86inc.asm"

%ifdef ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif

struc FFTContext
.nbits:    resd 1
.reverse:  resd 1
.revtab:   pointer 1
.tmpbuf:   pointer 1
.mdctsize: resd 1
.mdctbits: resd 1
.tcos:     pointer 1
.tsin:     pointer 1
endstruc
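; This struc only works if it stays byte-compatible with the leading members
; of the C FFTContext; a hedged sketch of the assumed C layout (field names
; as in fft.h at the time of writing):
;
;   typedef struct FFTContext {
;       int         nbits;
;       int         inverse;    // ".reverse" above
;       uint16_t   *revtab;
;       FFTComplex *tmp_buf;
;       int         mdct_size;  // size of MDCT (number of input data * 2)
;       int         mdct_bits;  // = log2(mdct_size)
;       FFTSample  *tcos;
;       FFTSample  *tsin;
;       ...
;   } FFTContext;
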
SECTION_RODATA

%define M_SQRT1_2    0.70710678118654752440
%define M_COS_PI_1_8 0.923879532511287
%define M_COS_PI_3_8 0.38268343236509

align 32
ps_cos16_1: dd 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8, 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8
ps_cos16_2: dd 0, M_COS_PI_3_8, M_SQRT1_2, M_COS_PI_1_8, 0, -M_COS_PI_3_8, -M_SQRT1_2, -M_COS_PI_1_8

ps_root2: times 8 dd M_SQRT1_2
ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2, -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
ps_p1p1m1p1: dd 0, 0, 1<<31, 0, 0, 0, 1<<31, 0

perm1: dd 0x00, 0x02, 0x03, 0x01, 0x03, 0x00, 0x02, 0x01
perm2: dd 0x00, 0x01, 0x02, 0x03, 0x01, 0x00, 0x02, 0x03
ps_p1p1m1p1root2: dd 1.0, 1.0, -1.0, 1.0, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
ps_m1m1p1m1p1m1m1m1: dd 1<<31, 1<<31, 0, 1<<31, 0, 1<<31, 1<<31, 1<<31
ps_m1p1: dd 1<<31, 0

%assign i 16
%rep 13
cextern cos_ %+ i
%assign i i<<1
%endrep

%ifdef ARCH_X86_64
%define pointer dq
%else
%define pointer dd
%endif

%macro IF0 1+
%endmacro

%macro IF1 1+
%1
%endmacro

section .text align=16

%macro T2_3DN 4 ; z0, z1, mem0, mem1
    mova %1, %3
    mova %2, %1
    pfadd %1, %4
    pfsub %2, %4
%endmacro

%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1
    mova %5, %3
    pfsub %3, %4
    pfadd %5, %4 ; {t6,t5}
    pxor %3, [ps_m1p1] ; {t8,t7}
    mova %6, %1
    pswapd %3, %3
    pfadd %1, %5 ; {r0,i0}
    pfsub %6, %5 ; {r2,i2}
    mova %4, %2
    pfadd %2, %3 ; {r1,i1}
    pfsub %4, %3 ; {r3,i3}
    SWAP %3, %6
%endmacro

; in:  %1 = {r0,i0,r2,i2,r4,i4,r6,i6}
;      %2 = {r1,i1,r3,i3,r5,i5,r7,i7}
;      %3, %4, %5 tmp
; out: %1 = {r0,r1,r2,r3,i0,i1,i2,i3}
;      %2 = {r4,r5,r6,r7,i4,i5,i6,i7}
%macro T8_AVX 5
    vsubps %5, %1, %2 ; v = %1 - %2
    vaddps %3, %1, %2 ; w = %1 + %2

    vmulps %2, %5, [ps_p1p1m1p1root2] ; v *= vals1
    vpermilps %2, %2, [perm1]
    vblendps %1, %2, %3, 0x33 ; q = {w1,w2,v4,v2,w5,w6,v7,v6}
    vshufps %5, %3, %2, 0x4e ; r = {w3,w4,v1,v3,w7,w8,v8,v5}
    vsubps %4, %5, %1 ; s = r - q
    vaddps %1, %5, %1 ; u = r + q
    vpermilps %1, %1, [perm2] ; k = {u1,u2,u3,u4,u6,u5,u7,u8}
    vshufps %5, %4, %1, 0xbb
    vshufps %3, %4, %1, 0xee
    vperm2f128 %3, %3, %5, 0x13

    vxorps %4, %4, [ps_m1m1p1m1p1m1m1m1] ; s *= {-1,-1,1,-1,1,-1,-1,-1}
    vshufps %2, %1, %4, 0xdd
    vshufps %1, %1, %4, 0x88
    vperm2f128 %4, %2, %1, 0x02 ; v = {k1,k3,s1,s3,k2,k4,s2,s4}
    vperm2f128 %1, %1, %2, 0x13 ; w = {k6,k8,s6,s8,k5,k7,s5,s7}
    vsubps %5, %1, %3
    vblendps %1, %5, %1, 0x55 ; w -= {0,s7,0,k7,0,s8,0,k8}
    vsubps %2, %4, %1 ; %2 = v - w
    vaddps %1, %4, %1 ; %1 = v + w
%endmacro

; In SSE mode do one fft4 transform
; in:  %1={r0,i0,r2,i2} %2={r1,i1,r3,i3}
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
;
; In AVX mode do two fft4 transforms
; in:  %1={r0,i0,r2,i2,r4,i4,r6,i6} %2={r1,i1,r3,i3,r5,i5,r7,i7}
; out: %1={r0,r1,r2,r3,r4,r5,r6,r7} %2={i0,i1,i2,i3,i4,i5,i6,i7}
%macro T4_SSE 3
    subps %3, %1, %2 ; {t3,t4,-t8,t7}
    addps %1, %1, %2 ; {t1,t2,t6,t5}
    xorps %3, %3, [ps_p1p1m1p1]
    shufps %2, %1, %3, 0xbe ; {t6,t5,t7,t8}
    shufps %1, %1, %3, 0x44 ; {t1,t2,t3,t4}
    subps %3, %1, %2 ; {r2,i2,r3,i3}
    addps %1, %1, %2 ; {r0,i0,r1,i1}
    shufps %2, %1, %3, 0xdd ; {i0,i1,i2,i3}
    shufps %1, %1, %3, 0x88 ; {r0,r1,r2,r3}
%endmacro
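; For reference, a hedged scalar reading of the t-values above (one SSE
; transform, inputs labelled as in the in/out comments):
;
;   t1 = r0+r1   t2 = i0+i1   t6 = r2+r3   t5 = i2+i3
;   t3 = r0-r1   t4 = i0-i1   t8 = r3-r2   t7 = i2-i3
;   out0 = {t1+t6, t2+t5}   out1 = {t3+t7, t4+t8}
;   out2 = {t1-t6, t2-t5}   out3 = {t3-t7, t4-t8}
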
; In SSE mode do one FFT8 transform
; in:  %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,i4,r6,i6} %4={r5,i5,r7,i7}
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} %3={r4,r5,r6,r7} %4={i4,i5,i6,i7}
;
; In AVX mode do two FFT8 transforms
; in:  %1={r0,i0,r2,i2,r8, i8, r10,i10} %2={r1,i1,r3,i3,r9, i9, r11,i11}
;      %3={r4,i4,r6,i6,r12,i12,r14,i14} %4={r5,i5,r7,i7,r13,i13,r15,i15}
; out: %1={r0,r1,r2,r3,r8, r9, r10,r11} %2={i0,i1,i2,i3,i8, i9, i10,i11}
;      %3={r4,r5,r6,r7,r12,r13,r14,r15} %4={i4,i5,i6,i7,i12,i13,i14,i15}
%macro T8_SSE 6
    addps %6, %3, %4 ; {t1,t2,t3,t4}
    subps %3, %3, %4 ; {r5,i5,r7,i7}
    shufps %4, %3, %3, 0xb1 ; {i5,r5,i7,r7}
    mulps %3, %3, [ps_root2mppm] ; {-r5,i5,r7,-i7}
    mulps %4, %4, [ps_root2]
    addps %3, %3, %4 ; {t8,t7,ta,t9}
    shufps %4, %6, %3, 0x9c ; {t1,t4,t7,ta}
    shufps %6, %6, %3, 0x36 ; {t3,t2,t9,t8}
    subps %3, %6, %4 ; {t6,t5,tc,tb}
    addps %6, %6, %4 ; {t1,t2,t9,ta}
    shufps %5, %6, %3, 0x8d ; {t2,ta,t6,tc}
    shufps %6, %6, %3, 0xd8 ; {t1,t9,t5,tb}
    subps %3, %1, %6 ; {r4,r5,r6,r7}
    addps %1, %1, %6 ; {r0,r1,r2,r3}
    subps %4, %2, %5 ; {i4,i5,i6,i7}
    addps %2, %2, %5 ; {i0,i1,i2,i3}
%endmacro
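; The ps_root2mppm/ps_root2 multiplies above implement the radix-8 twiddles
; w = (+-1 +- i)/sqrt(2); a hedged scalar identity for one lane pair:
;
;   (r + I*i) * (1 - I)/sqrt(2) = ((r + i) + I*(i - r)) / sqrt(2)
;
; i.e. one add, one sub and two scalings per complex value, done four lanes
; at a time by the two mulps and the addps.
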
; scheduled for cpu-bound sizes
%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
IF%1 mova m4, Z(4)
IF%1 mova m5, Z(5)
    mova m0, %2 ; wre
    mova m1, %3 ; wim
    mulps m2, m4, m0 ; r2*wre
IF%1 mova m6, Z2(6)
    mulps m3, m5, m1 ; i2*wim
IF%1 mova m7, Z2(7)
    mulps m4, m4, m1 ; r2*wim
    mulps m5, m5, m0 ; i2*wre
    addps m2, m2, m3 ; r2*wre + i2*wim
    mulps m3, m1, m7 ; i3*wim
    subps m5, m5, m4 ; i2*wre - r2*wim
    mulps m1, m1, m6 ; r3*wim
    mulps m4, m0, m6 ; r3*wre
    mulps m0, m0, m7 ; i3*wre
    subps m4, m4, m3 ; r3*wre - i3*wim
    mova m3, Z(0)
    addps m0, m0, m1 ; i3*wre + r3*wim
    subps m1, m4, m2 ; t3
    addps m4, m4, m2 ; t5
    subps m3, m3, m4 ; r2
    addps m4, m4, Z(0) ; r0
    mova m6, Z(2)
    mova Z(4), m3
    mova Z(0), m4
    subps m3, m5, m0 ; t4
    subps m4, m6, m3 ; r3
    addps m3, m3, m6 ; r1
    mova Z2(6), m4
    mova Z(2), m3
    mova m2, Z(3)
    addps m3, m5, m0 ; t6
    subps m2, m2, m1 ; i3
    mova m7, Z(1)
    addps m1, m1, Z(3) ; i1
    mova Z2(7), m2
    mova Z(3), m1
    subps m4, m7, m3 ; i2
    addps m3, m3, m7 ; i0
    mova Z(5), m4
    mova Z(1), m3
%endmacro

; scheduled to avoid store->load aliasing
%macro PASS_BIG 1 ; (!interleave)
    mova m4, Z(4) ; r2
    mova m5, Z(5) ; i2
    mova m0, [wq] ; wre
    mova m1, [wq+o1q] ; wim
    mulps m2, m4, m0 ; r2*wre
    mova m6, Z2(6) ; r3
    mulps m3, m5, m1 ; i2*wim
    mova m7, Z2(7) ; i3
    mulps m4, m4, m1 ; r2*wim
    mulps m5, m5, m0 ; i2*wre
    addps m2, m2, m3 ; r2*wre + i2*wim
    mulps m3, m1, m7 ; i3*wim
    mulps m1, m1, m6 ; r3*wim
    subps m5, m5, m4 ; i2*wre - r2*wim
    mulps m4, m0, m6 ; r3*wre
    mulps m0, m0, m7 ; i3*wre
    subps m4, m4, m3 ; r3*wre - i3*wim
    mova m3, Z(0)
    addps m0, m0, m1 ; i3*wre + r3*wim
    subps m1, m4, m2 ; t3
    addps m4, m4, m2 ; t5
    subps m3, m3, m4 ; r2
    addps m4, m4, Z(0) ; r0
    mova m6, Z(2)
    mova Z(4), m3
    mova Z(0), m4
    subps m3, m5, m0 ; t4
    subps m4, m6, m3 ; r3
    addps m3, m3, m6 ; r1
IF%1 mova Z2(6), m4
IF%1 mova Z(2), m3
    mova m2, Z(3)
    addps m5, m5, m0 ; t6
    subps m2, m2, m1 ; i3
    mova m7, Z(1)
    addps m1, m1, Z(3) ; i1
IF%1 mova Z2(7), m2
IF%1 mova Z(3), m1
    subps m6, m7, m5 ; i2
    addps m5, m5, m7 ; i0
IF%1 mova Z(5), m6
IF%1 mova Z(1), m5
%if %1==0
    INTERL m1, m3, m7, Z, 2
    INTERL m2, m4, m0, Z2, 6
    mova m1, Z(0)
    mova m2, Z(4)
    INTERL m5, m1, m3, Z, 0
    INTERL m6, m2, m7, Z, 4
%endif
%endmacro
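; A hedged scalar view of one butterfly of these passes, following the
; t-labels in the comments: with
;
;   a = z2*conj(w) = (r2*wre + i2*wim) + I*(i2*wre - r2*wim)
;   b = z3*w       = (r3*wre - i3*wim) + I*(i3*wre + r3*wim)
;
; the code forms t5 = re(b)+re(a), t3 = re(b)-re(a),
;                t6 = im(a)+im(b), t4 = im(a)-im(b)
; and then  r0/r2 = Z(0) +- t5,  r1/r3 = Z(2) +- t4,
;           i0/i2 = Z(1) +- t6,  i1/i3 = Z(3) +- t3.
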
%macro PUNPCK 3
    mova %3, %1
    punpckldq %1, %2
    punpckhdq %3, %2
%endmacro

%define Z(x) [r0+mmsize*x]
%define Z2(x) [r0+mmsize*x]
%define ZH(x) [r0+mmsize*x+mmsize/2]

INIT_YMM

align 16
fft8_avx:
    mova m0, Z(0)
    mova m1, Z(1)
    T8_AVX m0, m1, m2, m3, m4
    mova Z(0), m0
    mova Z(1), m1
    ret

align 16
fft16_avx:
    mova m2, Z(2)
    mova m3, Z(3)
    T4_SSE m2, m3, m7

    mova m0, Z(0)
    mova m1, Z(1)
    T8_AVX m0, m1, m4, m5, m7

    mova m4, [ps_cos16_1]
    mova m5, [ps_cos16_2]
    vmulps m6, m2, m4
    vmulps m7, m3, m5
    vaddps m7, m7, m6
    vmulps m2, m2, m5
    vmulps m3, m3, m4
    vsubps m3, m3, m2
    vblendps m2, m7, m3, 0xf0
    vperm2f128 m3, m7, m3, 0x21

    vaddps m4, m2, m3
    vsubps m2, m3, m2
    vperm2f128 m2, m2, m2, 0x01
    vsubps m3, m1, m2
    vaddps m1, m1, m2
    vsubps m5, m0, m4
    vaddps m0, m0, m4
    vextractf128 Z(0), m0, 0
    vextractf128 ZH(0), m1, 0
    vextractf128 Z(1), m0, 1
    vextractf128 ZH(1), m1, 1
    vextractf128 Z(2), m5, 0
    vextractf128 ZH(2), m3, 0
    vextractf128 Z(3), m5, 1
    vextractf128 ZH(3), m3, 1
    ret

align 16
fft32_avx:
    call fft16_avx

    mova m0, Z(4)
    mova m1, Z(5)
    T4_SSE m0, m1, m4

    mova m2, Z(6)
    mova m3, Z(7)
    T8_SSE m0, m1, m2, m3, m4, m6
    ; m0={r0,r1,r2,r3,r8, r9, r10,r11} m1={i0,i1,i2,i3,i8, i9, i10,i11}
    ; m2={r4,r5,r6,r7,r12,r13,r14,r15} m3={i4,i5,i6,i7,i12,i13,i14,i15}

    vperm2f128 m4, m0, m2, 0x20
    vperm2f128 m5, m1, m3, 0x20
    vperm2f128 m6, m0, m2, 0x31
    vperm2f128 m7, m1, m3, 0x31

    PASS_SMALL 0, [cos_32], [cos_32+32]
    ret

fft32_interleave_avx:
    call fft32_avx
    mov r2d, 32
.deint_loop:
    mova m2, Z(0)
    mova m3, Z(1)
    vunpcklps m0, m2, m3
    vunpckhps m1, m2, m3
    vextractf128 Z(0), m0, 0
    vextractf128 ZH(0), m1, 0
    vextractf128 Z(1), m0, 1
    vextractf128 ZH(1), m1, 1
    add r0, mmsize*2
    sub r2d, mmsize/4
    jg .deint_loop
    ret

INIT_XMM
%define movdqa movaps

align 16
fft4_avx:
fft4_sse:
    mova m0, Z(0)
    mova m1, Z(1)
    T4_SSE m0, m1, m2
    mova Z(0), m0
    mova Z(1), m1
    ret

align 16
fft8_sse:
    mova m0, Z(0)
    mova m1, Z(1)
    T4_SSE m0, m1, m2
    mova m2, Z(2)
    mova m3, Z(3)
    T8_SSE m0, m1, m2, m3, m4, m5
    mova Z(0), m0
    mova Z(1), m1
    mova Z(2), m2
    mova Z(3), m3
    ret

align 16
fft16_sse:
    mova m0, Z(0)
    mova m1, Z(1)
    T4_SSE m0, m1, m2
    mova m2, Z(2)
    mova m3, Z(3)
    T8_SSE m0, m1, m2, m3, m4, m5
    mova m4, Z(4)
    mova m5, Z(5)
    mova Z(0), m0
    mova Z(1), m1
    mova Z(2), m2
    mova Z(3), m3
    T4_SSE m4, m5, m6
    mova m6, Z2(6)
    mova m7, Z2(7)
    T4_SSE m6, m7, m0
    PASS_SMALL 0, [cos_16], [cos_16+16]
    ret

INIT_MMX

%macro FFT48_3DN 1
align 16
fft4%1:
    T2_3DN m0, m1, Z(0), Z(1)
    mova m2, Z(2)
    mova m3, Z(3)
    T4_3DN m0, m1, m2, m3, m4, m5
    PUNPCK m0, m1, m4
    PUNPCK m2, m3, m5
    mova Z(0), m0
    mova Z(1), m4
    mova Z(2), m2
    mova Z(3), m5
    ret

align 16
fft8%1:
    T2_3DN m0, m1, Z(0), Z(1)
    mova m2, Z(2)
    mova m3, Z(3)
    T4_3DN m0, m1, m2, m3, m4, m5
    mova Z(0), m0
    mova Z(2), m2
    T2_3DN m4, m5, Z(4), Z(5)
    T2_3DN m6, m7, Z2(6), Z2(7)
    pswapd m0, m5
    pswapd m2, m7
    pxor m0, [ps_m1p1]
    pxor m2, [ps_m1p1]
    pfsub m5, m0
    pfadd m7, m2
    pfmul m5, [ps_root2]
    pfmul m7, [ps_root2]
    T4_3DN m1, m3, m5, m7, m0, m2
    mova Z(5), m5
    mova Z2(7), m7
    mova m0, Z(0)
    mova m2, Z(2)
    T4_3DN m0, m2, m4, m6, m5, m7
    PUNPCK m0, m1, m5
    PUNPCK m2, m3, m7
    mova Z(0), m0
    mova Z(1), m5
    mova Z(2), m2
    mova Z(3), m7
    PUNPCK m4, Z(5), m5
    PUNPCK m6, Z2(7), m7
    mova Z(4), m4
    mova Z(5), m5
    mova Z2(6), m6
    mova Z2(7), m7
    ret
%endmacro

FFT48_3DN _3dn2

%macro pswapd 2
%ifidn %1, %2
    movd [r0+12], %1
    punpckhdq %1, [r0+8]
%else
    movq %1, %2
    psrlq %1, 32
    punpckldq %1, %2
%endif
%endmacro

FFT48_3DN _3dn

%define Z(x)   [zq + o1q*(x&6) + mmsize*(x&1)]
%define Z2(x)  [zq + o3q + mmsize*(x&1)]
%define ZH(x)  [zq + o1q*(x&6) + mmsize*(x&1) + mmsize/2]
%define Z2H(x) [zq + o3q + mmsize*(x&1) + mmsize/2]

%macro DECL_PASS 2+ ; name, payload
align 16
%1:
DEFINE_ARGS z, w, n, o1, o3
    lea o3q, [nq*3]
    lea o1q, [nq*8]
    shl o3q, 4
.loop:
    %2
    add zq, mmsize*2
    add wq, mmsize
    sub nd, mmsize/8
    jg .loop
    rep ret
%endmacro

INIT_YMM

%macro INTERL_AVX 5
    vunpckhps %3, %2, %1
    vunpcklps %2, %2, %1
    vextractf128 %4(%5), %2, 0
    vextractf128 %4 %+ H(%5), %3, 0
    vextractf128 %4(%5 + 1), %2, 1
    vextractf128 %4 %+ H(%5 + 1), %3, 1
%endmacro

%define INTERL INTERL_AVX

DECL_PASS pass_avx, PASS_BIG 1
DECL_PASS pass_interleave_avx, PASS_BIG 0

INIT_XMM

%macro INTERL_SSE 5
    mova %3, %2
    unpcklps %2, %1
    unpckhps %3, %1
    mova %4(%5), %2
    mova %4(%5+1), %3
%endmacro

%define INTERL INTERL_SSE

DECL_PASS pass_sse, PASS_BIG 1
DECL_PASS pass_interleave_sse, PASS_BIG 0

INIT_MMX
%define mulps pfmul
%define addps pfadd
%define subps pfsub
%define unpcklps punpckldq
%define unpckhps punpckhdq
DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q]
DECL_PASS pass_interleave_3dn, PASS_BIG 0
%define pass_3dn2 pass_3dn
%define pass_interleave_3dn2 pass_interleave_3dn

%ifdef PIC
%define SECTION_REL - $$
%else
%define SECTION_REL
%endif

%macro FFT_DISPATCH 2; clobbers 5 GPRs, 8 XMMs
    lea r2, [dispatch_tab%1]
    mov r2, [r2 + (%2q-2)*gprsize]
%ifdef PIC
    lea r3, [$$]
    add r2, r3
%endif
    call r2
%endmacro ; FFT_DISPATCH
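; In C terms the dispatch amounts to roughly the following (hedged sketch,
; names hypothetical; entry 0 of the table is the 4-point transform):
;
;   typedef void (*fft_fn)(FFTComplex *z);
;   extern const fft_fn dispatch_tab[];
;   dispatch_tab[nbits - 2](z);
;
; With PIC, the table stores section-relative offsets (SECTION_REL above)
; that get rebased against $$ before the call.
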
%macro DECL_FFT 2-3 ; nbits, cpu, suffix
%xdefine list_of_fft fft4%2 SECTION_REL, fft8%2 SECTION_REL
%if %1>=5
%xdefine list_of_fft list_of_fft, fft16%2 SECTION_REL
%endif
%if %1>=6
%xdefine list_of_fft list_of_fft, fft32%3%2 SECTION_REL
%endif

%assign n 1<<%1
%rep 17-%1
%assign n2 n/2
%assign n4 n/4
%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2 SECTION_REL

align 16
fft %+ n %+ %3%2:
    call fft %+ n2 %+ %2
    add r0, n*4 - (n&(-2<<%1))
    call fft %+ n4 %+ %2
    add r0, n*2 - (n2&(-2<<%1))
    call fft %+ n4 %+ %2
    sub r0, n*6 + (n2&(-2<<%1))
    lea r1, [cos_ %+ n]
    mov r2d, n4/2
    jmp pass%3%2

%assign n n*2
%endrep
%undef n

align 8
dispatch_tab%3%2: pointer list_of_fft

section .text

; On x86_32, this function does the register saving and restoring for all of fft.
; The others pass args in registers and don't spill anything.
cglobal fft_dispatch%3%2, 2,5,8, z, nbits
    FFT_DISPATCH %3%2, nbits
%ifidn %2, _avx
    vzeroupper
%endif
    RET
%endmacro ; DECL_FFT
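; Each generated fftN body is one level of the split-radix recursion; a
; hedged C-level sketch of what the %rep above unrolls:
;
;   void fft(FFTComplex *z, int n)
;   {
;       fft(z,         n/2);       // even half
;       fft(z + n/2,   n/4);       // first odd quarter
;       fft(z + 3*n/4, n/4);       // second odd quarter
;       pass(z, cos_n, n/8);       // combine with twiddles ("mov r2d, n4/2")
;   }
;
; The add/sub r0 lines step between those sub-buffers (8 bytes per
; FFTComplex); the masked (n&(-2<<%1)) terms appear to compensate for the
; block layout of the smallest, fully unrolled sizes.
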
DECL_FFT 6, _avx
DECL_FFT 6, _avx, _interleave
DECL_FFT 5, _sse
DECL_FFT 5, _sse, _interleave
DECL_FFT 4, _3dn
DECL_FFT 4, _3dn, _interleave
DECL_FFT 4, _3dn2
DECL_FFT 4, _3dn2, _interleave

INIT_XMM
%undef mulps
%undef addps
%undef subps
%undef unpcklps
%undef unpckhps

%macro PREROTATER 5 ;-2*k, 2*k, input+n4, tcos+n8, tsin+n8
    movaps xmm0, [%3+%2*4]
    movaps xmm1, [%3+%1*4-0x10]
    movaps xmm2, xmm0
    shufps xmm0, xmm1, 0x88
    shufps xmm1, xmm2, 0x77
    movlps xmm4, [%4+%2*2]
    movlps xmm5, [%5+%2*2+0x0]
    movhps xmm4, [%4+%1*2-0x8]
    movhps xmm5, [%5+%1*2-0x8]
    movaps xmm2, xmm0
    movaps xmm3, xmm1
    mulps xmm0, xmm5
    mulps xmm1, xmm4
    mulps xmm2, xmm4
    mulps xmm3, xmm5
    subps xmm1, xmm0
    addps xmm2, xmm3
    movaps xmm0, xmm1
    unpcklps xmm1, xmm2
    unpckhps xmm0, xmm2
%endmacro
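; A hedged scalar reading of the pre-rotation above: after the shuffles,
; xmm0 holds even-indexed and xmm1 odd-indexed input samples, and the
; multiplies form the complex product
;
;   (odd + I*even) * (cos + I*sin)
;       re = odd*cos  - even*sin    -> xmm1 after the subps
;       im = even*cos + odd*sin     -> xmm2 after the addps
;
; which the final unpcks re-interleave into {re,im} pairs in xmm1/xmm0.
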
%macro CMUL 6 ;j, xmm0, xmm1, 3, 4, 5
    mulps m6, %3, [%5+%1]
    mulps m7, %2, [%5+%1]
    mulps %2, %2, [%6+%1]
    mulps %3, %3, [%6+%1]
    subps %2, %2, m6
    addps %3, %3, m7
%endmacro
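; CMUL is a lane-wise complex multiply against the twiddle tables: with
; c = [%5+%1] and s = [%6+%1], and %2/%3 as real/imaginary parts, it
; computes (hedged scalar view)
;
;   %2' = %2*s - %3*c
;   %3' = %3*s + %2*c       ; i.e. (%2 + I*%3) * (s + I*c)
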
%macro POSROTATESHUF_AVX 5 ;j, k, z+n8, tcos+n8, tsin+n8
.post:
    vmovaps ymm1, [%3+%1*2]
    vmovaps ymm0, [%3+%1*2+0x20]
    vmovaps ymm3, [%3+%2*2]
    vmovaps ymm2, [%3+%2*2+0x20]

    CMUL %1, ymm0, ymm1, %3, %4, %5
    CMUL %2, ymm2, ymm3, %3, %4, %5
    vshufps ymm1, ymm1, ymm1, 0x1b
    vshufps ymm3, ymm3, ymm3, 0x1b
    vperm2f128 ymm1, ymm1, ymm1, 0x01
    vperm2f128 ymm3, ymm3, ymm3, 0x01
    vunpcklps ymm6, ymm2, ymm1
    vunpckhps ymm4, ymm2, ymm1
    vunpcklps ymm7, ymm0, ymm3
    vunpckhps ymm5, ymm0, ymm3

    vextractf128 [%3+%1*2],      ymm7, 0
    vextractf128 [%3+%1*2+0x10], ymm5, 0
    vextractf128 [%3+%1*2+0x20], ymm7, 1
    vextractf128 [%3+%1*2+0x30], ymm5, 1

    vextractf128 [%3+%2*2],      ymm6, 0
    vextractf128 [%3+%2*2+0x10], ymm4, 0
    vextractf128 [%3+%2*2+0x20], ymm6, 1
    vextractf128 [%3+%2*2+0x30], ymm4, 1

    sub %2, 0x20
    add %1, 0x20
    jl .post
%endmacro

%macro POSROTATESHUF 5 ;j, k, z+n8, tcos+n8, tsin+n8
.post:
    movaps xmm1, [%3+%1*2]
    movaps xmm0, [%3+%1*2+0x10]
    CMUL %1, xmm0, xmm1, %3, %4, %5
    movaps xmm5, [%3+%2*2]
    movaps xmm4, [%3+%2*2+0x10]
    CMUL %2, xmm4, xmm5, %3, %4, %5
    shufps xmm1, xmm1, 0x1b
    shufps xmm5, xmm5, 0x1b
    movaps xmm6, xmm4
    unpckhps xmm4, xmm1
    unpcklps xmm6, xmm1
    movaps xmm2, xmm0
    unpcklps xmm0, xmm5
    unpckhps xmm2, xmm5
    movaps [%3+%2*2],      xmm6
    movaps [%3+%2*2+0x10], xmm4
    movaps [%3+%1*2],      xmm0
    movaps [%3+%1*2+0x10], xmm2
    sub %2, 0x10
    add %1, 0x10
    jl .post
%endmacro

%macro DECL_IMDCT 2
cglobal imdct_half%1, 3,7,8; FFTContext *s, FFTSample *output, const FFTSample *input
%ifdef ARCH_X86_64
%define rrevtab r10
%define rtcos   r11
%define rtsin   r12
    push r12
    push r13
    push r14
%else
%define rrevtab r6
%define rtsin   r6
%define rtcos   r5
%endif
    mov r3d, [r0+FFTContext.mdctsize]
    add r2, r3
    shr r3, 1
    mov rtcos, [r0+FFTContext.tcos]
    mov rtsin, [r0+FFTContext.tsin]
    add rtcos, r3
    add rtsin, r3
%ifndef ARCH_X86_64
    push rtcos
    push rtsin
%endif
    shr r3, 1
    mov rrevtab, [r0+FFTContext.revtab]
    add rrevtab, r3
%ifndef ARCH_X86_64
    push rrevtab
%endif

    sub r3, 4
%ifdef ARCH_X86_64
    xor r4, r4
    sub r4, r3
%endif
.pre:
%ifndef ARCH_X86_64
    ;unspill
    xor r4, r4
    sub r4, r3
    mov rtsin, [esp+4]
    mov rtcos, [esp+8]
%endif

    PREROTATER r4, r3, r2, rtcos, rtsin
%ifdef ARCH_X86_64
    movzx r5,  word [rrevtab+r4-4]
    movzx r6,  word [rrevtab+r4-2]
    movzx r13, word [rrevtab+r3]
    movzx r14, word [rrevtab+r3+2]
    movlps [r1+r5 *8], xmm0
    movhps [r1+r6 *8], xmm0
    movlps [r1+r13*8], xmm1
    movhps [r1+r14*8], xmm1
    add r4, 4
%else
    mov r6, [esp]
    movzx r5, word [r6+r4-4]
    movzx r4, word [r6+r4-2]
    movlps [r1+r5*8], xmm0
    movhps [r1+r4*8], xmm0
    movzx r5, word [r6+r3]
    movzx r4, word [r6+r3+2]
    movlps [r1+r5*8], xmm1
    movhps [r1+r4*8], xmm1
%endif
    sub r3, 4
    jns .pre

    mov r5, r0
    mov r6, r1
    mov r0, r1
    mov r1d, [r5+FFTContext.nbits]

    FFT_DISPATCH %1, r1

    mov r0d, [r5+FFTContext.mdctsize]
    add r6, r0
    shr r0, 1
%ifndef ARCH_X86_64
%define rtcos r2
%define rtsin r3
    mov rtcos, [esp+8]
    mov rtsin, [esp+4]
%endif
    neg r0
    mov r1, -mmsize
    sub r1, r0
    %2 r0, r1, r6, rtcos, rtsin
%ifdef ARCH_X86_64
    pop r14
    pop r13
    pop r12
%else
    add esp, 12
%endif
%ifidn avx_enabled, 1
    vzeroupper
%endif
    RET
%endmacro

DECL_IMDCT _sse, POSROTATESHUF

INIT_YMM

DECL_IMDCT _avx, POSROTATESHUF_AVX