;******************************************************************************
;* FFT transform with SSE/3DNow optimizations
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
; These functions are not individually interchangeable with the C versions.
; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
; in blocks as convenient to the vector size.
; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
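; For example, an 8-point buffer that the C code stores interleaved as
;     {r0,i0, r1,i1, r2,i2, r3,i3, r4,i4, r5,i5, r6,i6, r7,i7}
; is held between SSE passes in split blocks of four,
;     {r0,r1,r2,r3, i0,i1,i2,i3, r4,r5,r6,r7, i4,i5,i6,i7}
; and between 3DNow passes in split blocks of two,
;     {r0,r1, i0,i1, r2,r3, i2,i3, ...}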
%include "x86inc.asm"

SECTION_RODATA

%define M_SQRT1_2 0.70710678118654752440
ps_root2: times 4 dd M_SQRT1_2
ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
ps_m1p1: dd 1<<31, 0

%assign i 16
%rep 13
cextern cos_ %+ i
%assign i i<<1
%endrep

%ifdef ARCH_X86_64
%define pointer dq
%else
%define pointer dd
%endif

%macro IF0 1+
%endmacro
%macro IF1 1+
%1
%endmacro

section .text align=16
%macro T2_3DN 4 ; z0, z1, mem0, mem1
    mova     %1, %3
    mova     %2, %1
    pfadd    %1, %4
    pfsub    %2, %4
%endmacro
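; i.e. a radix-2 butterfly on packed {re,im} pairs:
;     z0 = mem0 + mem1
;     z1 = mem0 - mem1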
%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1
    mova     %5, %3
    pfsub    %3, %4
    pfadd    %5, %4 ; {t6,t5}
    pxor     %3, [ps_m1p1] ; {t8,t7}
    mova     %6, %1
    pswapd   %3, %3
    pfadd    %1, %5 ; {r0,i0}
    pfsub    %6, %5 ; {r2,i2}
    mova     %4, %2
    pfadd    %2, %3 ; {r1,i1}
    pfsub    %4, %3 ; {r3,i3}
    SWAP     %3, %6
%endmacro
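; Together with T2_3DN this forms a 4-point DFT, one {re,im} pair per
; register. In scalar terms (a sketch; %1,%2 enter holding x0+x1 and x0-x1):
;     X0 = (x0+x1) + (x2+x3)     X2 = (x0+x1) - (x2+x3)
;     X1 = (x0-x1) - i*(x2-x3)   X3 = (x0-x1) + i*(x2-x3)
; where the pxor/pswapd pair implements the multiplication by -i.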
; in: %1={r0,i0,r1,i1} %2={r2,i2,r3,i3}
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3}
%macro T4_SSE 3
    mova     %3, %1
    shufps   %1, %2, 0x64 ; {r0,i0,r3,i2}
    shufps   %3, %2, 0xce ; {r1,i1,r2,i3}
    mova     %2, %1
    addps    %1, %3 ; {t1,t2,t6,t5}
    subps    %2, %3 ; {t3,t4,t8,t7}
    mova     %3, %1
    shufps   %1, %2, 0x44 ; {t1,t2,t3,t4}
    shufps   %3, %2, 0xbe ; {t6,t5,t7,t8}
    mova     %2, %1
    addps    %1, %3 ; {r0,i0,r1,i1}
    subps    %2, %3 ; {r2,i2,r3,i3}
    mova     %3, %1
    shufps   %1, %2, 0x88 ; {r0,r1,r2,r3}
    shufps   %3, %2, 0xdd ; {i0,i1,i2,i3}
    SWAP     %2, %3
%endmacro
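; Scalar equivalent, derived from the shuffle comments above:
;     X0 = (x0+x1) + (x2+x3)     X2 = (x0+x1) - (x2+x3)
;     X1 = (x0-x1) - i*(x2-x3)   X3 = (x0-x1) + i*(x2-x3)
; i.e. the same 4-point transform as T4_3DN above, but with the outputs
; produced directly in split {4x real, 4x imaginary} form.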
%macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1
    mova     %5, %3
    shufps   %3, %4, 0x44 ; {r4,i4,r6,i6}
    shufps   %5, %4, 0xee ; {r5,i5,r7,i7}
    mova     %6, %3
    subps    %3, %5 ; {r5,i5,r7,i7}
    addps    %6, %5 ; {t1,t2,t3,t4}
    mova     %5, %3
    shufps   %5, %5, 0xb1 ; {i5,r5,i7,r7}
    mulps    %3, [ps_root2mppm] ; {-r5,i5,r7,-i7}
    mulps    %5, [ps_root2]
    addps    %3, %5 ; {t8,t7,ta,t9}
    mova     %5, %6
    shufps   %6, %3, 0x36 ; {t3,t2,t9,t8}
    shufps   %5, %3, 0x9c ; {t1,t4,t7,ta}
    mova     %3, %6
    addps    %6, %5 ; {t1,t2,t9,ta}
    subps    %3, %5 ; {t6,t5,tc,tb}
    mova     %5, %6
    shufps   %6, %3, 0xd8 ; {t1,t9,t5,tb}
    shufps   %5, %3, 0x8d ; {t2,ta,t6,tc}
    mova     %3, %1
    mova     %4, %2
    addps    %1, %6 ; {r0,r1,r2,r3}
    addps    %2, %5 ; {i0,i1,i2,i3}
    subps    %3, %6 ; {r4,r5,r6,r7}
    subps    %4, %5 ; {i4,i5,i6,i7}
%endmacro
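; Roughly: the second half of an 8-point FFT. Pairs (x4,x5) and (x6,x7) get a
; radix-2 butterfly; the differences are then rotated by 45-degree factors
; (the sqrt(1/2) magnitudes and the -,+,+,- sign pattern come from ps_root2
; and ps_root2mppm), and a final radix-2 merges the result with the 4-point
; transform already held in %1,%2, yielding all 8 outputs in split form.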
; scheduled for cpu-bound sizes
%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
IF%1 mova    m4, Z(4)
IF%1 mova    m5, Z(5)
    mova     m0, %2 ; wre
    mova     m2, m4
    mova     m1, %3 ; wim
    mova     m3, m5
    mulps    m2, m0 ; r2*wre
IF%1 mova    m6, Z(6)
    mulps    m3, m1 ; i2*wim
IF%1 mova    m7, Z(7)
    mulps    m4, m1 ; r2*wim
    mulps    m5, m0 ; i2*wre
    addps    m2, m3 ; r2*wre + i2*wim
    mova     m3, m1
    mulps    m1, m6 ; r3*wim
    subps    m5, m4 ; i2*wre - r2*wim
    mova     m4, m0
    mulps    m3, m7 ; i3*wim
    mulps    m4, m6 ; r3*wre
    mulps    m0, m7 ; i3*wre
    subps    m4, m3 ; r3*wre - i3*wim
    mova     m3, Z(0)
    addps    m0, m1 ; i3*wre + r3*wim
    mova     m1, m4
    addps    m4, m2 ; t5
    subps    m1, m2 ; t3
    subps    m3, m4 ; r2
    addps    m4, Z(0) ; r0
    mova     m6, Z(2)
    mova     Z(4), m3
    mova     Z(0), m4
    mova     m3, m5
    subps    m5, m0 ; t4
    mova     m4, m6
    subps    m6, m5 ; r3
    addps    m5, m4 ; r1
    mova     Z(6), m6
    mova     Z(2), m5
    mova     m2, Z(3)
    addps    m3, m0 ; t6
    subps    m2, m1 ; i3
    mova     m7, Z(1)
    addps    m1, Z(3) ; i1
    mova     Z(7), m2
    mova     Z(3), m1
    mova     m4, m7
    subps    m7, m3 ; i2
    addps    m3, m4 ; i0
    mova     Z(5), m7
    mova     Z(1), m3
%endmacro
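; In complex terms (following the operand comments above): with the half-size
; transform in Z(0..3), the two quarter-size transforms in Z(4..7) and the
; twiddle w = wre + i*wim, each butterfly computes
;     u   = z2*conj(w)       v   = z3*w
;     z0' = z0 + (u+v)       z2' = z0 - (u+v)
;     z1' = z1 + i*(v-u)     z3' = z1 - i*(v-u)
; which is one split-radix combining pass.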
; scheduled to avoid store->load aliasing
%macro PASS_BIG 1 ; (!interleave)
    mova     m4, Z(4) ; r2
    mova     m5, Z(5) ; i2
    mova     m2, m4
    mova     m0, [wq] ; wre
    mova     m3, m5
    mova     m1, [wq+o1q] ; wim
    mulps    m2, m0 ; r2*wre
    mova     m6, Z(6) ; r3
    mulps    m3, m1 ; i2*wim
    mova     m7, Z(7) ; i3
    mulps    m4, m1 ; r2*wim
    mulps    m5, m0 ; i2*wre
    addps    m2, m3 ; r2*wre + i2*wim
    mova     m3, m1
    mulps    m1, m6 ; r3*wim
    subps    m5, m4 ; i2*wre - r2*wim
    mova     m4, m0
    mulps    m3, m7 ; i3*wim
    mulps    m4, m6 ; r3*wre
    mulps    m0, m7 ; i3*wre
    subps    m4, m3 ; r3*wre - i3*wim
    mova     m3, Z(0)
    addps    m0, m1 ; i3*wre + r3*wim
    mova     m1, m4
    addps    m4, m2 ; t5
    subps    m1, m2 ; t3
    subps    m3, m4 ; r2
    addps    m4, Z(0) ; r0
    mova     m6, Z(2)
    mova     Z(4), m3
    mova     Z(0), m4
    mova     m3, m5
    subps    m5, m0 ; t4
    mova     m4, m6
    subps    m6, m5 ; r3
    addps    m5, m4 ; r1
IF%1 mova    Z(6), m6
IF%1 mova    Z(2), m5
    mova     m2, Z(3)
    addps    m3, m0 ; t6
    subps    m2, m1 ; i3
    mova     m7, Z(1)
    addps    m1, Z(3) ; i1
IF%1 mova    Z(7), m2
IF%1 mova    Z(3), m1
    mova     m4, m7
    subps    m7, m3 ; i2
    addps    m3, m4 ; i0
IF%1 mova    Z(5), m7
IF%1 mova    Z(1), m3
%if %1==0
    mova     m4, m5 ; r1
    mova     m0, m6 ; r3
    unpcklps m5, m1
    unpckhps m4, m1
    unpcklps m6, m2
    unpckhps m0, m2
    mova     m1, Z(0)
    mova     m2, Z(4)
    mova     Z(2), m5
    mova     Z(3), m4
    mova     Z(6), m6
    mova     Z(7), m0
    mova     m5, m1 ; r0
    mova     m4, m2 ; r2
    unpcklps m1, m3
    unpckhps m5, m3
    unpcklps m2, m7
    unpckhps m4, m7
    mova     Z(0), m1
    mova     Z(1), m5
    mova     Z(4), m2
    mova     Z(5), m4
%endif
%endmacro
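; PASS_BIG 0 (the interleave variant) is used only for the final pass: the
; unpcklps/unpckhps block above transposes the split {4x re, 4x im} blocks
; back into C's interleaved FFTComplex order as it stores them.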
%macro PUNPCK 3
    mova      %3, %1
    punpckldq %1, %2
    punpckhdq %3, %2
%endmacro

INIT_XMM
%define mova movaps
%define Z(x) [r0+mmsize*x]

align 16
fft4_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova     Z(0), m0
    mova     Z(1), m1
    ret

align 16
fft8_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova     m2, Z(2)
    mova     m3, Z(3)
    T8_SSE   m0, m1, m2, m3, m4, m5
    mova     Z(0), m0
    mova     Z(1), m1
    mova     Z(2), m2
    mova     Z(3), m3
    ret

align 16
fft16_sse:
    mova     m0, Z(0)
    mova     m1, Z(1)
    T4_SSE   m0, m1, m2
    mova     m2, Z(2)
    mova     m3, Z(3)
    T8_SSE   m0, m1, m2, m3, m4, m5
    mova     m4, Z(4)
    mova     m5, Z(5)
    mova     Z(0), m0
    mova     Z(1), m1
    mova     Z(2), m2
    mova     Z(3), m3
    T4_SSE   m4, m5, m6
    mova     m6, Z(6)
    mova     m7, Z(7)
    T4_SSE   m6, m7, m0
    PASS_SMALL 0, [cos_16], [cos_16+16]
    ret

INIT_MMX

%macro FFT48_3DN 1
align 16
fft4%1:
    T2_3DN   m0, m1, Z(0), Z(1)
    mova     m2, Z(2)
    mova     m3, Z(3)
    T4_3DN   m0, m1, m2, m3, m4, m5
    PUNPCK   m0, m1, m4
    PUNPCK   m2, m3, m5
    mova     Z(0), m0
    mova     Z(1), m4
    mova     Z(2), m2
    mova     Z(3), m5
    ret

align 16
fft8%1:
    T2_3DN   m0, m1, Z(0), Z(1)
    mova     m2, Z(2)
    mova     m3, Z(3)
    T4_3DN   m0, m1, m2, m3, m4, m5
    mova     Z(0), m0
    mova     Z(2), m2
    T2_3DN   m4, m5, Z(4), Z(5)
    T2_3DN   m6, m7, Z(6), Z(7)
    pswapd   m0, m5
    pswapd   m2, m7
    pxor     m0, [ps_m1p1]
    pxor     m2, [ps_m1p1]
    pfsub    m5, m0
    pfadd    m7, m2
    pfmul    m5, [ps_root2]
    pfmul    m7, [ps_root2]
    T4_3DN   m1, m3, m5, m7, m0, m2
    mova     Z(5), m5
    mova     Z(7), m7
    mova     m0, Z(0)
    mova     m2, Z(2)
    T4_3DN   m0, m2, m4, m6, m5, m7
    PUNPCK   m0, m1, m5
    PUNPCK   m2, m3, m7
    mova     Z(0), m0
    mova     Z(1), m5
    mova     Z(2), m2
    mova     Z(3), m7
    PUNPCK   m4, Z(5), m5
    PUNPCK   m6, Z(7), m7
    mova     Z(4), m4
    mova     Z(5), m5
    mova     Z(6), m6
    mova     Z(7), m7
    ret
%endmacro

FFT48_3DN _3dn2

%macro pswapd 2
%ifidn %1, %2
    movd      [r0+12], %1
    punpckhdq %1, [r0+8]
%else
    movq      %1, %2
    psrlq     %1, 32
    punpckldq %1, %2
%endif
%endmacro
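; Plain 3DNow has no pswapd (it arrived with the extended 3DNow! used by the
; _3dn2 build above), so it is emulated here: the two-register form shifts
; and re-unpacks, while the in-place form bounces the low dword through
; [r0+8..15] as scratch (safe in T4_3DN, since Z(1) has already been
; consumed by that point).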
FFT48_3DN _3dn

%define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)]
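; Effectively Z(x) = zq + (x&~1)*o1q + mmsize*(x&1): block pairs 0/1, 2/3,
; 4/5 sit 0, 2 and 4 times o1q from zq, and pair 6/7 at 6*o1q. Since SIB
; addressing can only scale by 1/2/4/8, the *6 stride is kept in a separate
; register (o3q = (nq*3)<<4 = 6*o1q, set up in DECL_PASS below).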
%macro DECL_PASS 2+ ; name, payload
align 16
%1:
    DEFINE_ARGS z, w, n, o1, o3
    lea      o3q, [nq*3]
    lea      o1q, [nq*8]
    shl      o3q, 4
.loop:
    %2
    add      zq, mmsize*2
    add      wq, mmsize
    sub      nd, mmsize/8
    jg .loop
    rep ret
%endmacro
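; One iteration consumes one vector of twiddles from wq and processes two
; vectors from each Z block pair; nd (set to n4/2 by the fftN stubs below)
; is the loop counter. The trailing "rep ret" avoids ending on a single-byte
; ret at a branch target, the usual AMD branch-predictor workaround.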
INIT_XMM
%define mova movaps
DECL_PASS pass_sse, PASS_BIG 1
DECL_PASS pass_interleave_sse, PASS_BIG 0

INIT_MMX
%define mulps pfmul
%define addps pfadd
%define subps pfsub
%define unpcklps punpckldq
%define unpckhps punpckhdq
DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q]
DECL_PASS pass_interleave_3dn, PASS_BIG 0
%define pass_3dn2 pass_3dn
%define pass_interleave_3dn2 pass_interleave_3dn

%ifdef PIC
%define SECTION_REL - $$
%else
%define SECTION_REL
%endif
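; With PIC, the dispatch tables below store section-relative offsets
; (address - $$), so they need no load-time relocations; fft_dispatch adds
; $$ back before the indirect call.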
%macro DECL_FFT 2-3 ; nbits, cpu, suffix
%xdefine list_of_fft fft4%2 SECTION_REL, fft8%2 SECTION_REL
%if %1==5
%xdefine list_of_fft list_of_fft, fft16%2 SECTION_REL
%endif

%assign n 1<<%1
%rep 17-%1
%assign n2 n/2
%assign n4 n/4
%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2 SECTION_REL

align 16
fft %+ n %+ %3%2:
    call fft %+ n2 %+ %2
    add r0, n*4 - (n&(-2<<%1))
    call fft %+ n4 %+ %2
    add r0, n*2 - (n2&(-2<<%1))
    call fft %+ n4 %+ %2
    sub r0, n*6 + (n2&(-2<<%1))
    lea r1, [cos_ %+ n]
    mov r2d, n4/2
    jmp pass%3%2

%assign n n*2
%endrep
%undef n

align 8
dispatch_tab%3%2: pointer list_of_fft

section .text

; On x86_32, this function does the register saving and restoring for all of fft.
; The others pass args in registers and don't spill anything.
cglobal fft_dispatch%3%2, 2,5,8, z, nbits
    lea r2, [dispatch_tab%3%2]
    mov r2, [r2 + (nbitsq-2)*gprsize]
%ifdef PIC
    lea r3, [$$]
    add r2, r3
%endif
    call r2
    RET
%endmacro ; DECL_FFT
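; Each instantiation below emits the whole ladder of fftN routines plus its
; entry point; e.g. DECL_FFT 5, _sse yields fft_dispatch_sse(z, nbits), which
; looks up fft(1<<nbits) in dispatch_tab_sse. The split-radix recursion is
; visible above: fftN runs fft(N/2) on the first half and fft(N/4) on each
; quarter, then tail-jumps into the combining pass with the cos_N table.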
DECL_FFT 5, _sse
DECL_FFT 5, _sse, _interleave
DECL_FFT 4, _3dn
DECL_FFT 4, _3dn, _interleave
DECL_FFT 4, _3dn2
DECL_FFT 4, _3dn2, _interleave