;******************************************************************************
;* x86 optimized channel mixing
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "util.asm"

SECTION_TEXT

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_fltp_flt(float **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------
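; Illustrative scalar equivalent (a sketch inferred from the code below, not
; part of the original source); matrix[0] holds one float gain per input
; channel and the result is written in place over the first channel:
;     for (i = 0; i < len; i++)
;         src[0][i] = matrix[0][0] * src[0][i] + matrix[0][1] * src[1][i];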
%macro MIX_2_TO_1_FLTP_FLT 0
cglobal mix_2_to_1_fltp_flt, 3,4,6, src, matrix, len, src1
    mov          src1q, [srcq+gprsize]
    mov           srcq, [srcq]
    sub          src1q, srcq
    mov        matrixq, [matrixq]
    VBROADCASTSS    m4, [matrixq]
    VBROADCASTSS    m5, [matrixq+4]
    ALIGN 16
.loop:
    mulps           m0, m4, [srcq]
    mulps           m1, m5, [srcq+src1q]
    mulps           m2, m4, [srcq+mmsize]
    mulps           m3, m5, [srcq+src1q+mmsize]
    addps           m0, m0, m1
    addps           m2, m2, m3
    mova         [srcq], m0
    mova  [srcq+mmsize], m2
    add           srcq, mmsize*2
    sub           lend, mmsize*2/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse
MIX_2_TO_1_FLTP_FLT
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_2_TO_1_FLTP_FLT
%endif

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_flt(int16_t **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------
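; Illustrative scalar equivalent (a sketch inferred from the code below, not
; part of the original source); the int16 samples are converted to float,
; mixed with float gains, then rounded and saturated back to int16:
;     for (i = 0; i < len; i++)
;         src[0][i] = clip_int16(lrintf(matrix[0][0] * src[0][i] +
;                                       matrix[0][1] * src[1][i]));
; clip_int16() and lrintf() stand in for the rounding and saturation done by
; cvtps2dq and packssdw.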
%macro MIX_2_TO_1_S16P_FLT 0
cglobal mix_2_to_1_s16p_flt, 3,4,6, src, matrix, len, src1
    mov          src1q, [srcq+gprsize]
    mov           srcq, [srcq]
    sub          src1q, srcq
    mov        matrixq, [matrixq]
    VBROADCASTSS    m4, [matrixq]
    VBROADCASTSS    m5, [matrixq+4]
    ALIGN 16
.loop:
    mova            m0, [srcq]
    mova            m2, [srcq+src1q]
    S16_TO_S32_SX    0, 1
    S16_TO_S32_SX    2, 3
    cvtdq2ps        m0, m0
    cvtdq2ps        m1, m1
    cvtdq2ps        m2, m2
    cvtdq2ps        m3, m3
    mulps           m0, m4
    mulps           m1, m4
    mulps           m2, m5
    mulps           m3, m5
    addps           m0, m2
    addps           m1, m3
    cvtps2dq        m0, m0
    cvtps2dq        m1, m1
    packssdw        m0, m1
    mova         [srcq], m0
    add           srcq, mmsize
    sub           lend, mmsize/2
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
MIX_2_TO_1_S16P_FLT
INIT_XMM sse4
MIX_2_TO_1_S16P_FLT

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_q8(int16_t **src, int16_t **matrix, int len,
;                            int out_ch, int in_ch);
;-----------------------------------------------------------------------------
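; Illustrative scalar equivalent (a sketch inferred from the code below, not
; part of the original source); the matrix entries are Q8 fixed-point gains,
; so the 32-bit products are scaled back down by 8 bits and saturated:
;     for (i = 0; i < len; i++)
;         src[0][i] = clip_int16((matrix[0][0] * src[0][i] +
;                                 matrix[0][1] * src[1][i]) >> 8);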
INIT_XMM sse2
cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
    mov          src1q, [srcq+gprsize]
    mov           srcq, [srcq]
    sub          src1q, srcq
    mov        matrixq, [matrixq]
    movd            m4, [matrixq]
    movd            m5, [matrixq]
    SPLATW          m4, m4, 0
    SPLATW          m5, m5, 1
    pxor            m0, m0
    punpcklwd       m4, m0
    punpcklwd       m5, m0
    ALIGN 16
.loop:
    mova            m0, [srcq]
    mova            m2, [srcq+src1q]
    punpckhwd       m1, m0, m0
    punpcklwd       m0, m0
    punpckhwd       m3, m2, m2
    punpcklwd       m2, m2
    pmaddwd         m0, m4
    pmaddwd         m1, m4
    pmaddwd         m2, m5
    pmaddwd         m3, m5
    paddd           m0, m2
    paddd           m1, m3
    psrad           m0, 8
    psrad           m1, 8
    packssdw        m0, m1
    mova         [srcq], m0
    add           srcq, mmsize
    sub           lend, mmsize/2
    jg .loop
    REP_RET

;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------
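; Illustrative scalar equivalent (a sketch inferred from the code below, not
; part of the original source); one input channel fans out to two outputs,
; both computed from the input sample before src[0] is overwritten:
;     for (i = 0; i < len; i++) {
;         src[1][i] = matrix[1][0] * src[0][i];
;         src[0][i] = matrix[0][0] * src[0][i];
;     }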
%macro MIX_1_TO_2_FLTP_FLT 0
cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
    mov          src1q, [src0q+gprsize]
    mov          src0q, [src0q]
    sub          src1q, src0q
    mov       matrix1q, [matrix0q+gprsize]
    mov       matrix0q, [matrix0q]
    VBROADCASTSS    m2, [matrix0q]
    VBROADCASTSS    m3, [matrix1q]
    ALIGN 16
.loop:
    mova            m0, [src0q]
    mulps           m1, m0, m3
    mulps           m0, m0, m2
    mova        [src0q], m0
    mova  [src0q+src1q], m1
    add          src0q, mmsize
    sub           lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse
MIX_1_TO_2_FLTP_FLT
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_1_TO_2_FLTP_FLT
%endif

;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------
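; Illustrative scalar equivalent (a sketch inferred from the code below, not
; part of the original source); the same 1-to-2 fan-out, but via float with
; rounding and saturation back to int16:
;     for (i = 0; i < len; i++) {
;         src[1][i] = clip_int16(lrintf(matrix[1][0] * src[0][i]));
;         src[0][i] = clip_int16(lrintf(matrix[0][0] * src[0][i]));
;     }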
%macro MIX_1_TO_2_S16P_FLT 0
cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
    mov          src1q, [src0q+gprsize]
    mov          src0q, [src0q]
    sub          src1q, src0q
    mov       matrix1q, [matrix0q+gprsize]
    mov       matrix0q, [matrix0q]
    VBROADCASTSS    m4, [matrix0q]
    VBROADCASTSS    m5, [matrix1q]
    ALIGN 16
.loop:
    mova            m0, [src0q]
    S16_TO_S32_SX    0, 2
    cvtdq2ps        m0, m0
    cvtdq2ps        m2, m2
    mulps           m1, m0, m5
    mulps           m0, m0, m4
    mulps           m3, m2, m5
    mulps           m2, m2, m4
    cvtps2dq        m0, m0
    cvtps2dq        m1, m1
    cvtps2dq        m2, m2
    cvtps2dq        m3, m3
    packssdw        m0, m2
    packssdw        m1, m3
    mova        [src0q], m0
    mova  [src0q+src1q], m1
    add          src0q, mmsize
    sub           lend, mmsize/2
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
MIX_1_TO_2_S16P_FLT
INIT_XMM sse4
MIX_1_TO_2_S16P_FLT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
MIX_1_TO_2_S16P_FLT
%endif

;-----------------------------------------------------------------------------
; void ff_mix_3_8_to_1_2_fltp/s16p_flt(float/int16_t **src, float **matrix,
;                                      int len, int out_ch, int in_ch);
;-----------------------------------------------------------------------------
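; Illustrative scalar equivalent (a sketch inferred from the macro below, not
; part of the original source); the macro is instantiated for 3-8 input
; channels and 1 or 2 output channels, mixing in place into src[0] (and
; src[1] for stereo output), with both output sums formed before any store:
;     for (i = 0; i < len; i++) {
;         float sum[2] = { 0, 0 };
;         for (c = 0; c < in_ch; c++)
;             for (o = 0; o < out_ch; o++)
;                 sum[o] += matrix[o][c] * src[c][i];
;         for (o = 0; o < out_ch; o++)
;             src[o][i] = sum[o];  /* rounded and clipped to int16 for s16p */
;     }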
%macro MIX_3_8_TO_1_2_FLT 3 ; %1 = in channels, %2 = out channels, %3 = s16p or fltp
; define some names to make the code clearer
%assign  in_channels %1
%assign out_channels %2
%assign stereo out_channels - 1
%ifidn %3, s16p
%assign is_s16 1
%else
%assign is_s16 0
%endif
; determine how many matrix elements must go on the stack vs. mmregs
%assign matrix_elements in_channels * out_channels
%if is_s16
%if stereo
%assign needed_mmregs 7
%else
%assign needed_mmregs 5
%endif
%else
%if stereo
%assign needed_mmregs 4
%else
%assign needed_mmregs 3
%endif
%endif
%assign matrix_elements_mm num_mmregs - needed_mmregs
%if matrix_elements < matrix_elements_mm
%assign matrix_elements_mm matrix_elements
%endif
%if matrix_elements_mm < matrix_elements
%assign matrix_elements_stack matrix_elements - matrix_elements_mm
%else
%assign matrix_elements_stack 0
%endif
cglobal mix_%1_to_%2_%3_flt, 3,in_channels+2,needed_mmregs+matrix_elements_mm, src0, src1, len, src2, src3, src4, src5, src6, src7
; get aligned stack space if needed
%if matrix_elements_stack > 0
%if mmsize == 32
%assign bkpreg %1 + 1
%define bkpq r %+ bkpreg %+ q
    mov           bkpq, rsp
    and            rsp, ~(mmsize-1)
    sub            rsp, matrix_elements_stack * mmsize
%else
%assign matrix_stack_size matrix_elements_stack * mmsize
%assign pad matrix_stack_size + (mmsize - gprsize) - (stack_offset & (mmsize - gprsize))
; on x86-32 for 7 and 8 channels we need more stack space for src pointers
%if ARCH_X86_32 && in_channels >= 7
%assign pad pad + 0x10
%define src5m [rsp+matrix_stack_size+0]
%define src6m [rsp+matrix_stack_size+4]
%define src7m [rsp+matrix_stack_size+8]
%endif
    SUB            rsp, pad
%endif
%endif
; load matrix pointers
%define matrix0q r1q
%define matrix1q r3q
%if stereo
    mov       matrix1q, [matrix0q+gprsize]
%endif
    mov       matrix0q, [matrix0q]
; define matrix coeff names
%assign %%i 0
%assign %%j needed_mmregs
%rep in_channels
%if %%i >= matrix_elements_mm
CAT_XDEFINE mx_stack_0_, %%i, 1
CAT_XDEFINE mx_0_, %%i, [rsp+(%%i-matrix_elements_mm)*mmsize]
%else
CAT_XDEFINE mx_stack_0_, %%i, 0
CAT_XDEFINE mx_0_, %%i, m %+ %%j
%assign %%j %%j+1
%endif
%assign %%i %%i+1
%endrep
%if stereo
%assign %%i 0
%rep in_channels
%if in_channels + %%i >= matrix_elements_mm
CAT_XDEFINE mx_stack_1_, %%i, 1
CAT_XDEFINE mx_1_, %%i, [rsp+(in_channels+%%i-matrix_elements_mm)*mmsize]
%else
CAT_XDEFINE mx_stack_1_, %%i, 0
CAT_XDEFINE mx_1_, %%i, m %+ %%j
%assign %%j %%j+1
%endif
%assign %%i %%i+1
%endrep
%endif
; load/splat matrix coeffs
%assign %%i 0
%rep in_channels
%if mx_stack_0_ %+ %%i
    VBROADCASTSS    m0, [matrix0q+4*%%i]
    mova  mx_0_ %+ %%i, m0
%else
    VBROADCASTSS mx_0_ %+ %%i, [matrix0q+4*%%i]
%endif
%if stereo
%if mx_stack_1_ %+ %%i
    VBROADCASTSS    m0, [matrix1q+4*%%i]
    mova  mx_1_ %+ %%i, m0
%else
    VBROADCASTSS mx_1_ %+ %%i, [matrix1q+4*%%i]
%endif
%endif
%assign %%i %%i+1
%endrep
; load channel pointers to registers as offsets from the first channel pointer
%if ARCH_X86_64
    movsxd        lenq, r2d
%endif
    shl           lenq, 2-is_s16
%assign %%i 1
%rep (in_channels - 1)
%if ARCH_X86_32 && in_channels >= 7 && %%i >= 5
    mov          src5q, [src0q+%%i*gprsize]
    add          src5q, lenq
    mov src %+ %%i %+ m, src5q
%else
    mov src %+ %%i %+ q, [src0q+%%i*gprsize]
    add src %+ %%i %+ q, lenq
%endif
%assign %%i %%i+1
%endrep
    mov          src0q, [src0q]
    add          src0q, lenq
    neg           lenq
.loop:
; for x86-32 with 7-8 channels we do not have enough gp registers for all src
; pointers, so we have to load some of them from the stack each time
%define copy_src_from_stack ARCH_X86_32 && in_channels >= 7 && %%i >= 5
%if is_s16
    ; mix with s16p input
    mova            m0, [src0q+lenq]
    S16_TO_S32_SX    0, 1
    cvtdq2ps        m0, m0
    cvtdq2ps        m1, m1
%if stereo
    mulps           m2, m0, mx_1_0
    mulps           m3, m1, mx_1_0
%endif
    mulps           m0, m0, mx_0_0
    mulps           m1, m1, mx_0_0
%assign %%i 1
%rep (in_channels - 1)
%if copy_src_from_stack
%define src_ptr src5q
%else
%define src_ptr src %+ %%i %+ q
%endif
%if stereo
%if copy_src_from_stack
    mov        src_ptr, src %+ %%i %+ m
%endif
    mova            m4, [src_ptr+lenq]
    S16_TO_S32_SX    4, 5
    cvtdq2ps        m4, m4
    cvtdq2ps        m5, m5
    fmaddps         m2, m4, mx_1_ %+ %%i, m2, m6
    fmaddps         m3, m5, mx_1_ %+ %%i, m3, m6
    fmaddps         m0, m4, mx_0_ %+ %%i, m0, m4
    fmaddps         m1, m5, mx_0_ %+ %%i, m1, m5
%else
%if copy_src_from_stack
    mov        src_ptr, src %+ %%i %+ m
%endif
    mova            m2, [src_ptr+lenq]
    S16_TO_S32_SX    2, 3
    cvtdq2ps        m2, m2
    cvtdq2ps        m3, m3
    fmaddps         m0, m2, mx_0_ %+ %%i, m0, m4
    fmaddps         m1, m3, mx_0_ %+ %%i, m1, m4
%endif
%assign %%i %%i+1
%endrep
%if stereo
    cvtps2dq        m2, m2
    cvtps2dq        m3, m3
    packssdw        m2, m3
    mova [src1q+lenq], m2
%endif
    cvtps2dq        m0, m0
    cvtps2dq        m1, m1
    packssdw        m0, m1
    mova [src0q+lenq], m0
%else
    ; mix with fltp input
%if stereo || mx_stack_0_0
    mova            m0, [src0q+lenq]
%endif
%if stereo
    mulps           m1, m0, mx_1_0
%endif
%if stereo || mx_stack_0_0
    mulps           m0, m0, mx_0_0
%else
    mulps           m0, [src0q+lenq], mx_0_0
%endif
%assign %%i 1
%rep (in_channels - 1)
%if copy_src_from_stack
%define src_ptr src5q
    mov        src_ptr, src %+ %%i %+ m
%else
%define src_ptr src %+ %%i %+ q
%endif
; avoid extra load for mono if matrix is in a mm register
%if stereo || mx_stack_0_ %+ %%i
    mova            m2, [src_ptr+lenq]
%endif
%if stereo
    fmaddps         m1, m2, mx_1_ %+ %%i, m1, m3
%endif
%if stereo || mx_stack_0_ %+ %%i
    fmaddps         m0, m2, mx_0_ %+ %%i, m0, m2
%else
    fmaddps         m0, mx_0_ %+ %%i, [src_ptr+lenq], m0, m1
%endif
%assign %%i %%i+1
%endrep
    mova [src0q+lenq], m0
%if stereo
    mova [src1q+lenq], m1
%endif
%endif
    add           lenq, mmsize
    jl .loop
; restore stack pointer
%if matrix_elements_stack > 0
%if mmsize == 32
    mov            rsp, bkpq
%else
    ADD            rsp, pad
%endif
%endif
; zero ymm high halves
%if mmsize == 32
    vzeroupper
%endif
    RET
%endmacro

%macro MIX_3_8_TO_1_2_FLT_FUNCS 0
%assign %%i 3
%rep 6
INIT_XMM sse
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM sse2
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
INIT_XMM sse4
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
%if HAVE_AVX_EXTERNAL
%if ARCH_X86_64 || %%i < 6
INIT_YMM avx
%else
INIT_XMM avx
%endif
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM avx
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
%endif
%if HAVE_FMA4_EXTERNAL
%if ARCH_X86_64 || %%i < 6
INIT_YMM fma4
%else
INIT_XMM fma4
%endif
MIX_3_8_TO_1_2_FLT %%i, 1, fltp
MIX_3_8_TO_1_2_FLT %%i, 2, fltp
INIT_XMM fma4
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
%endif
%assign %%i %%i+1
%endrep
%endmacro

MIX_3_8_TO_1_2_FLT_FUNCS