;; (removed non-source web-viewer chrome: repository "topics" help text and a
;;  "576 lines / 22KB" size readout that were captured along with the file)
;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;* Copyright (c) 2014 James Almer <jamrial <at> gmail.com>
;* Copyright (c) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

; 'pointer' reserves one native-pointer-sized struct member in the struc
; below: 8 bytes (resq) on x86-64, 4 bytes (resd) on x86-32.
%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif
; Assembly-side mirror of the leading fields of the C-side ResampleContext.
; Only the offsets are declared here; the field order and sizes must stay in
; sync with the C struct definition or every [ctxq+ResampleContext.*] access
; below reads the wrong slot.
struc ResampleContext
    .av_class:              pointer 1  ; AVClass* (unused here, keeps offsets aligned)
    .filter_bank:           pointer 1  ; base pointer of the polyphase filter coefficients
    .filter_length:         resd 1     ; taps per filter phase
    .filter_alloc:          resd 1     ; allocated taps per phase (row stride of the bank)
    .ideal_dst_incr:        resd 1
    .dst_incr:              resd 1
    .dst_incr_div:          resd 1     ; whole-sample part of the output step
    .dst_incr_mod:          resd 1     ; fractional part of the output step
    .index:                 resd 1     ; current source/phase index
    .frac:                  resd 1     ; fractional position, compared against src_incr
    .src_incr:              resd 1
    .compensation_distance: resd 1
    .phase_shift:           resd 1     ; log2 of the number of filter phases
    .phase_mask:            resd 1     ; (1 << phase_shift) - 1
    ; there's a few more here but we only care about the first few
endstruc
SECTION_RODATA

pf_1:      dd 1.0     ; 1.0 single precision, %5 constant for the float kernels
pdbl_1:    dq 1.0     ; 1.0 double precision, %5 constant for the double kernels
pd_0x4000: dd 0x4000  ; 1 << 14: rounding bias for the int16 path, which drops
                      ; 15 fractional bits with psrad 15 after pmaddwd

SECTION .text
;------------------------------------------------------------------------------
; RESAMPLE_FNS: emit resample_common_<fmt> and resample_linear_<fmt> for the
; currently-INIT'ed SIMD level.
;
; Macro parameters:
;   %1  sample format name  (float, double or int16)
;   %2  bytes per sample
;   %3  log2(bytes per sample) — converts byte offsets <-> sample counts
;   %4  scalar/packed FP op suffix (s = single, d = double); unused for int16
;   %5  label of a 1.0 constant in matching precision;        unused for int16
;
; Both functions walk the input with a negative byte offset
; (min_filter_count_x4) that counts up towards zero, so the inner loop needs
; only one 'add' + 'js' and src/filter pointers are pre-biased past the end.
;------------------------------------------------------------------------------
%macro RESAMPLE_FNS 3-5 ; format [float, double or int16], bps, log2_bps, float op suffix [s or d], 1.0 constant
; int resample_common_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
cglobal resample_common_%1, 0, 15, 2, ctx, dst, src, phase_shift, index, frac, \
                                      dst_incr_mod, size, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      phase_mask, dst_end, filter_bank

    ; use red-zone for variable storage (leaf function, SysV red zone; on
    ; win64 the spilled arg slot r4m is used instead where noted)
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x14]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                        sized, r3d
    mov                  phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov        update_context_stackd, r4d
%endif
    mov                       indexd, [ctxq+ResampleContext.index]
    mov                        fracd, [ctxq+ResampleContext.frac]
    mov                dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                    src_incrd, [ctxq+ResampleContext.src_incr]
    mov                   ctx_stackq, ctxq
    mov           min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
    mov                dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl           min_filter_len_x4d, %3          ; filter_length -> bytes
    lea                     dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                          ecx, [ctxq+ResampleContext.phase_shift]
    mov                          edi, [ctxq+ResampleContext.filter_alloc]

    ; re-map: ctx (rdi) is dead, its register now holds filter_alloc;
    ; phase_shift sits in ecx so 'sar ..., phase_shiftb' can use cl
    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    mov                          R9d, [ctxq+ResampleContext.filter_alloc]
    mov                          ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%endif

    ; bias src/filter_bank past the end so a negative offset counting up to 0
    ; walks exactly filter_length samples
    neg            min_filter_len_x4q
    sub                 filter_bankq, min_filter_len_x4q
    sub                         srcq, min_filter_len_x4q
    mov                   src_stackq, srcq
%else ; x86-32
cglobal resample_common_%1, 1, 7, 2, ctx, phase_shift, dst, frac, \
                                     index, min_filter_length_x4, filter_bank

    ; push temp variables to stack (only 7 GPRs available)
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                         dstq, r1mp
    mov                           r3, r3mp
    lea                           r3, [dstq+r3*%2]                    ; r3 = dst_end
    PUSH                              dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                              dword [ctxq+ResampleContext.filter_alloc]
    PUSH                              r3
    PUSH                              dword [ctxq+ResampleContext.phase_mask]
    PUSH                              dword [ctxq+ResampleContext.src_incr]
    mov        min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                       indexd, [ctxq+ResampleContext.index]
    shl        min_filter_length_x4d, %3
    mov                        fracd, [ctxq+ResampleContext.frac]
    neg        min_filter_length_x4q
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                         r2mp, min_filter_length_x4q           ; bias src arg slot
    sub                 filter_bankq, min_filter_length_x4q
    PUSH                              min_filter_length_x4q
    PUSH                              filter_bankq
    mov                 phase_shiftd, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS src, phase_shift, dst, frac, index, min_filter_count_x4, filter

    ; name the 8 pushed dwords (0x20 bytes, released with ADD rsp, 0x20 below)
%define filter_bankq          dword [rsp+0x0]
%define min_filter_length_x4q dword [rsp+0x4]
%define src_incrd             dword [rsp+0x8]
%define phase_maskd           dword [rsp+0xc]
%define dst_endq              dword [rsp+0x10]
%define filter_allocd         dword [rsp+0x14]
%define dst_incr_modd         dword [rsp+0x18]
%define dst_incr_divd         dword [rsp+0x1c]

    mov                         srcq, r2mp
%endif

.loop:
    ; filter = filter_bank + (filter_alloc * index) samples: select the
    ; coefficient row for the current phase
    mov                      filterd, filter_allocd
    imul                     filterd, indexd
%if ARCH_X86_64
    mov         min_filter_count_x4q, min_filter_len_x4q
    lea                      filterq, [filter_bankq+filterq*%2]
%else ; x86-32
    mov         min_filter_count_x4q, filter_bankq
    lea                      filterq, [min_filter_count_x4q+filterq*%2]
    mov         min_filter_count_x4q, min_filter_length_x4q
%endif
%ifidn %1, int16
    movd                          m0, [pd_0x4000]   ; preload rounding bias (1<<14)
%else ; float/double
    xorps                         m0, m0, m0        ; accumulator = 0
%endif

    align 16
.inner_loop:
    ; multiply-accumulate one vector of src against the filter row
    movu                          m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
    pmaddwd                       m1, [filterq+min_filter_count_x4q*1]
    paddd                         m0, m1
%else ; float/double
    mulp%4                        m1, m1, [filterq+min_filter_count_x4q*1]
    addp%4                        m0, m0, m1
%endif
    add         min_filter_count_x4q, mmsize
    js .inner_loop                                  ; loop while byte offset < 0

%if cpuflag(avx)
    ; fold the ymm high lane into the low lane before the xmm horizontal sum
    vextractf128                 xm1, m0, 0x1
    addps                        xm0, xm1
%endif

%ifidn %1, int16
    ; horizontal sum, round/shift Q15, pack and store one int16 sample;
    ; index/frac stepping is interleaved to hide latency
%if mmsize == 16
    pshufd                        m1, m0, q0032
    paddd                         m0, m1
    pshufd                        m1, m0, q0001
%else ; mmsize == 8
    pshufw                        m1, m0, q0032
%endif
    paddd                         m0, m1
    psrad                         m0, 15
    add                        fracd, dst_incr_modd
    packssdw                      m0, m0
    add                       indexd, dst_incr_divd
    movd                      [dstq], m0
%else ; float/double
    ; horizontal sum & store
    movhlps                      xm1, xm0
%ifidn %1, float
    addps                        xm0, xm1
    shufps                       xm1, xm0, xm0, q0001
%endif
    add                        fracd, dst_incr_modd
    addp%4                       xm0, xm1
    add                       indexd, dst_incr_divd
    movs%4                    [dstq], xm0
%endif

    ; carry the fractional position: if frac overflowed src_incr, consume one
    ; extra source sample
    cmp                        fracd, src_incrd
    jl .skip
    sub                        fracd, src_incrd
    inc                       indexd

%if UNIX64
    ; same register layout, 'filter' renamed to 'index_incr' for the tail
    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS src, phase_shift, dst, frac, index, index_incr
%endif

.skip:
    ; advance: src += (index >> phase_shift) samples, index &= phase_mask
    mov                  index_incrd, indexd
    add                         dstq, %2
    and                       indexd, phase_maskd
    sar                  index_incrd, phase_shiftb
    lea                         srcq, [srcq+index_incrq*%2]
    cmp                         dstq, dst_endq
    jne .loop

%if ARCH_X86_64
    DEFINE_ARGS ctx, dst, src, phase_shift, index, frac
%else ; x86-32
    DEFINE_ARGS src, ctx, update_context, frac, index
%endif

    cmp  dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                         ctxq, ctx_stackq
    movifnidn                    rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub                          rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                          rax, %3            ; bytes consumed -> samples

.skip_store:
%if ARCH_X86_32
    ADD                          rsp, 0x20          ; release the 8 pushed dwords
%endif
    RET

; int resample_linear_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
;
; Same as the common kernel, but accumulates two adjacent filter phases
; (filter1/filter2) and linearly interpolates between them by frac/src_incr.
%if ARCH_X86_64 ; unix64 and win64
%if UNIX64
cglobal resample_linear_%1, 0, 15, 5, ctx, dst, phase_mask, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      src, dst_end, filter_bank

    mov                         srcq, r2mp
%else ; win64
cglobal resample_linear_%1, 0, 15, 5, ctx, phase_mask, src, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      dst, dst_end, filter_bank

    mov                         dstq, r1mp
%endif

    ; use red-zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%define phase_mask_stackd     [rsp-0x14]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x18]
%endif

    ; load as many variables in registers as possible; for the rest, store
    ; on stack so that we have 'ctx' available as one extra register
    mov                        sized, r3d
    mov                  phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov        update_context_stackd, r4d
%endif
    mov                       indexd, [ctxq+ResampleContext.index]
    mov                        fracd, [ctxq+ResampleContext.frac]
    mov                dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov                    src_incrd, [ctxq+ResampleContext.src_incr]
    mov                   ctx_stackq, ctxq
    mov            phase_mask_stackd, phase_maskd
    mov           min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
%ifidn %1, int16
    movd                          m4, [pd_0x4000]   ; m4 = rounding bias, kept live
%else ; float/double
    ; m4 = 1.0 / src_incr, the interpolation scale used per output sample
    cvtsi2s%4                    xm0, src_incrd
    movs%4                       xm4, [%5]
    divs%4                       xm4, xm0
%endif
    mov                dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl           min_filter_len_x4d, %3
    lea                     dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov                          ecx, [ctxq+ResampleContext.phase_shift]
    mov                          edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    mov                          R9d, [ctxq+ResampleContext.filter_alloc]
    mov                          ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%endif

    neg            min_filter_len_x4q
    sub                 filter_bankq, min_filter_len_x4q
    sub                         srcq, min_filter_len_x4q
    mov                   src_stackq, srcq
%else ; x86-32
cglobal resample_linear_%1, 1, 7, 5, ctx, min_filter_length_x4, filter2, \
                                     frac, index, dst, filter_bank

    ; push temp variables to stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov                         dstq, r1mp
    mov                           r3, r3mp
    lea                           r3, [dstq+r3*%2]                    ; r3 = dst_end
    PUSH                              dword [ctxq+ResampleContext.dst_incr_div]
    PUSH                              r3
    mov                           r3, dword [ctxq+ResampleContext.filter_alloc]
    PUSH                              dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH                              r3
    shl                           r3, %3                             ; filter_alloc in bytes
    PUSH                              r3
    mov                           r3, dword [ctxq+ResampleContext.src_incr]
    PUSH                              dword [ctxq+ResampleContext.phase_mask]
    PUSH                              r3d
%ifidn %1, int16
    movd                          m4, [pd_0x4000]
%else ; float/double
    ; m4 = 1.0 / src_incr (r3d still holds src_incr here)
    cvtsi2s%4                    xm0, r3d
    movs%4                       xm4, [%5]
    divs%4                       xm4, xm0
%endif
    mov        min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov                       indexd, [ctxq+ResampleContext.index]
    shl        min_filter_length_x4d, %3
    mov                        fracd, [ctxq+ResampleContext.frac]
    neg        min_filter_length_x4q
    mov                 filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub                         r2mp, min_filter_length_x4q
    sub                 filter_bankq, min_filter_length_x4q
    PUSH                              min_filter_length_x4q
    PUSH                              filter_bankq
    PUSH                              dword [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS filter1, min_filter_count_x4, filter2, frac, index, dst, src

    ; name the 10 pushed dwords (0x28 bytes, released with ADD rsp, 0x28 below)
%define phase_shift_stackd    dword [rsp+0x0]
%define filter_bankq          dword [rsp+0x4]
%define min_filter_length_x4q dword [rsp+0x8]
%define src_incrd             dword [rsp+0xc]
%define phase_mask_stackd     dword [rsp+0x10]
%define filter_alloc_x4q      dword [rsp+0x14]
%define filter_allocd         dword [rsp+0x18]
%define dst_incr_modd         dword [rsp+0x1c]
%define dst_endq              dword [rsp+0x20]
%define dst_incr_divd         dword [rsp+0x24]

    mov                         srcq, r2mp
%endif

.loop:
    ; filter1 = row for current phase; filter2 = next row (phase + 1)
    mov                     filter1d, filter_allocd
    imul                    filter1d, indexd
%if ARCH_X86_64
    mov         min_filter_count_x4q, min_filter_len_x4q
    lea                     filter1q, [filter_bankq+filter1q*%2]
    lea                     filter2q, [filter1q+filter_allocq*%2]
%else ; x86-32
    mov         min_filter_count_x4q, filter_bankq
    lea                     filter1q, [min_filter_count_x4q+filter1q*%2]
    mov         min_filter_count_x4q, min_filter_length_x4q
    mov                     filter2q, filter1q
    add                     filter2q, filter_alloc_x4q
%endif
%ifidn %1, int16
    mova                          m0, m4            ; both accumulators start at 1<<14
    mova                          m2, m4
%else ; float/double
    xorps                         m0, m0, m0        ; m0 = sum(src * filter1)
    xorps                         m2, m2, m2        ; m2 = sum(src * filter2)
%endif

    align 16
.inner_loop:
    movu                          m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
    pmaddwd                       m3, m1, [filter2q+min_filter_count_x4q*1]
    pmaddwd                       m1, [filter1q+min_filter_count_x4q*1]
    paddd                         m2, m3
    paddd                         m0, m1
%else ; float/double
    mulp%4                        m3, m1, [filter2q+min_filter_count_x4q*1]
    mulp%4                        m1, m1, [filter1q+min_filter_count_x4q*1]
    addp%4                        m2, m2, m3
    addp%4                        m0, m0, m1
%endif
    add         min_filter_count_x4q, mmsize
    js .inner_loop                                  ; loop while byte offset < 0

%if cpuflag(avx)
    vextractf128                 xm1, m0, 0x1
    vextractf128                 xm3, m2, 0x1
    addps                        xm0, xm1
    addps                        xm2, xm3
%endif

%ifidn %1, int16
    ; horizontal sums of both accumulators, then
    ; val += (v2 - val) * frac / src_incr via imul/idiv on eax:edx
%if mmsize == 16
    pshufd                        m3, m2, q0032
    pshufd                        m1, m0, q0032
    paddd                         m2, m3
    paddd                         m0, m1
    pshufd                        m3, m2, q0001
    pshufd                        m1, m0, q0001
%else ; mmsize == 8
    pshufw                        m3, m2, q0032
    pshufw                        m1, m0, q0032
%endif
    paddd                         m2, m3
    paddd                         m0, m1
    psubd                         m2, m0            ; m2 = v2 - val
    ; This is probably a really bad idea on atom and other machines with a
    ; long transfer latency between GPRs and XMMs (atom). However, it does
    ; make the clip a lot simpler...
    movd                         eax, m2
    add                       indexd, dst_incr_divd
    imul                              fracd         ; edx:eax = (v2-val) * frac
    idiv                              src_incrd     ; eax = above / src_incr
    movd                          m1, eax
    add                        fracd, dst_incr_modd
    paddd                         m0, m1
    psrad                         m0, 15            ; drop Q15 fraction (bias in m4 rounds)
    packssdw                      m0, m0
    movd                      [dstq], m0

    ; note that for imul/idiv, I need to move filter to edx/eax for each:
    ; - 32bit: eax=r0[filter1], edx=r2[filter2]
    ; - win64: eax=r6[filter1], edx=r1[todo]
    ; - unix64: eax=r6[filter1], edx=r2[todo]
%else ; float/double
    ; val += (v2 - val) * (FELEML) frac / c->src_incr;
    cvtsi2s%4                    xm1, fracd
    subp%4                       xm2, xm0
    mulp%4                       xm1, xm4           ; frac * (1/src_incr), see m4 above
    shufp%4                      xm1, xm1, q0000
    mulp%4                       xm2, xm1
    addp%4                       xm0, xm2

    ; horizontal sum & store
    movhlps                      xm1, xm0
%ifidn %1, float
    addps                        xm0, xm1
    shufps                       xm1, xm0, xm0, q0001
%endif
    add                        fracd, dst_incr_modd
    addp%4                       xm0, xm1
    add                       indexd, dst_incr_divd
    movs%4                    [dstq], xm0
%endif

    ; carry the fractional position
    cmp                        fracd, src_incrd
    jl .skip
    sub                        fracd, src_incrd
    inc                       indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, phase_shift, index_incr, frac, index, dst, src
%endif

.skip:
%if ARCH_X86_32
    ; phase_shift lives on the stack here; reload it into a GPR for the sar
    mov                 phase_shiftd, phase_shift_stackd
%endif
    mov                  index_incrd, indexd
    add                         dstq, %2
    and                       indexd, phase_mask_stackd
    sar                  index_incrd, phase_shiftb
    lea                         srcq, [srcq+index_incrq*%2]
    cmp                         dstq, dst_endq
    jne .loop

%if UNIX64
    DEFINE_ARGS ctx, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS ctx, filter2, src, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, ctx, update_context, frac, index, dst, src
%endif

    cmp  dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the consumed
    ; number of bytes; however, we only use the value if update_context
    ; is true, so let's just leave it uninitialized otherwise
    mov                         ctxq, ctx_stackq
    movifnidn                    rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub                          rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr                          rax, %3            ; bytes consumed -> samples

.skip_store:
%if ARCH_X86_32
    ADD                          rsp, 0x28          ; release the 10 pushed dwords
%endif
    RET
%endmacro
; Instantiate common+linear resamplers per SIMD level / sample format:
;   float : 4 bytes/sample, log2 = 2, scalar suffix 's', 1.0f constant
;   int16 : 2 bytes/sample, log2 = 1 (no FP parameters needed)
;   double: 8 bytes/sample, log2 = 3, scalar suffix 'd', 1.0  constant
INIT_XMM sse
RESAMPLE_FNS float, 4, 2, s, pf_1

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif

%if ARCH_X86_32
; MMX (mmsize == 8) int16 version only makes sense on 32-bit targets
INIT_MMX mmxext
RESAMPLE_FNS int16, 2, 1
%endif

INIT_XMM sse2
RESAMPLE_FNS int16, 2, 1
RESAMPLE_FNS double, 8, 3, d, pdbl_1