You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

306 lines
8.1KB

  1. ;******************************************************************************
  2. ;* x86 optimized Format Conversion Utils
  3. ;* Copyright (c) 2008 Loren Merritt
  4. ;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
  5. ;*
  6. ;* This file is part of Libav.
  7. ;*
  8. ;* Libav is free software; you can redistribute it and/or
  9. ;* modify it under the terms of the GNU Lesser General Public
  10. ;* License as published by the Free Software Foundation; either
  11. ;* version 2.1 of the License, or (at your option) any later version.
  12. ;*
  13. ;* Libav is distributed in the hope that it will be useful,
  14. ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. ;* Lesser General Public License for more details.
  17. ;*
  18. ;* You should have received a copy of the GNU Lesser General Public
  19. ;* License along with Libav; if not, write to the Free Software
  20. ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. ;******************************************************************************
  22. %include "x86inc.asm"
  23. %include "x86util.asm"
  24. %include "util.asm"
  25. SECTION_RODATA 32
  26. pf_s32_inv_scale: times 8 dd 0x30000000
  27. pf_s32_scale: times 8 dd 0x4f000000
  28. pf_s16_inv_scale: times 4 dd 0x38000000
  29. pf_s16_scale: times 4 dd 0x47000000
  30. SECTION_TEXT
  31. ;------------------------------------------------------------------------------
  32. ; void ff_conv_s16_to_s32(int32_t *dst, const int16_t *src, int len);
  33. ;------------------------------------------------------------------------------
  34. INIT_XMM sse2
  35. cglobal conv_s16_to_s32, 3,3,3, dst, src, len
  36. lea lenq, [2*lend]
  37. lea dstq, [dstq+2*lenq]
  38. add srcq, lenq
  39. neg lenq
  40. .loop:
  41. mova m2, [srcq+lenq]
  42. pxor m0, m0
  43. pxor m1, m1
  44. punpcklwd m0, m2
  45. punpckhwd m1, m2
  46. mova [dstq+2*lenq ], m0
  47. mova [dstq+2*lenq+mmsize], m1
  48. add lenq, mmsize
  49. jl .loop
  50. REP_RET
  51. ;------------------------------------------------------------------------------
  52. ; void ff_conv_s16_to_flt(float *dst, const int16_t *src, int len);
  53. ;------------------------------------------------------------------------------
  54. %macro CONV_S16_TO_FLT 0
  55. cglobal conv_s16_to_flt, 3,3,3, dst, src, len
  56. lea lenq, [2*lend]
  57. add srcq, lenq
  58. lea dstq, [dstq + 2*lenq]
  59. neg lenq
  60. mova m2, [pf_s16_inv_scale]
  61. ALIGN 16
  62. .loop:
  63. mova m0, [srcq+lenq]
  64. S16_TO_S32_SX 0, 1
  65. cvtdq2ps m0, m0
  66. cvtdq2ps m1, m1
  67. mulps m0, m2
  68. mulps m1, m2
  69. mova [dstq+2*lenq ], m0
  70. mova [dstq+2*lenq+mmsize], m1
  71. add lenq, mmsize
  72. jl .loop
  73. REP_RET
  74. %endmacro
  75. INIT_XMM sse2
  76. CONV_S16_TO_FLT
  77. INIT_XMM sse4
  78. CONV_S16_TO_FLT
  79. ;------------------------------------------------------------------------------
  80. ; void ff_conv_s32_to_s16(int16_t *dst, const int32_t *src, int len);
  81. ;------------------------------------------------------------------------------
  82. %macro CONV_S32_TO_S16 0
  83. cglobal conv_s32_to_s16, 3,3,4, dst, src, len
  84. lea lenq, [2*lend]
  85. lea srcq, [srcq+2*lenq]
  86. add dstq, lenq
  87. neg lenq
  88. .loop:
  89. mova m0, [srcq+2*lenq ]
  90. mova m1, [srcq+2*lenq+ mmsize]
  91. mova m2, [srcq+2*lenq+2*mmsize]
  92. mova m3, [srcq+2*lenq+3*mmsize]
  93. psrad m0, 16
  94. psrad m1, 16
  95. psrad m2, 16
  96. psrad m3, 16
  97. packssdw m0, m1
  98. packssdw m2, m3
  99. mova [dstq+lenq ], m0
  100. mova [dstq+lenq+mmsize], m2
  101. add lenq, mmsize*2
  102. jl .loop
  103. %if mmsize == 8
  104. emms
  105. RET
  106. %else
  107. REP_RET
  108. %endif
  109. %endmacro
  110. INIT_MMX mmx
  111. CONV_S32_TO_S16
  112. INIT_XMM sse2
  113. CONV_S32_TO_S16
  114. ;------------------------------------------------------------------------------
  115. ; void ff_conv_s32_to_flt(float *dst, const int32_t *src, int len);
  116. ;------------------------------------------------------------------------------
  117. %macro CONV_S32_TO_FLT 0
  118. cglobal conv_s32_to_flt, 3,3,3, dst, src, len
  119. lea lenq, [4*lend]
  120. add srcq, lenq
  121. add dstq, lenq
  122. neg lenq
  123. mova m0, [pf_s32_inv_scale]
  124. ALIGN 16
  125. .loop:
  126. cvtdq2ps m1, [srcq+lenq ]
  127. cvtdq2ps m2, [srcq+lenq+mmsize]
  128. mulps m1, m1, m0
  129. mulps m2, m2, m0
  130. mova [dstq+lenq ], m1
  131. mova [dstq+lenq+mmsize], m2
  132. add lenq, mmsize*2
  133. jl .loop
  134. REP_RET
  135. %endmacro
  136. INIT_XMM sse2
  137. CONV_S32_TO_FLT
  138. %if HAVE_AVX
  139. INIT_YMM avx
  140. CONV_S32_TO_FLT
  141. %endif
  142. ;------------------------------------------------------------------------------
  143. ; void ff_conv_flt_to_s16(int16_t *dst, const float *src, int len);
  144. ;------------------------------------------------------------------------------
  145. INIT_XMM sse2
  146. cglobal conv_flt_to_s16, 3,3,5, dst, src, len
  147. lea lenq, [2*lend]
  148. lea srcq, [srcq+2*lenq]
  149. add dstq, lenq
  150. neg lenq
  151. mova m4, [pf_s16_scale]
  152. .loop:
  153. mova m0, [srcq+2*lenq ]
  154. mova m1, [srcq+2*lenq+1*mmsize]
  155. mova m2, [srcq+2*lenq+2*mmsize]
  156. mova m3, [srcq+2*lenq+3*mmsize]
  157. mulps m0, m4
  158. mulps m1, m4
  159. mulps m2, m4
  160. mulps m3, m4
  161. cvtps2dq m0, m0
  162. cvtps2dq m1, m1
  163. cvtps2dq m2, m2
  164. cvtps2dq m3, m3
  165. packssdw m0, m1
  166. packssdw m2, m3
  167. mova [dstq+lenq ], m0
  168. mova [dstq+lenq+mmsize], m2
  169. add lenq, mmsize*2
  170. jl .loop
  171. REP_RET
  172. ;------------------------------------------------------------------------------
  173. ; void ff_conv_flt_to_s32(int32_t *dst, const float *src, int len);
  174. ;------------------------------------------------------------------------------
  175. %macro CONV_FLT_TO_S32 0
  176. cglobal conv_flt_to_s32, 3,3,5, dst, src, len
  177. lea lenq, [lend*4]
  178. add srcq, lenq
  179. add dstq, lenq
  180. neg lenq
  181. mova m4, [pf_s32_scale]
  182. .loop:
  183. mulps m0, m4, [srcq+lenq ]
  184. mulps m1, m4, [srcq+lenq+1*mmsize]
  185. mulps m2, m4, [srcq+lenq+2*mmsize]
  186. mulps m3, m4, [srcq+lenq+3*mmsize]
  187. cvtps2dq m0, m0
  188. cvtps2dq m1, m1
  189. cvtps2dq m2, m2
  190. cvtps2dq m3, m3
  191. mova [dstq+lenq ], m0
  192. mova [dstq+lenq+1*mmsize], m1
  193. mova [dstq+lenq+2*mmsize], m2
  194. mova [dstq+lenq+3*mmsize], m3
  195. add lenq, mmsize*4
  196. jl .loop
  197. REP_RET
  198. %endmacro
  199. INIT_XMM sse2
  200. CONV_FLT_TO_S32
  201. %if HAVE_AVX
  202. INIT_YMM avx
  203. CONV_FLT_TO_S32
  204. %endif
  205. ;-----------------------------------------------------------------------------
  206. ; void ff_conv_fltp_to_flt_6ch(float *dst, float *const *src, int len,
  207. ; int channels);
  208. ;-----------------------------------------------------------------------------
  209. %macro CONV_FLTP_TO_FLT_6CH 0
  210. cglobal conv_fltp_to_flt_6ch, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
  211. %if ARCH_X86_64
  212. mov lend, r2d
  213. %else
  214. %define lend dword r2m
  215. %endif
  216. mov src1q, [srcq+1*gprsize]
  217. mov src2q, [srcq+2*gprsize]
  218. mov src3q, [srcq+3*gprsize]
  219. mov src4q, [srcq+4*gprsize]
  220. mov src5q, [srcq+5*gprsize]
  221. mov srcq, [srcq]
  222. sub src1q, srcq
  223. sub src2q, srcq
  224. sub src3q, srcq
  225. sub src4q, srcq
  226. sub src5q, srcq
  227. .loop:
  228. mova m0, [srcq ]
  229. mova m1, [srcq+src1q]
  230. mova m2, [srcq+src2q]
  231. mova m3, [srcq+src3q]
  232. mova m4, [srcq+src4q]
  233. mova m5, [srcq+src5q]
  234. %if cpuflag(sse4)
  235. SBUTTERFLYPS 0, 1, 6
  236. SBUTTERFLYPS 2, 3, 6
  237. SBUTTERFLYPS 4, 5, 6
  238. blendps m6, m4, m0, 1100b
  239. movlhps m0, m2
  240. movhlps m4, m2
  241. blendps m2, m5, m1, 1100b
  242. movlhps m1, m3
  243. movhlps m5, m3
  244. movaps [dstq ], m0
  245. movaps [dstq+16], m6
  246. movaps [dstq+32], m4
  247. movaps [dstq+48], m1
  248. movaps [dstq+64], m2
  249. movaps [dstq+80], m5
  250. %else ; mmx
  251. SBUTTERFLY dq, 0, 1, 6
  252. SBUTTERFLY dq, 2, 3, 6
  253. SBUTTERFLY dq, 4, 5, 6
  254. movq [dstq ], m0
  255. movq [dstq+ 8], m2
  256. movq [dstq+16], m4
  257. movq [dstq+24], m1
  258. movq [dstq+32], m3
  259. movq [dstq+40], m5
  260. %endif
  261. add srcq, mmsize
  262. add dstq, mmsize*6
  263. sub lend, mmsize/4
  264. jg .loop
  265. %if mmsize == 8
  266. emms
  267. RET
  268. %else
  269. REP_RET
  270. %endif
  271. %endmacro
  272. INIT_MMX mmx
  273. CONV_FLTP_TO_FLT_6CH
  274. INIT_XMM sse4
  275. CONV_FLTP_TO_FLT_6CH
  276. %if HAVE_AVX
  277. INIT_XMM avx
  278. CONV_FLTP_TO_FLT_6CH
  279. %endif