;******************************************************************************
;* x86 optimized Format Conversion Utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"
%include "util.asm"

SECTION_RODATA 32
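; float scale factors, stored as IEEE-754 single-precision bit patterns:
;   0x30000000 = 2^-31, 0x4f000000 = 2^31  (s32 <-> float)
;   0x38000000 = 2^-15, 0x47000000 = 2^15  (s16 <-> float)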
pf_s32_inv_scale: times 8 dd 0x30000000
pf_s32_scale:     times 8 dd 0x4f000000
pf_s16_inv_scale: times 4 dd 0x38000000
pf_s16_scale:     times 4 dd 0x47000000

SECTION_TEXT

;------------------------------------------------------------------------------
; void ff_conv_s16_to_s32(int32_t *dst, const int16_t *src, int len);
;------------------------------------------------------------------------------
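; Each s16 sample is widened to s32 by interleaving it above a zero word,
; which is equivalent to a left shift by 16 (value scaled by 65536).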
INIT_XMM sse2
cglobal conv_s16_to_s32, 3,3,3, dst, src, len
    lea      lenq, [2*lend]
    lea      dstq, [dstq+2*lenq]
    add      srcq, lenq
    neg      lenq
.loop:
    mova       m2, [srcq+lenq]
    pxor       m0, m0
    pxor       m1, m1
    punpcklwd  m0, m2
    punpckhwd  m1, m2
    mova  [dstq+2*lenq       ], m0
    mova  [dstq+2*lenq+mmsize], m1
    add      lenq, mmsize
    jl .loop
    REP_RET

;------------------------------------------------------------------------------
; void ff_conv_s16_to_flt(float *dst, const int16_t *src, int len);
;------------------------------------------------------------------------------
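; S16_TO_S32_SX (from the included util.asm) sign-extends each s16 sample to
; s32; the result is converted to float and scaled by 2^-15 so that full-scale
; input maps into the [-1.0, 1.0) range.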
%macro CONV_S16_TO_FLT 0
cglobal conv_s16_to_flt, 3,3,3, dst, src, len
    lea      lenq, [2*lend]
    add      srcq, lenq
    lea      dstq, [dstq + 2*lenq]
    neg      lenq
    mova       m2, [pf_s16_inv_scale]
    ALIGN 16
.loop:
    mova       m0, [srcq+lenq]
    S16_TO_S32_SX 0, 1
    cvtdq2ps   m0, m0
    cvtdq2ps   m1, m1
    mulps      m0, m2
    mulps      m1, m2
    mova  [dstq+2*lenq       ], m0
    mova  [dstq+2*lenq+mmsize], m1
    add      lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_FLT
INIT_XMM sse4
CONV_S16_TO_FLT

;------------------------------------------------------------------------------
; void ff_conv_s32_to_s16(int16_t *dst, const int32_t *src, int len);
;------------------------------------------------------------------------------
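; Keep the top 16 bits of each s32 sample: arithmetic shift right by 16, then
; pack pairs of registers back down to s16 with packssdw.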
%macro CONV_S32_TO_S16 0
cglobal conv_s32_to_s16, 3,3,4, dst, src, len
    lea      lenq, [2*lend]
    lea      srcq, [srcq+2*lenq]
    add      dstq, lenq
    neg      lenq
.loop:
    mova       m0, [srcq+2*lenq         ]
    mova       m1, [srcq+2*lenq+  mmsize]
    mova       m2, [srcq+2*lenq+2*mmsize]
    mova       m3, [srcq+2*lenq+3*mmsize]
    psrad      m0, 16
    psrad      m1, 16
    psrad      m2, 16
    psrad      m3, 16
    packssdw   m0, m1
    packssdw   m2, m3
    mova  [dstq+lenq       ], m0
    mova  [dstq+lenq+mmsize], m2
    add      lenq, mmsize*2
    jl .loop
%if mmsize == 8
    emms
    RET
%else
    REP_RET
%endif
%endmacro

INIT_MMX mmx
CONV_S32_TO_S16
INIT_XMM sse2
CONV_S32_TO_S16

;------------------------------------------------------------------------------
; void ff_conv_s32_to_flt(float *dst, const int32_t *src, int len);
;------------------------------------------------------------------------------
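; Convert s32 samples to float and scale by 2^-31 so that full-scale input
; maps into the [-1.0, 1.0) range.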
%macro CONV_S32_TO_FLT 0
cglobal conv_s32_to_flt, 3,3,3, dst, src, len
    lea      lenq, [4*lend]
    add      srcq, lenq
    add      dstq, lenq
    neg      lenq
    mova       m0, [pf_s32_inv_scale]
    ALIGN 16
.loop:
    cvtdq2ps   m1, [srcq+lenq       ]
    cvtdq2ps   m2, [srcq+lenq+mmsize]
    mulps      m1, m1, m0
    mulps      m2, m2, m0
    mova  [dstq+lenq       ], m1
    mova  [dstq+lenq+mmsize], m2
    add      lenq, mmsize*2
    jl .loop
%if mmsize == 32
    vzeroupper
    RET
%else
    REP_RET
%endif
%endmacro

INIT_XMM sse2
CONV_S32_TO_FLT
%if HAVE_AVX
INIT_YMM avx
CONV_S32_TO_FLT
%endif

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s16(int16_t *dst, const float *src, int len);
;------------------------------------------------------------------------------
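; Scale each float by 2^15, convert to s32 with cvtps2dq (round to nearest by
; default), then pack pairs of registers down to s16 with signed saturation.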
INIT_XMM sse2
cglobal conv_flt_to_s16, 3,3,5, dst, src, len
    lea      lenq, [2*lend]
    lea      srcq, [srcq+2*lenq]
    add      dstq, lenq
    neg      lenq
    mova       m4, [pf_s16_scale]
.loop:
    mova       m0, [srcq+2*lenq         ]
    mova       m1, [srcq+2*lenq+1*mmsize]
    mova       m2, [srcq+2*lenq+2*mmsize]
    mova       m3, [srcq+2*lenq+3*mmsize]
    mulps      m0, m4
    mulps      m1, m4
    mulps      m2, m4
    mulps      m3, m4
    cvtps2dq   m0, m0
    cvtps2dq   m1, m1
    cvtps2dq   m2, m2
    cvtps2dq   m3, m3
    packssdw   m0, m1
    packssdw   m2, m3
    mova  [dstq+lenq       ], m0
    mova  [dstq+lenq+mmsize], m2
    add      lenq, mmsize*2
    jl .loop
    REP_RET

;------------------------------------------------------------------------------
; void ff_conv_flt_to_s32(int32_t *dst, const float *src, int len);
;------------------------------------------------------------------------------
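; Scale each float by 2^31 and convert to s32 with cvtps2dq (round to nearest
; by default).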
%macro CONV_FLT_TO_S32 0
cglobal conv_flt_to_s32, 3,3,5, dst, src, len
    lea      lenq, [lend*4]
    add      srcq, lenq
    add      dstq, lenq
    neg      lenq
    mova       m4, [pf_s32_scale]
.loop:
    mulps      m0, m4, [srcq+lenq         ]
    mulps      m1, m4, [srcq+lenq+1*mmsize]
    mulps      m2, m4, [srcq+lenq+2*mmsize]
    mulps      m3, m4, [srcq+lenq+3*mmsize]
    cvtps2dq   m0, m0
    cvtps2dq   m1, m1
    cvtps2dq   m2, m2
    cvtps2dq   m3, m3
    mova  [dstq+lenq         ], m0
    mova  [dstq+lenq+1*mmsize], m1
    mova  [dstq+lenq+2*mmsize], m2
    mova  [dstq+lenq+3*mmsize], m3
    add      lenq, mmsize*4
    jl .loop
%if mmsize == 32
    vzeroupper
    RET
%else
    REP_RET
%endif
%endmacro

INIT_XMM sse2
CONV_FLT_TO_S32
%if HAVE_AVX
INIT_YMM avx
CONV_FLT_TO_S32
%endif

;-----------------------------------------------------------------------------
; void ff_conv_fltp_to_flt_6ch(float *dst, float *const *src, int len,
;                              int channels);
;-----------------------------------------------------------------------------
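; Interleave six planar float channels into one packed stream. The channel
; pointers src[1]..src[5] are converted to offsets relative to src[0], so a
; single pointer increment advances all six planes. The sse4 path transposes
; four samples per channel per iteration with SBUTTERFLYPS/blendps; the mmx
; path handles two samples per channel with SBUTTERFLY dq.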
%macro CONV_FLTP_TO_FLT_6CH 0
cglobal conv_fltp_to_flt_6ch, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
%if ARCH_X86_64
    mov      lend, r2d
%else
    %define lend dword r2m
%endif
    mov     src1q, [srcq+1*gprsize]
    mov     src2q, [srcq+2*gprsize]
    mov     src3q, [srcq+3*gprsize]
    mov     src4q, [srcq+4*gprsize]
    mov     src5q, [srcq+5*gprsize]
    mov      srcq, [srcq]
    sub     src1q, srcq
    sub     src2q, srcq
    sub     src3q, srcq
    sub     src4q, srcq
    sub     src5q, srcq
.loop:
    mova       m0, [srcq      ]
    mova       m1, [srcq+src1q]
    mova       m2, [srcq+src2q]
    mova       m3, [srcq+src3q]
    mova       m4, [srcq+src4q]
    mova       m5, [srcq+src5q]
%if cpuflag(sse4)
    SBUTTERFLYPS 0, 1, 6
    SBUTTERFLYPS 2, 3, 6
    SBUTTERFLYPS 4, 5, 6
    blendps    m6, m4, m0, 1100b
    movlhps    m0, m2
    movhlps    m4, m2
    blendps    m2, m5, m1, 1100b
    movlhps    m1, m3
    movhlps    m5, m3
    movaps [dstq   ], m0
    movaps [dstq+16], m6
    movaps [dstq+32], m4
    movaps [dstq+48], m1
    movaps [dstq+64], m2
    movaps [dstq+80], m5
%else ; mmx
    SBUTTERFLY dq, 0, 1, 6
    SBUTTERFLY dq, 2, 3, 6
    SBUTTERFLY dq, 4, 5, 6
    movq   [dstq   ], m0
    movq   [dstq+ 8], m2
    movq   [dstq+16], m4
    movq   [dstq+24], m1
    movq   [dstq+32], m3
    movq   [dstq+40], m5
%endif
    add      srcq, mmsize
    add      dstq, mmsize*6
    sub      lend, mmsize/4
    jg .loop
%if mmsize == 8
    emms
    RET
%else
    REP_RET
%endif
%endmacro

INIT_MMX mmx
CONV_FLTP_TO_FLT_6CH
INIT_XMM sse4
CONV_FLTP_TO_FLT_6CH
%if HAVE_AVX
INIT_XMM avx
CONV_FLTP_TO_FLT_6CH
%endif