You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

304 lines
12KB

/*
 * ARM NEON optimised MDCT
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  21. #include "libavutil/arm/asm.S"
  22. preserve8
  23. #define ff_fft_calc_neon X(ff_fft_calc_neon)
  24. function ff_imdct_half_neon, export=1
  25. push {r4-r8,lr}
  26. mov r12, #1
  27. ldr lr, [r0, #20] @ mdct_bits
  28. ldr r4, [r0, #24] @ tcos
  29. ldr r3, [r0, #8] @ revtab
  30. lsl r12, r12, lr @ n = 1 << nbits
  31. lsr lr, r12, #2 @ n4 = n >> 2
  32. add r7, r2, r12, lsl #1
  33. mov r12, #-16
  34. sub r7, r7, #16
  35. vld2.32 {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
  36. vld2.32 {d0-d1}, [r2,:128]! @ d0 =m0,x d1 =m1,x
  37. vrev64.32 d17, d17
  38. vld2.32 {d2,d3}, [r4,:128]! @ d2=c0,c1 d3=s0,s2
  39. vmul.f32 d6, d17, d2
  40. vmul.f32 d7, d0, d2
  41. 1:
  42. subs lr, lr, #2
  43. ldr r6, [r3], #4
  44. vmul.f32 d4, d0, d3
  45. vmul.f32 d5, d17, d3
  46. vsub.f32 d4, d6, d4
  47. vadd.f32 d5, d5, d7
  48. uxth r8, r6, ror #16
  49. uxth r6, r6
  50. add r8, r1, r8, lsl #3
  51. add r6, r1, r6, lsl #3
  52. beq 1f
  53. vld2.32 {d16-d17},[r7,:128],r12
  54. vld2.32 {d0-d1}, [r2,:128]!
  55. vrev64.32 d17, d17
  56. vld2.32 {d2,d3}, [r4,:128]! @ d2=c0,c1 d3=s0,s2
  57. vmul.f32 d6, d17, d2
  58. vmul.f32 d7, d0, d2
  59. vst2.32 {d4[0],d5[0]}, [r6,:64]
  60. vst2.32 {d4[1],d5[1]}, [r8,:64]
  61. b 1b
  62. 1:
  63. vst2.32 {d4[0],d5[0]}, [r6,:64]
  64. vst2.32 {d4[1],d5[1]}, [r8,:64]
  65. mov r4, r0
  66. mov r6, r1
  67. bl ff_fft_calc_neon
  68. mov r12, #1
  69. ldr lr, [r4, #20] @ mdct_bits
  70. ldr r4, [r4, #24] @ tcos
  71. lsl r12, r12, lr @ n = 1 << nbits
  72. lsr lr, r12, #3 @ n8 = n >> 3
  73. add r4, r4, lr, lsl #3
  74. add r6, r6, lr, lsl #3
  75. sub r1, r4, #16
  76. sub r3, r6, #16
  77. mov r7, #-16
  78. mov r8, r6
  79. mov r0, r3
  80. vld2.32 {d0-d1}, [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
  81. vld2.32 {d20-d21},[r6,:128]! @ d20=i2,r2 d21=i3,r3
  82. vld2.32 {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
  83. 1:
  84. subs lr, lr, #2
  85. vmul.f32 d7, d0, d18
  86. vld2.32 {d17,d19},[r4,:128]! @ d17=c2,c3 d19=s2,s3
  87. vmul.f32 d4, d1, d18
  88. vmul.f32 d5, d21, d19
  89. vmul.f32 d6, d20, d19
  90. vmul.f32 d22, d1, d16
  91. vmul.f32 d23, d21, d17
  92. vmul.f32 d24, d0, d16
  93. vmul.f32 d25, d20, d17
  94. vadd.f32 d7, d7, d22
  95. vadd.f32 d6, d6, d23
  96. vsub.f32 d4, d4, d24
  97. vsub.f32 d5, d5, d25
  98. beq 1f
  99. vld2.32 {d0-d1}, [r3,:128], r7
  100. vld2.32 {d20-d21},[r6,:128]!
  101. vld2.32 {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
  102. vrev64.32 q3, q3
  103. vst2.32 {d4,d6}, [r0,:128], r7
  104. vst2.32 {d5,d7}, [r8,:128]!
  105. b 1b
  106. 1:
  107. vrev64.32 q3, q3
  108. vst2.32 {d4,d6}, [r0,:128]
  109. vst2.32 {d5,d7}, [r8,:128]
  110. pop {r4-r8,pc}
  111. endfunc
  112. function ff_imdct_calc_neon, export=1
  113. push {r4-r6,lr}
  114. ldr r3, [r0, #20]
  115. mov r4, #1
  116. mov r5, r1
  117. lsl r4, r4, r3
  118. add r1, r1, r4
  119. bl ff_imdct_half_neon
  120. add r0, r5, r4, lsl #2
  121. add r1, r5, r4, lsl #1
  122. sub r0, r0, #8
  123. sub r2, r1, #16
  124. mov r3, #-16
  125. mov r6, #-8
  126. vmov.i32 d30, #1<<31
  127. 1:
  128. vld1.32 {d0-d1}, [r2,:128], r3
  129. pld [r0, #-16]
  130. vrev64.32 q0, q0
  131. vld1.32 {d2-d3}, [r1,:128]!
  132. veor d4, d1, d30
  133. pld [r2, #-16]
  134. vrev64.32 q1, q1
  135. veor d5, d0, d30
  136. vst1.32 {d2}, [r0,:64], r6
  137. vst1.32 {d3}, [r0,:64], r6
  138. vst1.32 {d4-d5}, [r5,:128]!
  139. subs r4, r4, #16
  140. bgt 1b
  141. pop {r4-r6,pc}
  142. endfunc
  143. function ff_mdct_calc_neon, export=1
  144. push {r4-r10,lr}
  145. mov r12, #1
  146. ldr lr, [r0, #20] @ mdct_bits
  147. ldr r4, [r0, #24] @ tcos
  148. ldr r3, [r0, #8] @ revtab
  149. lsl lr, r12, lr @ n = 1 << nbits
  150. add r7, r2, lr @ in4u
  151. sub r9, r7, #16 @ in4d
  152. add r2, r7, lr, lsl #1 @ in3u
  153. add r8, r9, lr, lsl #1 @ in3d
  154. add r5, r4, lr, lsl #1
  155. sub r5, r5, #16
  156. sub r3, r3, #4
  157. mov r12, #-16
  158. vld2.32 {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
  159. vld2.32 {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
  160. vld2.32 {d0, d2}, [r7,:128]! @ in4u0,in4u1 in2d1,in2d0
  161. vrev64.32 q9, q9 @ in4d0,in4d1 in3d0,in3d1
  162. vld2.32 {d1, d3}, [r2,:128]! @ in3u0,in3u1 in1d1,in1d0
  163. vsub.f32 d0, d18, d0 @ in4d-in4u I
  164. vld2.32 {d20,d21},[r4,:128]! @ c0,c1 s0,s1
  165. vrev64.32 q1, q1 @ in2d0,in2d1 in1d0,in1d1
  166. vld2.32 {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
  167. vadd.f32 d1, d1, d19 @ in3u+in3d -R
  168. vsub.f32 d16, d16, d2 @ in0u-in2d R
  169. vadd.f32 d17, d17, d3 @ in2u+in1d -I
  170. 1:
  171. vmul.f32 d7, d0, d21 @ I*s
  172. A ldr r10, [r3, lr, lsr #1]
  173. T lsr r10, lr, #1
  174. T ldr r10, [r3, r10]
  175. vmul.f32 d6, d1, d20 @ -R*c
  176. ldr r6, [r3, #4]!
  177. vmul.f32 d4, d1, d21 @ -R*s
  178. vmul.f32 d5, d0, d20 @ I*c
  179. vmul.f32 d24, d16, d30 @ R*c
  180. vmul.f32 d25, d17, d31 @ -I*s
  181. vmul.f32 d22, d16, d31 @ R*s
  182. vmul.f32 d23, d17, d30 @ I*c
  183. subs lr, lr, #16
  184. vsub.f32 d6, d6, d7 @ -R*c-I*s
  185. vadd.f32 d7, d4, d5 @ -R*s+I*c
  186. vsub.f32 d24, d25, d24 @ I*s-R*c
  187. vadd.f32 d25, d22, d23 @ R*s-I*c
  188. beq 1f
  189. mov r12, #-16
  190. vld2.32 {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
  191. vld2.32 {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
  192. vneg.f32 d7, d7 @ R*s-I*c
  193. vld2.32 {d0, d2}, [r7,:128]! @ in4u0,in4u1 in2d1,in2d0
  194. vrev64.32 q9, q9 @ in4d0,in4d1 in3d0,in3d1
  195. vld2.32 {d1, d3}, [r2,:128]! @ in3u0,in3u1 in1d1,in1d0
  196. vsub.f32 d0, d18, d0 @ in4d-in4u I
  197. vld2.32 {d20,d21},[r4,:128]! @ c0,c1 s0,s1
  198. vrev64.32 q1, q1 @ in2d0,in2d1 in1d0,in1d1
  199. vld2.32 {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
  200. vadd.f32 d1, d1, d19 @ in3u+in3d -R
  201. vsub.f32 d16, d16, d2 @ in0u-in2d R
  202. vadd.f32 d17, d17, d3 @ in2u+in1d -I
  203. uxth r12, r6, ror #16
  204. uxth r6, r6
  205. add r12, r1, r12, lsl #3
  206. add r6, r1, r6, lsl #3
  207. vst2.32 {d6[0],d7[0]}, [r6,:64]
  208. vst2.32 {d6[1],d7[1]}, [r12,:64]
  209. uxth r6, r10, ror #16
  210. uxth r10, r10
  211. add r6 , r1, r6, lsl #3
  212. add r10, r1, r10, lsl #3
  213. vst2.32 {d24[0],d25[0]},[r10,:64]
  214. vst2.32 {d24[1],d25[1]},[r6,:64]
  215. b 1b
  216. 1:
  217. vneg.f32 d7, d7 @ R*s-I*c
  218. uxth r12, r6, ror #16
  219. uxth r6, r6
  220. add r12, r1, r12, lsl #3
  221. add r6, r1, r6, lsl #3
  222. vst2.32 {d6[0],d7[0]}, [r6,:64]
  223. vst2.32 {d6[1],d7[1]}, [r12,:64]
  224. uxth r6, r10, ror #16
  225. uxth r10, r10
  226. add r6 , r1, r6, lsl #3
  227. add r10, r1, r10, lsl #3
  228. vst2.32 {d24[0],d25[0]},[r10,:64]
  229. vst2.32 {d24[1],d25[1]},[r6,:64]
  230. mov r4, r0
  231. mov r6, r1
  232. bl ff_fft_calc_neon
  233. mov r12, #1
  234. ldr lr, [r4, #20] @ mdct_bits
  235. ldr r4, [r4, #24] @ tcos
  236. lsl r12, r12, lr @ n = 1 << nbits
  237. lsr lr, r12, #3 @ n8 = n >> 3
  238. add r4, r4, lr, lsl #3
  239. add r6, r6, lr, lsl #3
  240. sub r1, r4, #16
  241. sub r3, r6, #16
  242. mov r7, #-16
  243. mov r8, r6
  244. mov r0, r3
  245. vld2.32 {d0-d1}, [r3,:128], r7 @ d0 =r1,i1 d1 =r0,i0
  246. vld2.32 {d20-d21},[r6,:128]! @ d20=r2,i2 d21=r3,i3
  247. vld2.32 {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
  248. 1:
  249. subs lr, lr, #2
  250. vmul.f32 d7, d0, d18 @ r1*s1,r0*s0
  251. vld2.32 {d17,d19},[r4,:128]! @ c2,c3 s2,s3
  252. vmul.f32 d4, d1, d18 @ i1*s1,i0*s0
  253. vmul.f32 d5, d21, d19 @ i2*s2,i3*s3
  254. vmul.f32 d6, d20, d19 @ r2*s2,r3*s3
  255. vmul.f32 d24, d0, d16 @ r1*c1,r0*c0
  256. vmul.f32 d25, d20, d17 @ r2*c2,r3*c3
  257. vmul.f32 d22, d21, d17 @ i2*c2,i3*c3
  258. vmul.f32 d23, d1, d16 @ i1*c1,i0*c0
  259. vadd.f32 d4, d4, d24 @ i1*s1+r1*c1,i0*s0+r0*c0
  260. vadd.f32 d5, d5, d25 @ i2*s2+r2*c2,i3*s3+r3*c3
  261. vsub.f32 d6, d22, d6 @ i2*c2-r2*s2,i3*c3-r3*s3
  262. vsub.f32 d7, d23, d7 @ i1*c1-r1*s1,i0*c0-r0*s0
  263. vneg.f32 q2, q2
  264. beq 1f
  265. vld2.32 {d0-d1}, [r3,:128], r7
  266. vld2.32 {d20-d21},[r6,:128]!
  267. vld2.32 {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
  268. vrev64.32 q3, q3
  269. vst2.32 {d4,d6}, [r0,:128], r7
  270. vst2.32 {d5,d7}, [r8,:128]!
  271. b 1b
  272. 1:
  273. vrev64.32 q3, q3
  274. vst2.32 {d4,d6}, [r0,:128]
  275. vst2.32 {d5,d7}, [r8,:128]
  276. pop {r4-r10,pc}
  277. endfunc