/*
 * VC1 NEON optimisations
 *
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"
#include "config.h"

@ Transpose rows into columns of a matrix of 16-bit elements. For 4x4, pass
@ double-word registers, for 8x4, pass quad-word registers.
.macro transpose16 r0, r1, r2, r3
        @ At this point:
        @   row[0]  r0
        @   row[1]  r1
        @   row[2]  r2
        @   row[3]  r3

        vtrn.16 \r0, \r1  @ first and second row
        vtrn.16 \r2, \r3  @ third and fourth row
        vtrn.32 \r0, \r2  @ first and third row
        vtrn.32 \r1, \r3  @ second and fourth row

        @ At this point, if registers are quad-word:
        @   column[0]  d0
        @   column[1]  d2
        @   column[2]  d4
        @   column[3]  d6
        @   column[4]  d1
        @   column[5]  d3
        @   column[6]  d5
        @   column[7]  d7

        @ At this point, if registers are double-word:
        @   column[0]  d0
        @   column[1]  d1
        @   column[2]  d2
        @   column[3]  d3
.endm

@ ff_vc1_inv_trans_{4,8}x{4,8}_neon and overflow: The input values in the file
@ are supposed to be in a specific range so as to allow for 16-bit math without
@ causing overflows, but sometimes the input values are just big enough to
@ barely cause overflow in vadd instructions like:
@
@   vadd.i16  q0, q8, q10
@   vshr.s16  q0, q0, #\rshift
@
@ To prevent these borderline cases from overflowing, we just need one more
@ bit of precision, which is accomplished by replacing the sequence above with:
@
@   vhadd.s16 q0, q8, q10
@   vshr.s16  q0, q0, #(\rshift - 1)
@
@ This works because vhadd is a single instruction that adds, then shifts to
@ the right once, all before writing the result to the destination register.
@
@ Even with this workaround, there were still some files that caused overflows
@ in ff_vc1_inv_trans_8x8_neon. See the comments in ff_vc1_inv_trans_8x8_neon
@ for the additional workaround.
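@ The replacement is exact: for arithmetic shifts, ((a + b) >> 1) >> (rshift - 1)
@ equals (a + b) >> rshift, and vhadd.s16 forms a + b at full precision before
@ halving, so the 16-bit intermediate can no longer wrap.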

@ Takes 4 columns of 8 values each and operates on them. Modeled after the first
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns:  q0 q1 q2 q3
@ Output columns: q0 q1 q2 q3
@ Trashes: r12 q8 q9 q10 q11 q12 q13
.macro vc1_inv_trans_4x8_helper add rshift
        @ Compute temp1, temp2 and set up the scalars #17, #22, #10
        vadd.i16 q12, q0, q2     @ temp1 = src[0] + src[2]
        movw r12, #17
        vsub.i16 q13, q0, q2     @ temp2 = src[0] - src[2]
        movt r12, #22
        vmov.32 d0[0], r12
        movw r12, #10
        vmov.16 d1[0], r12

        vmov.i16 q8, #\add       @ t1 will accumulate here
        vmov.i16 q9, #\add       @ t2 will accumulate here

        vmul.i16 q10, q1, d0[1]  @ t3 = 22 * src[1]
        vmul.i16 q11, q3, d0[1]  @ t4 = 22 * src[3]
        vmla.i16 q8, q12, d0[0]  @ t1 = 17 * temp1 + add
        vmla.i16 q9, q13, d0[0]  @ t2 = 17 * temp2 + add
        vmla.i16 q10, q3, d1[0]  @ t3 += 10 * src[3]
        vmls.i16 q11, q1, d1[0]  @ t4 -= 10 * src[1]

        vhadd.s16 q0, q8, q10    @ dst[0] = (t1 + t3) >> 1
        vhsub.s16 q3, q8, q10    @ dst[3] = (t1 - t3) >> 1
        vhsub.s16 q1, q9, q11    @ dst[1] = (t2 - t4) >> 1
        vhadd.s16 q2, q9, q11    @ dst[2] = (t2 + t4) >> 1

        @ The halving add/sub above already did one shift
        vshr.s16 q0, q0, #(\rshift - 1)  @ dst[0] >>= (rshift - 1)
        vshr.s16 q3, q3, #(\rshift - 1)  @ dst[3] >>= (rshift - 1)
        vshr.s16 q1, q1, #(\rshift - 1)  @ dst[1] >>= (rshift - 1)
        vshr.s16 q2, q2, #(\rshift - 1)  @ dst[2] >>= (rshift - 1)
.endm

@ Takes 8 columns of 4 values each and operates on them. Modeled after the second
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns:  d0 d2 d4 d6 d1 d3 d5 d7
@ Output columns: d16 d17 d18 d19 d21 d20 d23 d22
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x4_helper add add1beforeshift rshift
        @ At this point:
        @   src[0]   d0   overwritten later
        @   src[8]   d2
        @   src[16]  d4   overwritten later
        @   src[24]  d6
        @   src[32]  d1   overwritten later
        @   src[40]  d3
        @   src[48]  d5   overwritten later
        @   src[56]  d7

        movw r12, #12
        vmov.i16 q14, #\add       @ t1|t2 will accumulate here
        movt r12, #6

        vadd.i16 d20, d0, d1      @ temp1 = src[0] + src[32]
        vsub.i16 d21, d0, d1      @ temp2 = src[0] - src[32]
        vmov.i32 d0[0], r12       @ 16-bit: d0[0] = #12, d0[1] = #6

        vshl.i16 q15, q2, #4      @ t3|t4 = 16 * (src[16]|src[48])
        vswp d4, d5               @ q2 = src[48]|src[16]

        vmla.i16 q14, q10, d0[0]  @ t1|t2 = 12 * (temp1|temp2) + add

        movw r12, #15
        movt r12, #9
        vmov.i32 d0[1], r12       @ 16-bit: d0[2] = #15, d0[3] = #9

        vneg.s16 d31, d31         @ t4 = -t4
        vmla.i16 q15, q2, d0[1]   @ t3|t4 += 6 * (src[48]|src[16])

        @ At this point:
        @   d0[2]  #15
        @   d0[3]  #9
        @   q1     src[8]|src[40]
        @   q3     src[24]|src[56]
        @   q14    old t1|t2
        @   q15    old t3|t4

        vshl.i16 q8, q1, #4       @ t1|t2 = 16 * (src[8]|src[40])
        vswp d2, d3               @ q1 = src[40]|src[8]
        vshl.i16 q12, q3, #4      @ temp3a|temp4a = 16 * (src[24]|src[56])
        vswp d6, d7               @ q3 = src[56]|src[24]
        vshl.i16 q13, q1, #2      @ temp3b|temp4b = 4 * (src[40]|src[8])
        vshl.i16 q2, q3, #2       @ temp1|temp2 = 4 * (src[56]|src[24])
        vswp d3, d6               @ q1 = src[40]|src[56], q3 = src[8]|src[24]
        vsub.i16 q9, q13, q12     @ t3|t4 = -(temp3a|temp4a) + (temp3b|temp4b)
        vadd.i16 q8, q8, q2       @ t1|t2 += temp1|temp2
        vmul.i16 q12, q3, d0[3]   @ temp3|temp4 = 9 * (src[8]|src[24])
        vmla.i16 q8, q1, d0[3]    @ t1|t2 += 9 * (src[40]|src[56])
        vswp d6, d7               @ q3 = src[24]|src[8]
        vswp d2, d3               @ q1 = src[56]|src[40]

        vsub.i16 q11, q14, q15    @ t8|t7 = old t1|t2 - old t3|t4
        vadd.i16 q10, q14, q15    @ t5|t6 = old t1|t2 + old t3|t4
        .if \add1beforeshift
        vmov.i16 q15, #1
        .endif

        vadd.i16 d18, d18, d24    @ t3 += temp3
        vsub.i16 d19, d19, d25    @ t4 -= temp4
        vswp d22, d23             @ q11 = t7|t8

        vneg.s16 d17, d17         @ t2 = -t2
        vmla.i16 q9, q1, d0[2]    @ t3|t4 += 15 * (src[56]|src[40])
        vmla.i16 q8, q3, d0[2]    @ t1|t2 += 15 * (src[24]|src[8])

        @ At this point:
        @   t1  d16
        @   t2  d17
        @   t3  d18
        @   t4  d19
        @   t5  d20
        @   t6  d21
        @   t7  d22
        @   t8  d23
        @   #1  q15

        .if \add1beforeshift
        vadd.i16 q3, q15, q10     @ line[7,6] = t5|t6 + 1
        vadd.i16 q2, q15, q11     @ line[5,4] = t7|t8 + 1
        .endif

        @ Sometimes this overflows, so to get one additional bit of precision, use
        @ a single instruction that both adds and shifts right (halving).
        vhadd.s16 q1, q9, q11     @ line[2,3] = (t3|t4 + t7|t8) >> 1
        vhadd.s16 q0, q8, q10     @ line[0,1] = (t1|t2 + t5|t6) >> 1
        .if \add1beforeshift
        vhsub.s16 q2, q2, q9      @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1
        vhsub.s16 q3, q3, q8      @ line[7,6] = (t5|t6 - t1|t2 + 1) >> 1
        .else
        vhsub.s16 q2, q11, q9     @ line[5,4] = (t7|t8 - t3|t4) >> 1
        vhsub.s16 q3, q10, q8     @ line[7,6] = (t5|t6 - t1|t2) >> 1
        .endif

        vshr.s16 q9, q1, #(\rshift - 1)   @ one shift is already done by vhadd/vhsub above
        vshr.s16 q8, q0, #(\rshift - 1)
        vshr.s16 q10, q2, #(\rshift - 1)
        vshr.s16 q11, q3, #(\rshift - 1)

        @ At this point:
        @   dst[0]  d16
        @   dst[1]  d17
        @   dst[2]  d18
        @   dst[3]  d19
        @   dst[4]  d21
        @   dst[5]  d20
        @   dst[6]  d23
        @   dst[7]  d22
.endm

@ This is modeled after the first and second for loop in vc1_inv_trans_8x8_c.
@ Input columns:  q8, q9, q10, q11, q12, q13, q14, q15
@ Output columns: q8, q9, q10, q11, q12, q13, q14, q15
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x8_helper add add1beforeshift rshift
        @ This actually computes half of t1, t2, t3, t4, as explained below
        @ near `tNhalf`.
        vmov.i16 q0, #(6 / 2)     @ q0 = #6/2
        vshl.i16 q1, q10, #3      @ t3 = 16/2 * src[16]
        vshl.i16 q3, q14, #3      @ temp4 = 16/2 * src[48]
        vmul.i16 q2, q10, q0      @ t4 = 6/2 * src[16]
        vmla.i16 q1, q14, q0      @ t3 += 6/2 * src[48]
        @ unused: q0, q10, q14
        vmov.i16 q0, #(12 / 2)    @ q0 = #12/2
        vadd.i16 q10, q8, q12     @ temp1 = src[0] + src[32]
        vsub.i16 q14, q8, q12     @ temp2 = src[0] - src[32]
        @ unused: q8, q12
        vmov.i16 q8, #(\add / 2)  @ t1 will accumulate here
        vmov.i16 q12, #(\add / 2) @ t2 will accumulate here
        movw r12, #15
        vsub.i16 q2, q2, q3       @ t4 = 6/2 * src[16] - 16/2 * src[48]
        movt r12, #9
        @ unused: q3
        vmla.i16 q8, q10, q0      @ t1 = 12/2 * temp1 + add
        vmla.i16 q12, q14, q0     @ t2 = 12/2 * temp2 + add
        vmov.i32 d0[0], r12
        @ unused: q3, q10, q14

        @ At this point:
        @   q0   d0 = #15|#9
        @   q1   old t3
        @   q2   old t4
        @   q3
        @   q8   old t1
        @   q9   src[8]
        @   q10
        @   q11  src[24]
        @   q12  old t2
        @   q13  src[40]
        @   q14
        @   q15  src[56]
        @ unused: q3, q10, q14

        movw r12, #16
        vshl.i16 q3, q9, #4       @ t1 = 16 * src[8]
        movt r12, #4
        vshl.i16 q10, q9, #2      @ t4 = 4 * src[8]
        vmov.i32 d1[0], r12
        vmul.i16 q14, q9, d0[0]   @ t2 = 15 * src[8]
        vmul.i16 q9, q9, d0[1]    @ t3 = 9 * src[8]
        @ unused: none
        vmla.i16 q3, q11, d0[0]   @ t1 += 15 * src[24]
        vmls.i16 q10, q11, d0[1]  @ t4 -= 9 * src[24]
        vmls.i16 q14, q11, d1[1]  @ t2 -= 4 * src[24]
        vmls.i16 q9, q11, d1[0]   @ t3 -= 16 * src[24]
        @ unused: q11
        vmla.i16 q3, q13, d0[1]   @ t1 += 9 * src[40]
        vmla.i16 q10, q13, d0[0]  @ t4 += 15 * src[40]
        vmls.i16 q14, q13, d1[0]  @ t2 -= 16 * src[40]
        vmla.i16 q9, q13, d1[1]   @ t3 += 4 * src[40]
        @ unused: q11, q13

        @ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes
        @ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved.
        vadd.i16 q11, q8, q1      @ t5 = t1 + t3
        vsub.i16 q1, q8, q1       @ t8 = t1 - t3
        vadd.i16 q13, q12, q2     @ t6 = t2 + t4
        vsub.i16 q2, q12, q2      @ t7 = t2 - t4
        @ unused: q8, q12
        .if \add1beforeshift
        vmov.i16 q12, #1
        .endif
        @ unused: q8

        vmla.i16 q3, q15, d1[1]   @ t1 += 4 * src[56]
        vmls.i16 q14, q15, d0[1]  @ t2 -= 9 * src[56]
        vmla.i16 q9, q15, d0[0]   @ t3 += 15 * src[56]
        vmls.i16 q10, q15, d1[0]  @ t4 -= 16 * src[56]
        @ unused: q0, q8, q15

        @ At this point:
        @   t1      q3
        @   t2      q14
        @   t3      q9
        @   t4      q10
        @   t5half  q11
        @   t6half  q13
        @   t7half  q2
        @   t8half  q1
        @   #1      q12
        @
        @ tNhalf is half of the value of tN (as described in vc1_inv_trans_8x8_c).
        @ This is done because sometimes files have input that causes tN + tM to
        @ overflow. To avoid this overflow, we compute tNhalf, then compute
        @ tNhalf + tM (which doesn't overflow), and then we use vhadd to compute
        @ (tNhalf + (tNhalf + tM)) >> 1 which does not overflow because it is
        @ one instruction.
        @
        @ For each pair of tN and tM, do:
        @   lineA = t5half + t1
        @   if add1beforeshift: t1 -= 1
        @   lineA = (t5half + lineA) >> 1
        @   lineB = t5half - t1
        @   lineB = (t5half + lineB) >> 1
        @   lineA >>= rshift - 1
        @   lineB >>= rshift - 1
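        @ (When add1beforeshift is set, the "+ 1" rounding only applies to the
        @ subtractive lines; it comes from the tN -= 1 step above, since
        @ t5half - (tN - 1) == t5half - tN + 1.)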

        vadd.i16 q8, q11, q3      @ q8 = t5half + t1
        .if \add1beforeshift
        vsub.i16 q3, q3, q12      @ q3 = t1 - 1
        .endif

        vadd.i16 q0, q13, q14     @ q0 = t6half + t2
        .if \add1beforeshift
        vsub.i16 q14, q14, q12    @ q14 = t2 - 1
        .endif

        vadd.i16 q15, q2, q9      @ q15 = t7half + t3
        .if \add1beforeshift
        vsub.i16 q9, q9, q12      @ q9 = t3 - 1
        .endif
        @ unused: none

        vhadd.s16 q8, q11, q8     @ q8 = (t5half + t5half + t1) >> 1
        vsub.i16 q3, q11, q3      @ q3 = t5half - t1 + 1
        vhadd.s16 q0, q13, q0     @ q0 = (t6half + t6half + t2) >> 1
        vsub.i16 q14, q13, q14    @ q14 = t6half - t2 + 1
        vhadd.s16 q15, q2, q15    @ q15 = (t7half + t7half + t3) >> 1
        vsub.i16 q9, q2, q9       @ q9 = t7half - t3 + 1
        vhadd.s16 q3, q11, q3     @ q3 = (t5half + t5half - t1 + 1) >> 1
        @ unused: q11

        vadd.i16 q11, q1, q10     @ q11 = t8half + t4
        .if \add1beforeshift
        vsub.i16 q10, q10, q12    @ q10 = t4 - 1
        .endif
        @ unused: q12

        vhadd.s16 q14, q13, q14   @ q14 = (t6half + t6half - t2 + 1) >> 1
        @ unused: q12, q13
        vhadd.s16 q13, q2, q9     @ q13 = (t7half + t7half - t3 + 1) >> 1
        @ unused: q12, q2, q9

        vsub.i16 q10, q1, q10     @ q10 = t8half - t4 + 1
        vhadd.s16 q11, q1, q11    @ q11 = (t8half + t8half + t4) >> 1

        vshr.s16 q8, q8, #(\rshift - 1)    @ q8 = line[0]
        vhadd.s16 q12, q1, q10             @ q12 = (t8half + t8half - t4 + 1) >> 1
        vshr.s16 q9, q0, #(\rshift - 1)    @ q9 = line[1]
        vshr.s16 q10, q15, #(\rshift - 1)  @ q10 = line[2]
        vshr.s16 q11, q11, #(\rshift - 1)  @ q11 = line[3]
        vshr.s16 q12, q12, #(\rshift - 1)  @ q12 = line[4]
        vshr.s16 q13, q13, #(\rshift - 1)  @ q13 = line[5]
        vshr.s16 q14, q14, #(\rshift - 1)  @ q14 = line[6]
        vshr.s16 q15, q3, #(\rshift - 1)   @ q15 = line[7]
.endm

@ (int16_t *block [r0])
function ff_vc1_inv_trans_8x8_neon, export=1
        vld1.64 {q8-q9},   [r0,:128]!
        vld1.64 {q10-q11}, [r0,:128]!
        vld1.64 {q12-q13}, [r0,:128]!
        vld1.64 {q14-q15}, [r0,:128]
        sub r0, r0, #(16 * 2 * 3)  @ restore r0

        @ At this point:
        @   src[0]   q8
        @   src[8]   q9
        @   src[16]  q10
        @   src[24]  q11
        @   src[32]  q12
        @   src[40]  q13
        @   src[48]  q14
        @   src[56]  q15

        vc1_inv_trans_8x8_helper add=4 add1beforeshift=0 rshift=3

        @ Transpose result matrix of 8x8
        swap4 d17, d19, d21, d23, d24, d26, d28, d30
        transpose16_4x4 q8, q9, q10, q11, q12, q13, q14, q15

        vc1_inv_trans_8x8_helper add=64 add1beforeshift=1 rshift=7

        vst1.64 {q8-q9},   [r0,:128]!
        vst1.64 {q10-q11}, [r0,:128]!
        vst1.64 {q12-q13}, [r0,:128]!
        vst1.64 {q14-q15}, [r0,:128]

        bx lr
endfunc

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_8x4_neon, export=1
        vld1.64 {q0-q1}, [r2,:128]!  @ load 8 * 4 * 2 = 64 bytes / 16 bytes per quad = 4 quad registers
        vld1.64 {q2-q3}, [r2,:128]

        transpose16 q0 q1 q2 q3  @ transpose rows to columns

        @ At this point:
        @   src[0]  d0
        @   src[1]  d2
        @   src[2]  d4
        @   src[3]  d6
        @   src[4]  d1
        @   src[5]  d3
        @   src[6]  d5
        @   src[7]  d7

        vc1_inv_trans_8x4_helper add=4 add1beforeshift=0 rshift=3

        @ Move output to more standardized registers
        vmov d0, d16
        vmov d2, d17
        vmov d4, d18
        vmov d6, d19
        vmov d1, d21
        vmov d3, d20
        vmov d5, d23
        vmov d7, d22

        @ At this point:
        @   dst[0]  d0
        @   dst[1]  d2
        @   dst[2]  d4
        @   dst[3]  d6
        @   dst[4]  d1
        @   dst[5]  d3
        @   dst[6]  d5
        @   dst[7]  d7

        transpose16 q0 q1 q2 q3  @ turn columns into rows

        @ At this point:
        @   row[0]  q0
        @   row[1]  q1
        @   row[2]  q2
        @   row[3]  q3

        vc1_inv_trans_4x8_helper add=64 rshift=7

        @ At this point:
        @   line[0].l  d0
        @   line[0].h  d1
        @   line[1].l  d2
        @   line[1].h  d3
        @   line[2].l  d4
        @   line[2].h  d5
        @   line[3].l  d6
        @   line[3].h  d7

        @ unused registers: q12, q13, q14, q15

        vld1.64 {d28}, [r0,:64], r1  @ read dest
        vld1.64 {d29}, [r0,:64], r1
        vld1.64 {d30}, [r0,:64], r1
        vld1.64 {d31}, [r0,:64], r1
        sub r0, r0, r1, lsl #2  @ restore original r0 value

        vaddw.u8 q0, q0, d28  @ line[0] += dest[0]
        vaddw.u8 q1, q1, d29  @ line[1] += dest[1]
        vaddw.u8 q2, q2, d30  @ line[2] += dest[2]
        vaddw.u8 q3, q3, d31  @ line[3] += dest[3]

        vqmovun.s16 d0, q0  @ line[0]
        vqmovun.s16 d1, q1  @ line[1]
        vqmovun.s16 d2, q2  @ line[2]
        vqmovun.s16 d3, q3  @ line[3]

        vst1.64 {d0}, [r0,:64], r1  @ write dest
        vst1.64 {d1}, [r0,:64], r1
        vst1.64 {d2}, [r0,:64], r1
        vst1.64 {d3}, [r0,:64]

        bx lr
endfunc

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x8_neon, export=1
        mov r12, #(8 * 2)  @ 8 elements per line, each element 2 bytes
        vld4.16 {d0[],  d2[],  d4[],  d6[]},  [r2,:64], r12  @ read each column into a q register
        vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r2,:64], r12
        vld4.16 {d0[2], d2[2], d4[2], d6[2]}, [r2,:64], r12
        vld4.16 {d0[3], d2[3], d4[3], d6[3]}, [r2,:64], r12
        vld4.16 {d1[],  d3[],  d5[],  d7[]},  [r2,:64], r12
        vld4.16 {d1[1], d3[1], d5[1], d7[1]}, [r2,:64], r12
        vld4.16 {d1[2], d3[2], d5[2], d7[2]}, [r2,:64], r12
        vld4.16 {d1[3], d3[3], d5[3], d7[3]}, [r2,:64]

        vc1_inv_trans_4x8_helper add=4 rshift=3

        @ At this point:
        @   dst[0] = q0
        @   dst[1] = q1
        @   dst[2] = q2
        @   dst[3] = q3

        transpose16 q0 q1 q2 q3  @ Transpose rows (registers) into columns

        vc1_inv_trans_8x4_helper add=64 add1beforeshift=1 rshift=7

        vld1.32 {d28[]},  [r0,:32], r1  @ read dest
        vld1.32 {d28[1]}, [r0,:32], r1
        vld1.32 {d29[]},  [r0,:32], r1
        vld1.32 {d29[1]}, [r0,:32], r1

        vld1.32 {d30[]},  [r0,:32], r1
        vld1.32 {d30[0]}, [r0,:32], r1
        vld1.32 {d31[]},  [r0,:32], r1
        vld1.32 {d31[0]}, [r0,:32], r1
        sub r0, r0, r1, lsl #3  @ restore original r0 value

        vaddw.u8 q8, q8, d28    @ line[0,1] += dest[0,1]
        vaddw.u8 q9, q9, d29    @ line[2,3] += dest[2,3]
        vaddw.u8 q10, q10, d30  @ line[5,4] += dest[5,4]
        vaddw.u8 q11, q11, d31  @ line[7,6] += dest[7,6]

        vqmovun.s16 d16, q8   @ clip(line[0,1])
        vqmovun.s16 d18, q9   @ clip(line[2,3])
        vqmovun.s16 d20, q10  @ clip(line[5,4])
        vqmovun.s16 d22, q11  @ clip(line[7,6])

        vst1.32 {d16[0]}, [r0,:32], r1  @ write dest
        vst1.32 {d16[1]}, [r0,:32], r1
        vst1.32 {d18[0]}, [r0,:32], r1
        vst1.32 {d18[1]}, [r0,:32], r1

        vst1.32 {d20[1]}, [r0,:32], r1
        vst1.32 {d20[0]}, [r0,:32], r1
        vst1.32 {d22[1]}, [r0,:32], r1
        vst1.32 {d22[0]}, [r0,:32]

        bx lr
endfunc

@ Setup constants in registers which are used by vc1_inv_trans_4x4_helper
.macro vc1_inv_trans_4x4_helper_setup
        vmov.i16 q13, #17
        vmov.i16 q14, #22
        vmov.i16 d30, #10  @ only need double-word, not quad-word
.endm

@ This is modeled after the first for loop in vc1_inv_trans_4x4_c.
.macro vc1_inv_trans_4x4_helper add rshift
        vmov.i16 q2, #\add    @ t1|t2 will accumulate here

        vadd.i16 d16, d0, d1  @ temp1 = src[0] + src[2]
        vsub.i16 d17, d0, d1  @ temp2 = src[0] - src[2]
        vmul.i16 q3, q14, q1  @ t3|t4 = 22 * (src[1]|src[3])
        vmla.i16 q2, q13, q8  @ t1|t2 = 17 * (temp1|temp2) + add
        vmla.i16 d6, d30, d3  @ t3 += 10 * src[3]
        vmls.i16 d7, d30, d2  @ t4 -= 10 * src[1]

        vadd.i16 q0, q2, q3   @ dst[0,2] = (t1|t2 + t3|t4)
        vsub.i16 q1, q2, q3   @ dst[3,1] = (t1|t2 - t3|t4)
        vshr.s16 q0, q0, #\rshift  @ dst[0,2] >>= rshift
        vshr.s16 q1, q1, #\rshift  @ dst[3,1] >>= rshift
.endm

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x4_neon, export=1
        mov r12, #(8 * 2)  @ 8 elements per line, each element 2 bytes
        vld4.16 {d0[],  d1[],  d2[],  d3[]},  [r2,:64], r12  @ read each column into a register
        vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r12
        vld4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r12
        vld4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64]

        vswp d1, d2  @ so that we can later access column 1 and column 3 as a single q1 register

        vc1_inv_trans_4x4_helper_setup

        @ At this point:
        @   src[0] = d0
        @   src[1] = d2
        @   src[2] = d1
        @   src[3] = d3

        vc1_inv_trans_4x4_helper add=4 rshift=3  @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   dst[0] = d0
        @   dst[1] = d3
        @   dst[2] = d1
        @   dst[3] = d2

        transpose16 d0 d3 d1 d2  @ Transpose rows (registers) into columns

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d3
        @   src[16] = d1
        @   src[24] = d2

        vswp d2, d3  @ so that we can later access column 1 and column 3 in order as a single q1 register

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d2
        @   src[16] = d1
        @   src[24] = d3

        vc1_inv_trans_4x4_helper add=64 rshift=7  @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   line[0] = d0
        @   line[1] = d3
        @   line[2] = d1
        @   line[3] = d2

        vld1.32 {d18[]},  [r0,:32], r1  @ read dest
        vld1.32 {d19[]},  [r0,:32], r1
        vld1.32 {d18[1]}, [r0,:32], r1
        vld1.32 {d19[0]}, [r0,:32], r1
        sub r0, r0, r1, lsl #2  @ restore original r0 value

        vaddw.u8 q0, q0, d18  @ line[0,2] += dest[0,2]
        vaddw.u8 q1, q1, d19  @ line[3,1] += dest[3,1]

        vqmovun.s16 d0, q0  @ clip(line[0,2])
        vqmovun.s16 d1, q1  @ clip(line[3,1])

        vst1.32 {d0[0]}, [r0,:32], r1  @ write dest
        vst1.32 {d1[1]}, [r0,:32], r1
        vst1.32 {d0[1]}, [r0,:32], r1
        vst1.32 {d1[0]}, [r0,:32]

        bx lr
endfunc

#if HAVE_AS_DN_DIRECTIVE
@ The absolute values of the multiplication constants from vc1_mspel_filter and vc1_mspel_{ver,hor}_filter_16bits.
@ The sign is embedded in the code below that carries out the multiplication (mspel_filter{,.16}).
#define MSPEL_MODE_1_MUL_CONSTANTS  4 53 18 3
#define MSPEL_MODE_2_MUL_CONSTANTS  1 9 9 1
#define MSPEL_MODE_3_MUL_CONSTANTS  3 18 53 4
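@ In other words, the effective taps are (-a, +b, +c, -d): mspel_filter{,.16}
@ below subtract the a and d terms (vmlsl/vsubw) and add the b and c terms
@ (vmlal), so e.g. mode 2 applies the (-1, 9, 9, -1) kernel.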

@ These constants are from reading the source code of vc1_mspel_mc and determining the value that
@ is added to `rnd` to result in the variable `r`, and the value of the variable `shift`.
#define MSPEL_MODES_11_ADDSHIFT_CONSTANTS  15 5
#define MSPEL_MODES_12_ADDSHIFT_CONSTANTS  3 3
#define MSPEL_MODES_13_ADDSHIFT_CONSTANTS  15 5
#define MSPEL_MODES_21_ADDSHIFT_CONSTANTS  MSPEL_MODES_12_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_22_ADDSHIFT_CONSTANTS  0 1
#define MSPEL_MODES_23_ADDSHIFT_CONSTANTS  3 3
#define MSPEL_MODES_31_ADDSHIFT_CONSTANTS  MSPEL_MODES_13_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_32_ADDSHIFT_CONSTANTS  MSPEL_MODES_23_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_33_ADDSHIFT_CONSTANTS  15 5

@ The addition and shift constants from vc1_mspel_filter.
#define MSPEL_MODE_1_ADDSHIFT_CONSTANTS  32 6
#define MSPEL_MODE_2_ADDSHIFT_CONSTANTS  8 4
#define MSPEL_MODE_3_ADDSHIFT_CONSTANTS  32 6

@ Setup constants in registers for a subsequent use of mspel_filter{,.16}.
.macro mspel_constants typesize reg_a reg_b reg_c reg_d filter_a filter_b filter_c filter_d reg_add filter_add_register
        @ Define double-word register aliases. Typesize should be i8 or i16.
        ra .dn \reg_a\().\typesize
        rb .dn \reg_b\().\typesize
        rc .dn \reg_c\().\typesize
        rd .dn \reg_d\().\typesize

        @ Only set the register if the value is not 1 and unique
        .if \filter_a != 1
        vmov ra, #\filter_a  @ ra = filter_a
        .endif
        vmov rb, #\filter_b  @ rb = filter_b
        .if \filter_b != \filter_c
        vmov rc, #\filter_c  @ rc = filter_c
        .endif
        .if \filter_d != 1
        vmov rd, #\filter_d  @ rd = filter_d
        .endif

        @ vdup to double the size of typesize
        .ifc \typesize,i8
        vdup.16 \reg_add, \filter_add_register  @ reg_add = filter_add_register
        .else
        vdup.32 \reg_add, \filter_add_register  @ reg_add = filter_add_register
        .endif

        .unreq ra
        .unreq rb
        .unreq rc
        .unreq rd
.endm

@ After mspel_constants has been used, do the filtering.
.macro mspel_filter acc dest src0 src1 src2 src3 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift narrow=1
        .if \filter_a != 1
        @ If filter_a != 1, then we need a move and subtract instruction
        vmov \acc, \reg_add           @ acc = reg_add
        vmlsl.u8 \acc, \reg_a, \src0  @ acc -= filter_a * src[-stride]
        .else
        @ If filter_a is 1, then just subtract without an extra move
        vsubw.u8 \acc, \reg_add, \src0  @ acc = reg_add - src[-stride]  @ since filter_a == 1
        .endif
        vmlal.u8 \acc, \reg_b, \src1  @ acc += filter_b * src[0]
        .if \filter_b != \filter_c
        vmlal.u8 \acc, \reg_c, \src2  @ acc += filter_c * src[stride]
        .else
        @ If filter_b is the same as filter_c, use the same reg_b register
        vmlal.u8 \acc, \reg_b, \src2  @ acc += filter_c * src[stride]  @ where filter_c == filter_b
        .endif
        .if \filter_d != 1
        @ If filter_d != 1, then do a multiply accumulate
        vmlsl.u8 \acc, \reg_d, \src3  @ acc -= filter_d * src[stride * 2]
        .else
        @ If filter_d is 1, then just do a subtract
        vsubw.u8 \acc, \acc, \src3    @ acc -= src[stride * 2]  @ since filter_d == 1
        .endif
        .if \narrow
        vqshrun.s16 \dest, \acc, #\filter_shift  @ dest = clip_uint8(acc >> filter_shift)
        .else
        vshr.s16 \dest, \acc, #\filter_shift     @ dest = acc >> filter_shift
        .endif
.endm
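
@ For example, the vertical-only mode 2 code below ends up invoking this macro as
@   mspel_filter q11 d21 d0 d1 d2 d3 1 9 9 1 d28 d29 d30 d31 q13 4
@ i.e. d21 = clip_uint8((reg_add - src[-stride] + 9 * src[0] + 9 * src[stride] - src[stride * 2]) >> 4),
@ where reg_add (q13) holds the broadcast rounding value.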

@ This is similar to mspel_filter, but the input is 16-bit instead of 8-bit and narrow=0 is not supported.
.macro mspel_filter.16 acc0 acc1 acc0_0 acc0_1 dest src0 src1 src2 src3 src4 src5 src6 src7 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift
        .if \filter_a != 1
        vmov \acc0, \reg_add
        vmov \acc1, \reg_add
        vmlsl.s16 \acc0, \reg_a, \src0
        vmlsl.s16 \acc1, \reg_a, \src1
        .else
        vsubw.s16 \acc0, \reg_add, \src0
        vsubw.s16 \acc1, \reg_add, \src1
        .endif
        vmlal.s16 \acc0, \reg_b, \src2
        vmlal.s16 \acc1, \reg_b, \src3
        .if \filter_b != \filter_c
        vmlal.s16 \acc0, \reg_c, \src4
        vmlal.s16 \acc1, \reg_c, \src5
        .else
        vmlal.s16 \acc0, \reg_b, \src4
        vmlal.s16 \acc1, \reg_b, \src5
        .endif
        .if \filter_d != 1
        vmlsl.s16 \acc0, \reg_d, \src6
        vmlsl.s16 \acc1, \reg_d, \src7
        .else
        vsubw.s16 \acc0, \acc0, \src6
        vsubw.s16 \acc1, \acc1, \src7
        .endif
        @ Use acc0_0 and acc0_1 as temp space
        vqshrun.s32 \acc0_0, \acc0, #\filter_shift  @ Shift and narrow with saturation from s32 to u16
        vqshrun.s32 \acc0_1, \acc1, #\filter_shift
        vqmovn.u16 \dest, \acc0  @ Narrow with saturation from u16 to u8
.endm

@ Register usage for put_vc1_mspel_mc functions. Registers marked 'hv' are only used in put_vc1_mspel_mc_hv.
@
@   r0        adjusted dst
@   r1        adjusted src
@   r2        stride
@   r3        adjusted rnd
@   r4   [hv] tmp
@   r11  [hv] sp saved
@   r12       loop counter
@   d0        src[-stride]
@   d1        src[0]
@   d2        src[stride]
@   d3        src[stride * 2]
@   q0   [hv] src[-stride]
@   q1   [hv] src[0]
@   q2   [hv] src[stride]
@   q3   [hv] src[stride * 2]
@   d21       often result from mspel_filter
@   q11       accumulator 0
@   q12  [hv] accumulator 1
@   q13       accumulator initial value
@   d28       filter_a
@   d29       filter_b
@   d30       filter_c
@   d31       filter_d

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_hv hmode vmode filter_h_a filter_h_b filter_h_c filter_h_d filter_v_a filter_v_b filter_v_c filter_v_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon, export=1
        push {r4, r11, lr}
        mov r11, sp            @ r11 = stack pointer before realignment
A       bic sp, sp, #15        @ sp = round down to multiple of 16 bytes
T       bic r4, r11, #15
T       mov sp, r4
        sub sp, sp, #(8*2*16)  @ make space for 8 rows * 2 bytes per element * 16 elements per row (to fit 11 actual elements per row)
        mov r4, sp             @ r4 = int16_t tmp[8 * 16]

        sub r1, r1, #1         @ src -= 1
        .if \filter_add != 0
        add r3, r3, #\filter_add  @ r3 = filter_add + rnd
        .endif
        mov r12, #8            @ loop counter
        sub r1, r1, r2         @ r1 = &src[-stride]  @ slide back

        @ Do vertical filtering from src into tmp
        mspel_constants i8 d28 d29 d30 d31 \filter_v_a \filter_v_b \filter_v_c \filter_v_d q13 r3

        vld1.64 {d0,d1}, [r1], r2
        vld1.64 {d2,d3}, [r1], r2
        vld1.64 {d4,d5}, [r1], r2

1:
        subs r12, r12, #4

        vld1.64 {d6,d7}, [r1], r2
        mspel_filter q11 q11 d0 d2 d4 d6 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter q12 q12 d1 d3 d5 d7 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64 {q11,q12}, [r4,:128]!  @ store and increment

        vld1.64 {d0,d1}, [r1], r2
        mspel_filter q11 q11 d2 d4 d6 d0 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter q12 q12 d3 d5 d7 d1 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64 {q11,q12}, [r4,:128]!  @ store and increment

        vld1.64 {d2,d3}, [r1], r2
        mspel_filter q11 q11 d4 d6 d0 d2 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter q12 q12 d5 d7 d1 d3 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64 {q11,q12}, [r4,:128]!  @ store and increment

        vld1.64 {d4,d5}, [r1], r2
        mspel_filter q11 q11 d6 d0 d2 d4 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter q12 q12 d7 d1 d3 d5 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64 {q11,q12}, [r4,:128]!  @ store and increment

        bne 1b

        rsb r3, r3, #(64 + \filter_add)  @ r3 = (64 + filter_add) - r3
        mov r12, #8  @ loop counter
        mov r4, sp   @ r4 = tmp

        @ Do horizontal filtering from temp to dst
        mspel_constants i16 d28 d29 d30 d31 \filter_h_a \filter_h_b \filter_h_c \filter_h_d q13 r3

2:
        subs r12, r12, #1

        vld1.64 {q0,q1}, [r4,:128]!  @ read one line of tmp
        vext.16 q2, q0, q1, #2
        vext.16 q3, q0, q1, #3
        vext.16 q1, q0, q1, #1       @ do last because it writes to q1 which is read by the other vext instructions
        mspel_filter.16 q11 q12 d22 d23 d21 d0 d1 d2 d3 d4 d5 d6 d7 \filter_h_a \filter_h_b \filter_h_c \filter_h_d d28 d29 d30 d31 q13 7
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        bne 2b

        mov sp, r11
        pop {r4, r11, pc}
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for horizontal and vertical filtering.
#define PUT_VC1_MSPEL_MC_HV(hmode, vmode) \
    put_vc1_mspel_mc_hv hmode vmode \
        MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS \
        MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS \
        MSPEL_MODES_ ## hmode ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_HV(1, 1)
PUT_VC1_MSPEL_MC_HV(1, 2)
PUT_VC1_MSPEL_MC_HV(1, 3)
PUT_VC1_MSPEL_MC_HV(2, 1)
PUT_VC1_MSPEL_MC_HV(2, 2)
PUT_VC1_MSPEL_MC_HV(2, 3)
PUT_VC1_MSPEL_MC_HV(3, 1)
PUT_VC1_MSPEL_MC_HV(3, 2)
PUT_VC1_MSPEL_MC_HV(3, 3)

#undef PUT_VC1_MSPEL_MC_HV
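
@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])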
.macro put_vc1_mspel_mc_h_only hmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()0_neon, export=1
        rsb r3, r3, #\filter_add  @ r3 = filter_add - r = filter_add - rnd
        mov r12, #8               @ loop counter
        sub r1, r1, #1            @ slide back, using immediate

        mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3

1:
        subs r12, r12, #1

        vld1.64 {d0,d1}, [r1], r2  @ read 16 bytes even though we only need 11, also src += stride
        vext.8 d2, d0, d1, #2
        vext.8 d3, d0, d1, #3
        vext.8 d1, d0, d1, #1      @ do last because it writes to d1 which is read by the other vext instructions
        mspel_filter q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        bne 1b

        bx lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for horizontal only filtering.
#define PUT_VC1_MSPEL_MC_H_ONLY(hmode) \
    put_vc1_mspel_mc_h_only hmode MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS MSPEL_MODE_ ## hmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_H_ONLY(1)
PUT_VC1_MSPEL_MC_H_ONLY(2)
PUT_VC1_MSPEL_MC_H_ONLY(3)

#undef PUT_VC1_MSPEL_MC_H_ONLY

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_v_only vmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc0\vmode\()_neon, export=1
        add r3, r3, #\filter_add - 1  @ r3 = filter_add - r = filter_add - (1 - rnd) = filter_add - 1 + rnd
        mov r12, #8                   @ loop counter
        sub r1, r1, r2                @ r1 = &src[-stride]  @ slide back

        mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3

        vld1.64 {d0}, [r1], r2  @ d0 = src[-stride]
        vld1.64 {d1}, [r1], r2  @ d1 = src[0]
        vld1.64 {d2}, [r1], r2  @ d2 = src[stride]

1:
        subs r12, r12, #4

        vld1.64 {d3}, [r1], r2  @ d3 = src[stride * 2]
        mspel_filter q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        vld1.64 {d0}, [r1], r2  @ d0 = next line
        mspel_filter q11 d21 d1 d2 d3 d0 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        vld1.64 {d1}, [r1], r2  @ d1 = next line
        mspel_filter q11 d21 d2 d3 d0 d1 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        vld1.64 {d2}, [r1], r2  @ d2 = next line
        mspel_filter q11 d21 d3 d0 d1 d2 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64 {d21}, [r0,:64], r2  @ store and increment dst

        bne 1b

        bx lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for vertical only filtering.
#define PUT_VC1_MSPEL_MC_V_ONLY(vmode) \
    put_vc1_mspel_mc_v_only vmode MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS MSPEL_MODE_ ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_V_ONLY(1)
PUT_VC1_MSPEL_MC_V_ONLY(2)
PUT_VC1_MSPEL_MC_V_ONLY(3)

#undef PUT_VC1_MSPEL_MC_V_ONLY

#endif
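
@ Plain 8x8 byte copy (no filtering).
@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2])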
function ff_put_pixels8x8_neon, export=1
        vld1.64 {d0}, [r1], r2
        vld1.64 {d1}, [r1], r2
        vld1.64 {d2}, [r1], r2
        vld1.64 {d3}, [r1], r2
        vld1.64 {d4}, [r1], r2
        vld1.64 {d5}, [r1], r2
        vld1.64 {d6}, [r1], r2
        vld1.64 {d7}, [r1]
        vst1.64 {d0}, [r0,:64], r2
        vst1.64 {d1}, [r0,:64], r2
        vst1.64 {d2}, [r0,:64], r2
        vst1.64 {d3}, [r0,:64], r2
        vst1.64 {d4}, [r0,:64], r2
        vst1.64 {d5}, [r0,:64], r2
        vst1.64 {d6}, [r0,:64], r2
        vst1.64 {d7}, [r0,:64]
        bx lr
endfunc
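
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])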
function ff_vc1_inv_trans_8x8_dc_neon, export=1
        ldrsh r2, [r2]  @ int dc = block[0];

        vld1.64 {d0}, [r0,:64], r1
        vld1.64 {d1}, [r0,:64], r1
        vld1.64 {d4}, [r0,:64], r1
        vld1.64 {d5}, [r0,:64], r1

        add r2, r2, r2, lsl #1  @ dc = (3 * dc + 1) >> 1;

        vld1.64 {d6}, [r0,:64], r1
        add r2, r2, #1
        vld1.64 {d7}, [r0,:64], r1
        vld1.64 {d16}, [r0,:64], r1
        vld1.64 {d17}, [r0,:64], r1

        asr r2, r2, #1
        sub r0, r0, r1, lsl #3  @ restore r0 to original value
        add r2, r2, r2, lsl #1  @ dc = (3 * dc + 16) >> 5;
        add r2, r2, #16
        asr r2, r2, #5

        vdup.16 q1, r2  @ dc

        vaddw.u8 q9, q1, d0
        vaddw.u8 q10, q1, d1
        vaddw.u8 q11, q1, d4
        vaddw.u8 q12, q1, d5

        vqmovun.s16 d0, q9
        vqmovun.s16 d1, q10
        vqmovun.s16 d4, q11
        vst1.64 {d0}, [r0,:64], r1
        vqmovun.s16 d5, q12
        vst1.64 {d1}, [r0,:64], r1

        vaddw.u8 q13, q1, d6
        vst1.64 {d4}, [r0,:64], r1
        vaddw.u8 q14, q1, d7
        vst1.64 {d5}, [r0,:64], r1
        vaddw.u8 q15, q1, d16
        vaddw.u8 q1, q1, d17  @ this destroys q1

        vqmovun.s16 d6, q13
        vqmovun.s16 d7, q14
        vqmovun.s16 d16, q15
        vqmovun.s16 d17, q1

        vst1.64 {d6}, [r0,:64], r1
        vst1.64 {d7}, [r0,:64], r1
        vst1.64 {d16}, [r0,:64], r1
        vst1.64 {d17}, [r0,:64]

        bx lr
endfunc
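
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])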
function ff_vc1_inv_trans_8x4_dc_neon, export=1
        ldrsh r2, [r2]  @ int dc = block[0];

        vld1.64 {d0}, [r0,:64], r1
        vld1.64 {d1}, [r0,:64], r1
        vld1.64 {d4}, [r0,:64], r1
        vld1.64 {d5}, [r0,:64], r1

        add r2, r2, r2, lsl #1  @ dc = (3 * dc + 1) >> 1;
        sub r0, r0, r1, lsl #2  @ restore r0 to original value
        add r2, r2, #1
        asr r2, r2, #1
        add r2, r2, r2, lsl #4  @ dc = (17 * dc + 64) >> 7;
        add r2, r2, #64
        asr r2, r2, #7

        vdup.16 q1, r2  @ dc

        vaddw.u8 q3, q1, d0
        vaddw.u8 q8, q1, d1
        vaddw.u8 q9, q1, d4
        vaddw.u8 q10, q1, d5

        vqmovun.s16 d0, q3
        vqmovun.s16 d1, q8
        vqmovun.s16 d4, q9
        vst1.64 {d0}, [r0,:64], r1
        vqmovun.s16 d5, q10
        vst1.64 {d1}, [r0,:64], r1
        vst1.64 {d4}, [r0,:64], r1
        vst1.64 {d5}, [r0,:64]

        bx lr
endfunc
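
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])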
function ff_vc1_inv_trans_4x8_dc_neon, export=1
        ldrsh r2, [r2]  @ int dc = block[0];

        vld1.32 {d0[]},  [r0,:32], r1
        vld1.32 {d1[]},  [r0,:32], r1
        vld1.32 {d0[1]}, [r0,:32], r1
        vld1.32 {d1[1]}, [r0,:32], r1

        add r2, r2, r2, lsl #4  @ dc = (17 * dc + 4) >> 3;

        vld1.32 {d4[]},  [r0,:32], r1
        add r2, r2, #4
        vld1.32 {d5[]},  [r0,:32], r1
        vld1.32 {d4[1]}, [r0,:32], r1
        asr r2, r2, #3
        vld1.32 {d5[1]}, [r0,:32], r1

        add r2, r2, r2, lsl #1  @ dc = (12 * dc + 64) >> 7;
        sub r0, r0, r1, lsl #3  @ restore r0 to original value
        lsl r2, r2, #2
        add r2, r2, #64
        asr r2, r2, #7

        vdup.16 q1, r2  @ dc

        vaddw.u8 q3, q1, d0
        vaddw.u8 q8, q1, d1
        vaddw.u8 q9, q1, d4
        vaddw.u8 q10, q1, d5

        vqmovun.s16 d0, q3
        vst1.32 {d0[0]}, [r0,:32], r1
        vqmovun.s16 d1, q8
        vst1.32 {d1[0]}, [r0,:32], r1
        vqmovun.s16 d4, q9
        vst1.32 {d0[1]}, [r0,:32], r1
        vqmovun.s16 d5, q10
        vst1.32 {d1[1]}, [r0,:32], r1

        vst1.32 {d4[0]}, [r0,:32], r1
        vst1.32 {d5[0]}, [r0,:32], r1
        vst1.32 {d4[1]}, [r0,:32], r1
        vst1.32 {d5[1]}, [r0,:32]

        bx lr
endfunc
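
@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])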
function ff_vc1_inv_trans_4x4_dc_neon, export=1
        ldrsh r2, [r2]  @ int dc = block[0];

        vld1.32 {d0[]},  [r0,:32], r1
        vld1.32 {d1[]},  [r0,:32], r1
        vld1.32 {d0[1]}, [r0,:32], r1
        vld1.32 {d1[1]}, [r0,:32], r1

        add r2, r2, r2, lsl #4  @ dc = (17 * dc + 4) >> 3;
        sub r0, r0, r1, lsl #2  @ restore r0 to original value
        add r2, r2, #4
        asr r2, r2, #3
        add r2, r2, r2, lsl #4  @ dc = (17 * dc + 64) >> 7;
        add r2, r2, #64
        asr r2, r2, #7

        vdup.16 q1, r2  @ dc

        vaddw.u8 q2, q1, d0
        vaddw.u8 q3, q1, d1

        vqmovun.s16 d0, q2
        vst1.32 {d0[0]}, [r0,:32], r1
        vqmovun.s16 d1, q3
        vst1.32 {d1[0]}, [r0,:32], r1
        vst1.32 {d0[1]}, [r0,:32], r1
        vst1.32 {d1[1]}, [r0,:32]

        bx lr
endfunc