/*
 * VC1 NEON optimisations
 *
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

@ Transpose rows into columns of a matrix of 16-bit elements. For 4x4, pass
@ double-word registers, for 8x4, pass quad-word registers.
.macro transpose16 r0, r1, r2, r3
        @ At this point:
        @   row[0]  r0
        @   row[1]  r1
        @   row[2]  r2
        @   row[3]  r3

        vtrn.16         \r0, \r1        @ first and second row
        vtrn.16         \r2, \r3        @ third and fourth row
        vtrn.32         \r0, \r2        @ first and third row
        vtrn.32         \r1, \r3        @ second and fourth row

        @ At this point, if registers are quad-word:
        @   column[0]  d0
        @   column[1]  d2
        @   column[2]  d4
        @   column[3]  d6
        @   column[4]  d1
        @   column[5]  d3
        @   column[6]  d5
        @   column[7]  d7
        @
        @ At this point, if registers are double-word:
        @   column[0]  d0
        @   column[1]  d1
        @   column[2]  d2
        @   column[3]  d3
.endm
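
@ Example use (see ff_vc1_inv_trans_8x4_neon below): `transpose16 q0 q1 q2 q3`
@ turns four 8-element rows held in q0-q3 into the eight 4-element columns
@ d0, d2, d4, d6, d1, d3, d5, d7, as listed in the quad-word mapping above.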

@ ff_vc1_inv_trans_{4,8}x{4,8}_neon and overflow: The input values in the file
@ are supposed to be in a specific range so as to allow for 16-bit math without
@ causing overflows, but sometimes the input values are just big enough to
@ barely cause overflow in vadd instructions like:
@
@   vadd.i16  q0, q8, q10
@   vshr.s16  q0, q0, #\rshift
@
@ To prevent these borderline cases from overflowing, we just need one more
@ bit of precision, which is accomplished by replacing the sequence above with:
@
@   vhadd.s16 q0, q8, q10
@   vshr.s16  q0, q0, #(\rshift - 1)
@
@ This works because vhadd is a single instruction that adds, then shifts to
@ the right once, all before writing the result to the destination register.
@
@ Even with this workaround, there were still some files that caused overflows
@ in ff_vc1_inv_trans_8x8_neon. See the comments in ff_vc1_inv_trans_8x8_neon
@ for the additional workaround.
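@
@ Illustrative example (hypothetical lane values, not taken from any file):
@ with \rshift == 3, a q8 lane of 20000 and a q10 lane of 15000,
@
@   vadd.i16  -> 20000 + 15000 = 35000, which wraps to -30536 in signed 16-bit
@
@ whereas
@
@   vhadd.s16          -> (20000 + 15000) >> 1 = 17500, computed without wrapping,
@   vshr.s16 #(3 - 1)  -> 17500 >> 2 = 4375,
@
@ which matches the intended (20000 + 15000) >> 3 = 4375.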

@ Takes 4 columns of 8 values each and operates on them. Modeled after the first
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns:  q0 q1 q2 q3
@ Output columns: q0 q1 q2 q3
@ Trashes: r12 q8 q9 q10 q11 q12 q13
.macro vc1_inv_trans_4x8_helper add rshift
        @ Compute temp1, temp2 and set up the scalars #17, #22, #10
        vadd.i16        q12, q0, q2             @ temp1 = src[0] + src[2]
        movw            r12, #17
        vsub.i16        q13, q0, q2             @ temp2 = src[0] - src[2]
        movt            r12, #22
        vmov.32         d0[0], r12
        movw            r12, #10
        vmov.16         d1[0], r12

        vmov.i16        q8,  #\add              @ t1 will accumulate here
        vmov.i16        q9,  #\add              @ t2 will accumulate here

        vmul.i16        q10, q1,  d0[1]         @ t3 = 22 * (src[1])
        vmul.i16        q11, q3,  d0[1]         @ t4 = 22 * (src[3])

        vmla.i16        q8,  q12, d0[0]         @ t1 = 17 * (temp1) + add
        vmla.i16        q9,  q13, d0[0]         @ t2 = 17 * (temp2) + add

        vmla.i16        q10, q3,  d1[0]         @ t3 += 10 * src[3]
        vmls.i16        q11, q1,  d1[0]         @ t4 -= 10 * src[1]

        vhadd.s16       q0,  q8,  q10           @ dst[0] = (t1 + t3) >> 1
        vhsub.s16       q3,  q8,  q10           @ dst[3] = (t1 - t3) >> 1
        vhsub.s16       q1,  q9,  q11           @ dst[1] = (t2 - t4) >> 1
        vhadd.s16       q2,  q9,  q11           @ dst[2] = (t2 + t4) >> 1

        @ Halving add/sub above already did one shift
        vshr.s16        q0,  q0,  #(\rshift - 1) @ dst[0] >>= (rshift - 1)
        vshr.s16        q3,  q3,  #(\rshift - 1) @ dst[3] >>= (rshift - 1)
        vshr.s16        q1,  q1,  #(\rshift - 1) @ dst[1] >>= (rshift - 1)
        vshr.s16        q2,  q2,  #(\rshift - 1) @ dst[2] >>= (rshift - 1)
.endm
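
@ In scalar terms, each of the 8 lanes above computes (a sketch of the first
@ loop of the C reference, before the halving trick described earlier):
@
@   t1 = 17 * (src[0] + src[2]) + add     t3 = 22 * src[1] + 10 * src[3]
@   t2 = 17 * (src[0] - src[2]) + add     t4 = 22 * src[3] - 10 * src[1]
@
@   dst[0] = (t1 + t3) >> rshift          dst[2] = (t2 + t4) >> rshift
@   dst[1] = (t2 - t4) >> rshift          dst[3] = (t1 - t3) >> rshift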

@ Takes 8 columns of 4 values each and operates on them. Modeled after the second
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns:  d0 d2 d4 d6 d1 d3 d5 d7
@ Output columns: d16 d17 d18 d19 d21 d20 d23 d22
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x4_helper add add1beforeshift rshift
        @ At this point:
        @   src[0]   d0  overwritten later
        @   src[8]   d2
        @   src[16]  d4  overwritten later
        @   src[24]  d6
        @   src[32]  d1  overwritten later
        @   src[40]  d3
        @   src[48]  d5  overwritten later
        @   src[56]  d7

        movw            r12, #12
        vmov.i16        q14, #\add              @ t1|t2 will accumulate here
        movt            r12, #6

        vadd.i16        d20, d0, d1             @ temp1 = src[0] + src[32]
        vsub.i16        d21, d0, d1             @ temp2 = src[0] - src[32]
        vmov.i32        d0[0], r12              @ 16-bit: d0[0] = #12, d0[1] = #6

        vshl.i16        q15, q2, #4             @ t3|t4 = 16 * (src[16]|src[48])
        vswp            d4, d5                  @ q2 = src[48]|src[16]
        vmla.i16        q14, q10, d0[0]         @ t1|t2 = 12 * (temp1|temp2) + add

        movw            r12, #15
        movt            r12, #9
        vmov.i32        d0[1], r12              @ 16-bit: d0[2] = #15, d0[3] = #9

        vneg.s16        d31, d31                @ t4 = -t4
        vmla.i16        q15, q2, d0[1]          @ t3|t4 += 6 * (src[48]|src[16])

        @ At this point:
        @   d0[2]  #15
        @   d0[3]  #9
        @   q1     src[8]|src[40]
        @   q3     src[24]|src[56]
        @   q14    old t1|t2
        @   q15    old t3|t4

        vshl.i16        q8,  q1, #4             @ t1|t2 = 16 * (src[8]|src[40])
        vswp            d2, d3                  @ q1 = src[40]|src[8]
        vshl.i16        q12, q3, #4             @ temp3a|temp4a = 16 * src[24]|src[56]
        vswp            d6, d7                  @ q3 = src[56]|src[24]
        vshl.i16        q13, q1, #2             @ temp3b|temp4b = 4 * (src[40]|src[8])
        vshl.i16        q2,  q3, #2             @ temp1|temp2 = 4 * (src[56]|src[24])
        vswp            d3, d6                  @ q1 = src[40]|src[56], q3 = src[8]|src[24]

        vsub.i16        q9,  q13, q12           @ t3|t4 = - (temp3a|temp4a) + (temp3b|temp4b)
        vadd.i16        q8,  q8,  q2            @ t1|t2 += temp1|temp2
        vmul.i16        q12, q3,  d0[3]         @ temp3|temp4 = 9 * src[8]|src[24]
        vmla.i16        q8,  q1,  d0[3]         @ t1|t2 += 9 * (src[40]|src[56])
        vswp            d6, d7                  @ q3 = src[24]|src[8]
        vswp            d2, d3                  @ q1 = src[56]|src[40]

        vsub.i16        q11, q14, q15           @ t8|t7 = old t1|t2 - old t3|t4
        vadd.i16        q10, q14, q15           @ t5|t6 = old t1|t2 + old t3|t4

.if \add1beforeshift
        vmov.i16        q15, #1
.endif

        vadd.i16        d18, d18, d24           @ t3 += temp3
        vsub.i16        d19, d19, d25           @ t4 -= temp4
        vswp            d22, d23                @ q11 = t7|t8
        vneg.s16        d17, d17                @ t2 = -t2
        vmla.i16        q9,  q1, d0[2]          @ t3|t4 += 15 * src[56]|src[40]
        vmla.i16        q8,  q3, d0[2]          @ t1|t2 += 15 * src[24]|src[8]

        @ At this point:
        @   t1  d16
        @   t2  d17
        @   t3  d18
        @   t4  d19
        @   t5  d20
        @   t6  d21
        @   t7  d22
        @   t8  d23
        @   #1  q15

.if \add1beforeshift
        vadd.i16        q3, q15, q10            @ line[7,6] = t5|t6 + 1
        vadd.i16        q2, q15, q11            @ line[5,4] = t7|t8 + 1
.endif

        @ Sometimes this overflows, so to get one additional bit of precision, use
        @ a single instruction that both adds and shifts right (halving).
        vhadd.s16       q1, q9, q11             @ line[2,3] = (t3|t4 + t7|t8) >> 1
        vhadd.s16       q0, q8, q10             @ line[0,1] = (t1|t2 + t5|t6) >> 1
.if \add1beforeshift
        vhsub.s16       q2, q2, q9              @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1
        vhsub.s16       q3, q3, q8              @ line[7,6] = (t5|t6 - t1|t2 + 1) >> 1
.else
        vhsub.s16       q2, q11, q9             @ line[5,4] = (t7|t8 - t3|t4) >> 1
        vhsub.s16       q3, q10, q8             @ line[7,6] = (t5|t6 - t1|t2) >> 1
.endif

        vshr.s16        q9,  q1, #(\rshift - 1) @ one shift is already done by vhadd/vhsub above
        vshr.s16        q8,  q0, #(\rshift - 1)
        vshr.s16        q10, q2, #(\rshift - 1)
        vshr.s16        q11, q3, #(\rshift - 1)

        @ At this point:
        @   dst[0]  d16
        @   dst[1]  d17
        @   dst[2]  d18
        @   dst[3]  d19
        @   dst[4]  d21
        @   dst[5]  d20
        @   dst[6]  d23
        @   dst[7]  d22
.endm
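
@ In scalar terms, each of the 8 lanes above computes roughly (a sketch of the
@ second loop of the C reference, with the column stride of 8 written out):
@
@   t1 = 12 * (src[0] + src[32]) + add    t3 = 16 * src[16] +  6 * src[48]
@   t2 = 12 * (src[0] - src[32]) + add    t4 =  6 * src[16] - 16 * src[48]
@   t5 = t1 + t3   t6 = t2 + t4   t7 = t2 - t4   t8 = t1 - t3
@
@   t1 = 16 * src[8] + 15 * src[24] +  9 * src[40] +  4 * src[56]
@   t2 = 15 * src[8] -  4 * src[24] - 16 * src[40] -  9 * src[56]
@   t3 =  9 * src[8] - 16 * src[24] +  4 * src[40] + 15 * src[56]
@   t4 =  4 * src[8] -  9 * src[24] + 15 * src[40] - 16 * src[56]
@
@   dst[0] = (t5 + t1) >> rshift          dst[7] = (t5 - t1 + a1) >> rshift
@   dst[1] = (t6 + t2) >> rshift          dst[6] = (t6 - t2 + a1) >> rshift
@   dst[2] = (t7 + t3) >> rshift          dst[5] = (t7 - t3 + a1) >> rshift
@   dst[3] = (t8 + t4) >> rshift          dst[4] = (t8 - t4 + a1) >> rshift
@
@ where a1 is 1 if \add1beforeshift is set and 0 otherwise.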

@ This is modeled after the first and second for loop in vc1_inv_trans_8x8_c.
@ Input columns:  q8, q9, q10, q11, q12, q13, q14, q15
@ Output columns: q8, q9, q10, q11, q12, q13, q14, q15
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x8_helper add add1beforeshift rshift
        @ This actually computes half of t1, t2, t3, t4, as explained below
        @ near `tNhalf`.
        vmov.i16        q0,  #(6 / 2)           @ q0 = #6/2
        vshl.i16        q1,  q10, #3            @ t3 = 16/2 * src[16]
        vshl.i16        q3,  q14, #3            @ temp4 = 16/2 * src[48]
        vmul.i16        q2,  q10, q0            @ t4 = 6/2 * src[16]
        vmla.i16        q1,  q14, q0            @ t3 += 6/2 * src[48]
        @ unused: q0, q10, q14
        vmov.i16        q0,  #(12 / 2)          @ q0 = #12/2
        vadd.i16        q10, q8,  q12           @ temp1 = src[0] + src[32]
        vsub.i16        q14, q8,  q12           @ temp2 = src[0] - src[32]
        @ unused: q8, q12
        vmov.i16        q8,  #(\add / 2)        @ t1 will accumulate here
        vmov.i16        q12, #(\add / 2)        @ t2 will accumulate here
        movw            r12, #15
        vsub.i16        q2,  q2,  q3            @ t4 = 6/2 * src[16] - 16/2 * src[48]
        movt            r12, #9
        @ unused: q3
        vmla.i16        q8,  q10, q0            @ t1 = 12/2 * temp1 + add
        vmla.i16        q12, q14, q0            @ t2 = 12/2 * temp2 + add
        vmov.i32        d0[0], r12
        @ unused: q3, q10, q14

        @ At this point:
        @   q0   d0 = #15|#9
        @   q1   old t3
        @   q2   old t4
        @   q3
        @   q8   old t1
        @   q9   src[8]
        @   q10
        @   q11  src[24]
        @   q12  old t2
        @   q13  src[40]
        @   q14
        @   q15  src[56]

        @ unused: q3, q10, q14
        movw            r12, #16
        vshl.i16        q3,  q9, #4             @ t1 = 16 * src[8]
        movt            r12, #4
        vshl.i16        q10, q9, #2             @ t4 = 4 * src[8]
        vmov.i32        d1[0], r12
        vmul.i16        q14, q9, d0[0]          @ t2 = 15 * src[8]
        vmul.i16        q9,  q9, d0[1]          @ t3 = 9 * src[8]
        @ unused: none
        vmla.i16        q3,  q11, d0[0]         @ t1 += 15 * src[24]
        vmls.i16        q10, q11, d0[1]         @ t4 -= 9 * src[24]
        vmls.i16        q14, q11, d1[1]         @ t2 -= 4 * src[24]
        vmls.i16        q9,  q11, d1[0]         @ t3 -= 16 * src[24]
        @ unused: q11
        vmla.i16        q3,  q13, d0[1]         @ t1 += 9 * src[40]
        vmla.i16        q10, q13, d0[0]         @ t4 += 15 * src[40]
        vmls.i16        q14, q13, d1[0]         @ t2 -= 16 * src[40]
        vmla.i16        q9,  q13, d1[1]         @ t3 += 4 * src[40]
        @ unused: q11, q13

        @ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes
        @ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved.
        vadd.i16        q11, q8,  q1            @ t5 = t1 + t3
        vsub.i16        q1,  q8,  q1            @ t8 = t1 - t3
        vadd.i16        q13, q12, q2            @ t6 = t2 + t4
        vsub.i16        q2,  q12, q2            @ t7 = t2 - t4
        @ unused: q8, q12
.if \add1beforeshift
        vmov.i16        q12, #1
.endif
        @ unused: q8
        vmla.i16        q3,  q15, d1[1]         @ t1 += 4 * src[56]
        vmls.i16        q14, q15, d0[1]         @ t2 -= 9 * src[56]
        vmla.i16        q9,  q15, d0[0]         @ t3 += 15 * src[56]
        vmls.i16        q10, q15, d1[0]         @ t4 -= 16 * src[56]
        @ unused: q0, q8, q15

        @ At this point:
        @   t1      q3
        @   t2      q14
        @   t3      q9
        @   t4      q10
        @   t5half  q11
        @   t6half  q13
        @   t7half  q2
        @   t8half  q1
        @   #1      q12
        @
        @ tNhalf is half of the value of tN (as described in vc1_inv_trans_8x8_c).
        @ This is done because sometimes files have input that causes tN + tM to
        @ overflow. To avoid this overflow, we compute tNhalf, then compute
        @ tNhalf + tM (which doesn't overflow), and then we use vhadd to compute
        @ (tNhalf + (tNhalf + tM)) >> 1 which does not overflow because it is
        @ one instruction.
        @
        @ For each pair of tN and tM, do:
        @   lineA = t5half + t1
        @   if add1beforeshift: t1 -= 1
        @   lineA = (t5half + lineA) >> 1
        @   lineB = t5half - t1
        @   lineB = (t5half + lineB) >> 1
        @   lineA >>= rshift - 1
        @   lineB >>= rshift - 1
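        @
        @ (This halving is exact: the even-part multipliers 12, 16 and 6 and the
        @ add are all even, so tNhalf == tN / 2 with no rounding and
        @ (tNhalf + (tNhalf + tM)) >> 1 == (tN + tM) >> 1.)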

        vadd.i16        q8,  q11, q3            @ q8 = t5half + t1
.if \add1beforeshift
        vsub.i16        q3,  q3,  q12           @ q3 = t1 - 1
.endif

        vadd.i16        q0,  q13, q14           @ q0 = t6half + t2
.if \add1beforeshift
        vsub.i16        q14, q14, q12           @ q14 = t2 - 1
.endif

        vadd.i16        q15, q2,  q9            @ q15 = t7half + t3
.if \add1beforeshift
        vsub.i16        q9,  q9,  q12           @ q9 = t3 - 1
.endif
        @ unused: none

        vhadd.s16       q8,  q11, q8            @ q8 = (t5half + t5half + t1) >> 1
        vsub.i16        q3,  q11, q3            @ q3 = t5half - t1 + 1
        vhadd.s16       q0,  q13, q0            @ q0 = (t6half + t6half + t2) >> 1
        vsub.i16        q14, q13, q14           @ q14 = t6half - t2 + 1
        vhadd.s16       q15, q2,  q15           @ q15 = (t7half + t7half + t3) >> 1
        vsub.i16        q9,  q2,  q9            @ q9 = t7half - t3 + 1
        vhadd.s16       q3,  q11, q3            @ q3 = (t5half + t5half - t1 + 1) >> 1
        @ unused: q11

        vadd.i16        q11, q1,  q10           @ q11 = t8half + t4
.if \add1beforeshift
        vsub.i16        q10, q10, q12           @ q10 = t4 - 1
.endif
        @ unused: q12

        vhadd.s16       q14, q13, q14           @ q14 = (t6half + t6half - t2 + 1) >> 1
        @ unused: q12, q13
        vhadd.s16       q13, q2,  q9            @ q13 = (t7half + t7half - t3 + 1) >> 1
        @ unused: q12, q2, q9

        vsub.i16        q10, q1,  q10           @ q10 = t8half - t4 + 1
        vhadd.s16       q11, q1,  q11           @ q11 = (t8half + t8half + t4) >> 1
        vshr.s16        q8,  q8,  #(\rshift - 1)  @ q8  = line[0]
        vhadd.s16       q12, q1,  q10           @ q12 = (t8half + t8half - t4 + 1) >> 1
        vshr.s16        q9,  q0,  #(\rshift - 1)  @ q9  = line[1]
        vshr.s16        q10, q15, #(\rshift - 1)  @ q10 = line[2]
        vshr.s16        q11, q11, #(\rshift - 1)  @ q11 = line[3]
        vshr.s16        q12, q12, #(\rshift - 1)  @ q12 = line[4]
        vshr.s16        q13, q13, #(\rshift - 1)  @ q13 = line[5]
        vshr.s16        q14, q14, #(\rshift - 1)  @ q14 = line[6]
        vshr.s16        q15, q3,  #(\rshift - 1)  @ q15 = line[7]
.endm

@ (int16_t *block [r0])
function ff_vc1_inv_trans_8x8_neon, export=1
        vld1.64         {q8-q9},   [r0,:128]!
        vld1.64         {q10-q11}, [r0,:128]!
        vld1.64         {q12-q13}, [r0,:128]!
        vld1.64         {q14-q15}, [r0,:128]
        sub             r0, r0, #(16 * 2 * 3)   @ restore r0

        @ At this point:
        @   src[0]   q8
        @   src[8]   q9
        @   src[16]  q10
        @   src[24]  q11
        @   src[32]  q12
        @   src[40]  q13
        @   src[48]  q14
        @   src[56]  q15

        vc1_inv_trans_8x8_helper add=4 add1beforeshift=0 rshift=3

        @ Transpose result matrix of 8x8
        swap4           d17, d19, d21, d23, d24, d26, d28, d30
        transpose16_4x4 q8, q9, q10, q11, q12, q13, q14, q15

        vc1_inv_trans_8x8_helper add=64 add1beforeshift=1 rshift=7

        vst1.64         {q8-q9},   [r0,:128]!
        vst1.64         {q10-q11}, [r0,:128]!
        vst1.64         {q12-q13}, [r0,:128]!
        vst1.64         {q14-q15}, [r0,:128]

        bx              lr
endfunc

@ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2])
function ff_vc1_inv_trans_8x4_neon, export=1
        vld1.64         {q0-q1}, [r2,:128]!     @ load 8 * 4 * 2 = 64 bytes / 16 bytes per quad = 4 quad registers
        vld1.64         {q2-q3}, [r2,:128]

        transpose16     q0 q1 q2 q3             @ transpose rows to columns

        @ At this point:
        @   src[0]  d0
        @   src[1]  d2
        @   src[2]  d4
        @   src[3]  d6
        @   src[4]  d1
        @   src[5]  d3
        @   src[6]  d5
        @   src[7]  d7

        vc1_inv_trans_8x4_helper add=4 add1beforeshift=0 rshift=3

        @ Move output to more standardized registers
        vmov            d0, d16
        vmov            d2, d17
        vmov            d4, d18
        vmov            d6, d19
        vmov            d1, d21
        vmov            d3, d20
        vmov            d5, d23
        vmov            d7, d22

        @ At this point:
        @   dst[0]  d0
        @   dst[1]  d2
        @   dst[2]  d4
        @   dst[3]  d6
        @   dst[4]  d1
        @   dst[5]  d3
        @   dst[6]  d5
        @   dst[7]  d7

        transpose16     q0 q1 q2 q3             @ turn columns into rows

        @ At this point:
        @   row[0]  q0
        @   row[1]  q1
        @   row[2]  q2
        @   row[3]  q3

        vc1_inv_trans_4x8_helper add=64 rshift=7

        @ At this point:
        @   line[0].l  d0
        @   line[0].h  d1
        @   line[1].l  d2
        @   line[1].h  d3
        @   line[2].l  d4
        @   line[2].h  d5
        @   line[3].l  d6
        @   line[3].h  d7

        @ unused registers: q12, q13, q14, q15

        vld1.64         {d28}, [r0,:64], r1     @ read dest
        vld1.64         {d29}, [r0,:64], r1
        vld1.64         {d30}, [r0,:64], r1
        vld1.64         {d31}, [r0,:64], r1
        sub             r0, r0, r1, lsl #2      @ restore original r0 value

        vaddw.u8        q0, q0, d28             @ line[0] += dest[0]
        vaddw.u8        q1, q1, d29             @ line[1] += dest[1]
        vaddw.u8        q2, q2, d30             @ line[2] += dest[2]
        vaddw.u8        q3, q3, d31             @ line[3] += dest[3]

        vqmovun.s16     d0, q0                  @ line[0]
        vqmovun.s16     d1, q1                  @ line[1]
        vqmovun.s16     d2, q2                  @ line[2]
        vqmovun.s16     d3, q3                  @ line[3]

        vst1.64         {d0}, [r0,:64], r1      @ write dest
        vst1.64         {d1}, [r0,:64], r1
        vst1.64         {d2}, [r0,:64], r1
        vst1.64         {d3}, [r0,:64]

        bx              lr
endfunc

@ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x8_neon, export=1
        mov             r12, #(8 * 2)           @ 8 elements per line, each element 2 bytes

        vld4.16         {d0[],  d2[],  d4[],  d6[]},  [r2,:64], r12  @ read each column into a q register
        vld4.16         {d0[1], d2[1], d4[1], d6[1]}, [r2,:64], r12
        vld4.16         {d0[2], d2[2], d4[2], d6[2]}, [r2,:64], r12
        vld4.16         {d0[3], d2[3], d4[3], d6[3]}, [r2,:64], r12
        vld4.16         {d1[],  d3[],  d5[],  d7[]},  [r2,:64], r12
        vld4.16         {d1[1], d3[1], d5[1], d7[1]}, [r2,:64], r12
        vld4.16         {d1[2], d3[2], d5[2], d7[2]}, [r2,:64], r12
        vld4.16         {d1[3], d3[3], d5[3], d7[3]}, [r2,:64]

        vc1_inv_trans_4x8_helper add=4 rshift=3

        @ At this point:
        @   dst[0] = q0
        @   dst[1] = q1
        @   dst[2] = q2
        @   dst[3] = q3

        transpose16     q0 q1 q2 q3             @ Transpose rows (registers) into columns

        vc1_inv_trans_8x4_helper add=64 add1beforeshift=1 rshift=7

        vld1.32         {d28[]},  [r0,:32], r1  @ read dest
        vld1.32         {d28[1]}, [r0,:32], r1
        vld1.32         {d29[]},  [r0,:32], r1
        vld1.32         {d29[1]}, [r0,:32], r1

        vld1.32         {d30[]},  [r0,:32], r1
        vld1.32         {d30[0]}, [r0,:32], r1
        vld1.32         {d31[]},  [r0,:32], r1
        vld1.32         {d31[0]}, [r0,:32], r1
        sub             r0, r0, r1, lsl #3      @ restore original r0 value

        vaddw.u8        q8,  q8,  d28           @ line[0,1] += dest[0,1]
        vaddw.u8        q9,  q9,  d29           @ line[2,3] += dest[2,3]
        vaddw.u8        q10, q10, d30           @ line[5,4] += dest[5,4]
        vaddw.u8        q11, q11, d31           @ line[7,6] += dest[7,6]

        vqmovun.s16     d16, q8                 @ clip(line[0,1])
        vqmovun.s16     d18, q9                 @ clip(line[2,3])
        vqmovun.s16     d20, q10                @ clip(line[5,4])
        vqmovun.s16     d22, q11                @ clip(line[7,6])

        vst1.32         {d16[0]}, [r0,:32], r1  @ write dest
        vst1.32         {d16[1]}, [r0,:32], r1
        vst1.32         {d18[0]}, [r0,:32], r1
        vst1.32         {d18[1]}, [r0,:32], r1

        vst1.32         {d20[1]}, [r0,:32], r1
        vst1.32         {d20[0]}, [r0,:32], r1
        vst1.32         {d22[1]}, [r0,:32], r1
        vst1.32         {d22[0]}, [r0,:32]

        bx              lr
endfunc

@ Setup constants in registers which are used by vc1_inv_trans_4x4_helper
.macro vc1_inv_trans_4x4_helper_setup
        vmov.i16        q13, #17
        vmov.i16        q14, #22
        vmov.i16        d30, #10                @ only need double-word, not quad-word
.endm

@ This is modeled after the first for loop in vc1_inv_trans_4x4_c.
.macro vc1_inv_trans_4x4_helper add rshift
        vmov.i16        q2, #\add               @ t1|t2 will accumulate here

        vadd.i16        d16, d0, d1             @ temp1 = src[0] + src[2]
        vsub.i16        d17, d0, d1             @ temp2 = src[0] - src[2]
        vmul.i16        q3, q14, q1             @ t3|t4 = 22 * (src[1]|src[3])
        vmla.i16        q2, q13, q8             @ t1|t2 = 17 * (temp1|temp2) + add
        vmla.i16        d6, d30, d3             @ t3 += 10 * src[3]
        vmls.i16        d7, d30, d2             @ t4 -= 10 * src[1]

        vadd.i16        q0, q2, q3              @ dst[0,2] = (t1|t2 + t3|t4)
        vsub.i16        q1, q2, q3              @ dst[3,1] = (t1|t2 - t3|t4)
        vshr.s16        q0, q0, #\rshift        @ dst[0,2] >>= rshift
        vshr.s16        q1, q1, #\rshift        @ dst[3,1] >>= rshift
.endm

@ (uint8_t *dest [r0], int linesize [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x4_neon, export=1
        mov             r12, #(8 * 2)           @ 8 elements per line, each element 2 bytes

        vld4.16         {d0[],  d1[],  d2[],  d3[]},  [r2,:64], r12  @ read each column into a register
        vld4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r12
        vld4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r12
        vld4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64]

        vswp            d1, d2                  @ so that we can later access column 1 and column 3 as a single q1 register

        vc1_inv_trans_4x4_helper_setup

        @ At this point:
        @   src[0] = d0
        @   src[1] = d2
        @   src[2] = d1
        @   src[3] = d3

        vc1_inv_trans_4x4_helper add=4 rshift=3 @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   dst[0] = d0
        @   dst[1] = d3
        @   dst[2] = d1
        @   dst[3] = d2

        transpose16     d0 d3 d1 d2             @ Transpose rows (registers) into columns

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d3
        @   src[16] = d1
        @   src[24] = d2

        vswp            d2, d3                  @ so that we can later access column 1 and column 3 in order as a single q1 register

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d2
        @   src[16] = d1
        @   src[24] = d3

        vc1_inv_trans_4x4_helper add=64 rshift=7 @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   line[0] = d0
        @   line[1] = d3
        @   line[2] = d1
        @   line[3] = d2

        vld1.32         {d18[]},  [r0,:32], r1  @ read dest
        vld1.32         {d19[]},  [r0,:32], r1
        vld1.32         {d18[1]}, [r0,:32], r1
        vld1.32         {d19[0]}, [r0,:32], r1
        sub             r0, r0, r1, lsl #2      @ restore original r0 value

        vaddw.u8        q0, q0, d18             @ line[0,2] += dest[0,2]
        vaddw.u8        q1, q1, d19             @ line[3,1] += dest[3,1]

        vqmovun.s16     d0, q0                  @ clip(line[0,2])
        vqmovun.s16     d1, q1                  @ clip(line[3,1])

        vst1.32         {d0[0]}, [r0,:32], r1   @ write dest
        vst1.32         {d1[1]}, [r0,:32], r1
        vst1.32         {d0[1]}, [r0,:32], r1
        vst1.32         {d1[0]}, [r0,:32]

        bx              lr
endfunc

@ The absolute values of the multiplication constants from vc1_mspel_filter and
@ vc1_mspel_{ver,hor}_filter_16bits. The signs are embedded in the code below
@ that carries out the multiplication (mspel_filter{,.16}).
#define MSPEL_MODE_1_MUL_CONSTANTS  4 53 18 3
#define MSPEL_MODE_2_MUL_CONSTANTS  1 9 9 1
#define MSPEL_MODE_3_MUL_CONSTANTS  3 18 53 4

@ These constants are from reading the source code of vc1_mspel_mc and determining
@ the value that is added to `rnd` to result in the variable `r`, and the value of
@ the variable `shift`.
#define MSPEL_MODES_11_ADDSHIFT_CONSTANTS   15 5
#define MSPEL_MODES_12_ADDSHIFT_CONSTANTS    3 3
#define MSPEL_MODES_13_ADDSHIFT_CONSTANTS   15 5
#define MSPEL_MODES_21_ADDSHIFT_CONSTANTS   MSPEL_MODES_12_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_22_ADDSHIFT_CONSTANTS    0 1
#define MSPEL_MODES_23_ADDSHIFT_CONSTANTS    3 3
#define MSPEL_MODES_31_ADDSHIFT_CONSTANTS   MSPEL_MODES_13_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_32_ADDSHIFT_CONSTANTS   MSPEL_MODES_23_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_33_ADDSHIFT_CONSTANTS   15 5

@ The addition and shift constants from vc1_mspel_filter.
#define MSPEL_MODE_1_ADDSHIFT_CONSTANTS     32 6
#define MSPEL_MODE_2_ADDSHIFT_CONSTANTS      8 4
#define MSPEL_MODE_3_ADDSHIFT_CONSTANTS     32 6
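
@ For example, ff_put_vc1_mspel_mc11_neon (expanded from the macros below) uses
@ the mode-1 multipliers (4, 53, 18, 3) for both passes; per
@ MSPEL_MODES_11_ADDSHIFT_CONSTANTS, 15 is added to rnd and the first (vertical)
@ pass shifts by 5, while the second (horizontal) pass always shifts by 7
@ (see put_vc1_mspel_mc_hv).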

@ Setup constants in registers for a subsequent use of mspel_filter{,.16}.
.macro mspel_constants typesize reg_a reg_b reg_c reg_d filter_a filter_b filter_c filter_d reg_add filter_add_register
        @ Define double-word register aliases. Typesize should be i8 or i16.
ra      .dn             \reg_a\().\typesize
rb      .dn             \reg_b\().\typesize
rc      .dn             \reg_c\().\typesize
rd      .dn             \reg_d\().\typesize

        @ Only set a register if its value is not 1 and is not a duplicate of reg_b
.if \filter_a != 1
        vmov            ra, #\filter_a          @ ra = filter_a
.endif
        vmov            rb, #\filter_b          @ rb = filter_b
.if \filter_b != \filter_c
        vmov            rc, #\filter_c          @ rc = filter_c
.endif
.if \filter_d != 1
        vmov            rd, #\filter_d          @ rd = filter_d
.endif
        @ vdup the add at twice the width of typesize, matching the accumulator
.ifc \typesize,i8
        vdup.16         \reg_add, \filter_add_register  @ reg_add = filter_add_register
.else
        vdup.32         \reg_add, \filter_add_register  @ reg_add = filter_add_register
.endif

        .unreq          ra
        .unreq          rb
        .unreq          rc
        .unreq          rd
.endm

@ After mspel_constants has been used, do the filtering.
.macro mspel_filter acc dest src0 src1 src2 src3 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift narrow=1
.if \filter_a != 1
        @ If filter_a != 1, then we need a move and subtract instruction
        vmov            \acc, \reg_add          @ acc = reg_add
        vmlsl.u8        \acc, \reg_a, \src0     @ acc -= filter_a * src[-stride]
.else
        @ If filter_a is 1, then just subtract without an extra move
        vsubw.u8        \acc, \reg_add, \src0   @ acc = reg_add - src[-stride] @ since filter_a == 1
.endif
        vmlal.u8        \acc, \reg_b, \src1     @ acc += filter_b * src[0]
.if \filter_b != \filter_c
        vmlal.u8        \acc, \reg_c, \src2     @ acc += filter_c * src[stride]
.else
        @ If filter_b is the same as filter_c, use the same reg_b register
        vmlal.u8        \acc, \reg_b, \src2     @ acc += filter_c * src[stride] @ where filter_c == filter_b
.endif
.if \filter_d != 1
        @ If filter_d != 1, then do a multiply accumulate
        vmlsl.u8        \acc, \reg_d, \src3     @ acc -= filter_d * src[stride * 2]
.else
        @ If filter_d is 1, then just do a subtract
        vsubw.u8        \acc, \acc, \src3       @ acc -= src[stride * 2] @ since filter_d == 1
.endif
.if \narrow
        vqshrun.s16     \dest, \acc, #\filter_shift  @ dest = clip_uint8(acc >> filter_shift)
.else
        vshr.s16        \dest, \acc, #\filter_shift  @ dest = acc >> filter_shift
.endif
.endm
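
@ In scalar terms, mspel_filter computes, per output pixel (with a, b, c, d the
@ filter constants whose signs are fixed as encoded above):
@
@   acc  = reg_add - a * src[-stride] + b * src[0]
@                  + c * src[stride]  - d * src[stride * 2]
@   dest = clip_uint8(acc >> filter_shift)   (or just acc >> filter_shift
@                                             when narrow=0)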

@ This is similar to mspel_filter, but the input is 16-bit instead of 8-bit and narrow=0 is not supported.
.macro mspel_filter.16 acc0 acc1 acc0_0 acc0_1 dest src0 src1 src2 src3 src4 src5 src6 src7 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift
.if \filter_a != 1
        vmov            \acc0, \reg_add
        vmov            \acc1, \reg_add
        vmlsl.s16       \acc0, \reg_a, \src0
        vmlsl.s16       \acc1, \reg_a, \src1
.else
        vsubw.s16       \acc0, \reg_add, \src0
        vsubw.s16       \acc1, \reg_add, \src1
.endif
        vmlal.s16       \acc0, \reg_b, \src2
        vmlal.s16       \acc1, \reg_b, \src3
.if \filter_b != \filter_c
        vmlal.s16       \acc0, \reg_c, \src4
        vmlal.s16       \acc1, \reg_c, \src5
.else
        vmlal.s16       \acc0, \reg_b, \src4
        vmlal.s16       \acc1, \reg_b, \src5
.endif
.if \filter_d != 1
        vmlsl.s16       \acc0, \reg_d, \src6
        vmlsl.s16       \acc1, \reg_d, \src7
.else
        vsubw.s16       \acc0, \acc0, \src6
        vsubw.s16       \acc1, \acc1, \src7
.endif
        @ Use acc0_0 and acc0_1 as temp space
        vqshrun.s32     \acc0_0, \acc0, #\filter_shift  @ Shift and narrow with saturation from s32 to u16
        vqshrun.s32     \acc0_1, \acc1, #\filter_shift
        vqmovn.u16      \dest, \acc0                    @ Narrow with saturation from u16 to u8
.endm
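
@ Note that the 16-bit variant accumulates in 32 bits (vmlal.s16/vmlsl.s16) and
@ narrows in two steps: s32 -> u16 with saturation (vqshrun.s32, reusing
@ acc0_0/acc0_1 as scratch), then u16 -> u8 with saturation (vqmovn.u16).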

@ Register usage for put_vc1_mspel_mc functions. Registers marked 'hv' are
@ only used in put_vc1_mspel_mc_hv.
@
@   r0        adjusted dst
@   r1        adjusted src
@   r2        stride
@   r3        adjusted rnd
@   r4   [hv] tmp
@   r11  [hv] sp saved
@   r12       loop counter
@   d0        src[-stride]
@   d1        src[0]
@   d2        src[stride]
@   d3        src[stride * 2]
@   q0   [hv] src[-stride]
@   q1   [hv] src[0]
@   q2   [hv] src[stride]
@   q3   [hv] src[stride * 2]
@   d21       often result from mspel_filter
@   q11       accumulator 0
@   q12  [hv] accumulator 1
@   q13       accumulator initial value
@   d28       filter_a
@   d29       filter_b
@   d30       filter_c
@   d31       filter_d

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_hv hmode vmode filter_h_a filter_h_b filter_h_c filter_h_d filter_v_a filter_v_b filter_v_c filter_v_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon, export=1
        push            {r4, r11, lr}
        mov             r11, sp                 @ r11 = stack pointer before realignment
A       bic             sp,  sp, #15            @ sp = round down to multiple of 16 bytes
T       bic             r4,  r11, #15
T       mov             sp,  r4
        sub             sp,  sp, #(8*2*16)      @ make space for 8 rows * 2 bytes per element * 16 elements per row (to fit 11 actual elements per row)
        mov             r4,  sp                 @ r4 = int16_t tmp[8 * 16]

        sub             r1,  r1, #1             @ src -= 1
.if \filter_add != 0
        add             r3,  r3, #\filter_add   @ r3 = filter_add + rnd
.endif
        mov             r12, #8                 @ loop counter
        sub             r1,  r1, r2             @ r1 = &src[-stride] @ slide back

        @ Do vertical filtering from src into tmp
        mspel_constants i8 d28 d29 d30 d31 \filter_v_a \filter_v_b \filter_v_c \filter_v_d q13 r3

        vld1.64         {d0,d1}, [r1], r2
        vld1.64         {d2,d3}, [r1], r2
        vld1.64         {d4,d5}, [r1], r2

1:
        subs            r12, r12, #4

        vld1.64         {d6,d7}, [r1], r2
        mspel_filter    q11 q11 d0 d2 d4 d6 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter    q12 q12 d1 d3 d5 d7 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d0,d1}, [r1], r2
        mspel_filter    q11 q11 d2 d4 d6 d0 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter    q12 q12 d3 d5 d7 d1 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d2,d3}, [r1], r2
        mspel_filter    q11 q11 d4 d6 d0 d2 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter    q12 q12 d5 d7 d1 d3 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d4,d5}, [r1], r2
        mspel_filter    q11 q11 d6 d0 d2 d4 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        mspel_filter    q12 q12 d7 d1 d3 d5 \filter_v_a \filter_v_b \filter_v_c \filter_v_d d28 d29 d30 d31 q13 \filter_shift narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        bne             1b

        rsb             r3,  r3, #(64 + \filter_add)  @ r3 = (64 + filter_add) - r3
        mov             r12, #8                 @ loop counter
        mov             r4,  sp                 @ r4 = tmp

        @ Do horizontal filtering from tmp to dst
        mspel_constants i16 d28 d29 d30 d31 \filter_h_a \filter_h_b \filter_h_c \filter_h_d q13 r3

2:
        subs            r12, r12, #1

        vld1.64         {q0,q1}, [r4,:128]!     @ read one line of tmp
        vext.16         q2, q0, q1, #2
        vext.16         q3, q0, q1, #3
        vext.16         q1, q0, q1, #1          @ do last because it writes to q1 which is read by the other vext instructions

        mspel_filter.16 q11 q12 d22 d23 d21 d0 d1 d2 d3 d4 d5 d6 d7 \filter_h_a \filter_h_b \filter_h_c \filter_h_d d28 d29 d30 d31 q13 7

        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        bne             2b

        mov             sp,  r11
        pop             {r4, r11, pc}
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for horizontal and vertical filtering.
#define PUT_VC1_MSPEL_MC_HV(hmode, vmode)                               \
    put_vc1_mspel_mc_hv hmode vmode                                     \
        MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS                          \
        MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS                          \
        MSPEL_MODES_ ## hmode ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_HV(1, 1)
PUT_VC1_MSPEL_MC_HV(1, 2)
PUT_VC1_MSPEL_MC_HV(1, 3)
PUT_VC1_MSPEL_MC_HV(2, 1)
PUT_VC1_MSPEL_MC_HV(2, 2)
PUT_VC1_MSPEL_MC_HV(2, 3)
PUT_VC1_MSPEL_MC_HV(3, 1)
PUT_VC1_MSPEL_MC_HV(3, 2)
PUT_VC1_MSPEL_MC_HV(3, 3)

#undef PUT_VC1_MSPEL_MC_HV

.macro put_vc1_mspel_mc_h_only hmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()0_neon, export=1
        rsb             r3,  r3, #\filter_add   @ r3 = filter_add - r = filter_add - rnd
        mov             r12, #8                 @ loop counter
        sub             r1,  r1, #1             @ slide back, using immediate

        mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3

1:
        subs            r12, r12, #1

        vld1.64         {d0,d1}, [r1], r2       @ read 16 bytes even though we only need 11, also src += stride
        vext.8          d2, d0, d1, #2
        vext.8          d3, d0, d1, #3
        vext.8          d1, d0, d1, #1          @ do last because it writes to d1 which is read by the other vext instructions

        mspel_filter    q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift

        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        bne             1b

        bx              lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for horizontal only filtering.
#define PUT_VC1_MSPEL_MC_H_ONLY(hmode)                                  \
    put_vc1_mspel_mc_h_only hmode MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS MSPEL_MODE_ ## hmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_H_ONLY(1)
PUT_VC1_MSPEL_MC_H_ONLY(2)
PUT_VC1_MSPEL_MC_H_ONLY(3)

#undef PUT_VC1_MSPEL_MC_H_ONLY

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_v_only vmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc0\vmode\()_neon, export=1
        add             r3,  r3, #\filter_add - 1  @ r3 = filter_add - r = filter_add - (1 - rnd) = filter_add - 1 + rnd
        mov             r12, #8                 @ loop counter
        sub             r1,  r1, r2             @ r1 = &src[-stride] @ slide back

        mspel_constants i8 d28 d29 d30 d31 \filter_a \filter_b \filter_c \filter_d q13 r3

        vld1.64         {d0}, [r1], r2          @ d0 = src[-stride]
        vld1.64         {d1}, [r1], r2          @ d1 = src[0]
        vld1.64         {d2}, [r1], r2          @ d2 = src[stride]

1:
        subs            r12, r12, #4

        vld1.64         {d3}, [r1], r2          @ d3 = src[stride * 2]
        mspel_filter    q11 d21 d0 d1 d2 d3 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        vld1.64         {d0}, [r1], r2          @ d0 = next line
        mspel_filter    q11 d21 d1 d2 d3 d0 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        vld1.64         {d1}, [r1], r2          @ d1 = next line
        mspel_filter    q11 d21 d2 d3 d0 d1 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        vld1.64         {d2}, [r1], r2          @ d2 = next line
        mspel_filter    q11 d21 d3 d0 d1 d2 \filter_a \filter_b \filter_c \filter_d d28 d29 d30 d31 q13 \filter_shift
        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        bne             1b

        bx              lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for vertical only filtering.
#define PUT_VC1_MSPEL_MC_V_ONLY(vmode)                                  \
    put_vc1_mspel_mc_v_only vmode MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS MSPEL_MODE_ ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_V_ONLY(1)
PUT_VC1_MSPEL_MC_V_ONLY(2)
PUT_VC1_MSPEL_MC_V_ONLY(3)

#undef PUT_VC1_MSPEL_MC_V_ONLY

function ff_put_pixels8x8_neon, export=1
        vld1.64         {d0}, [r1], r2
        vld1.64         {d1}, [r1], r2
        vld1.64         {d2}, [r1], r2
        vld1.64         {d3}, [r1], r2
        vld1.64         {d4}, [r1], r2
        vld1.64         {d5}, [r1], r2
        vld1.64         {d6}, [r1], r2
        vld1.64         {d7}, [r1]
        vst1.64         {d0}, [r0,:64], r2
        vst1.64         {d1}, [r0,:64], r2
        vst1.64         {d2}, [r0,:64], r2
        vst1.64         {d3}, [r0,:64], r2
        vst1.64         {d4}, [r0,:64], r2
        vst1.64         {d5}, [r0,:64], r2
        vst1.64         {d6}, [r0,:64], r2
        vst1.64         {d7}, [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_8x8_dc_neon, export=1
        ldrsh           r2, [r2]                @ int dc = block[0];

        vld1.64         {d0},  [r0,:64], r1
        vld1.64         {d1},  [r0,:64], r1
        vld1.64         {d4},  [r0,:64], r1
        vld1.64         {d5},  [r0,:64], r1
        add             r2, r2, r2, lsl #1      @ dc = (3 * dc + 1) >> 1;
        vld1.64         {d6},  [r0,:64], r1
        add             r2, r2, #1
        vld1.64         {d7},  [r0,:64], r1
        vld1.64         {d16}, [r0,:64], r1
        vld1.64         {d17}, [r0,:64], r1
        asr             r2, r2, #1

        sub             r0, r0, r1, lsl #3      @ restore r0 to original value

        add             r2, r2, r2, lsl #1      @ dc = (3 * dc + 16) >> 5;
        add             r2, r2, #16
        asr             r2, r2, #5

        vdup.16         q1, r2                  @ dc

        vaddw.u8        q9,  q1, d0
        vaddw.u8        q10, q1, d1
        vaddw.u8        q11, q1, d4
        vaddw.u8        q12, q1, d5
        vqmovun.s16     d0, q9
        vqmovun.s16     d1, q10
        vqmovun.s16     d4, q11
        vst1.64         {d0},  [r0,:64], r1
        vqmovun.s16     d5, q12
        vst1.64         {d1},  [r0,:64], r1
        vaddw.u8        q13, q1, d6
        vst1.64         {d4},  [r0,:64], r1
        vaddw.u8        q14, q1, d7
        vst1.64         {d5},  [r0,:64], r1
        vaddw.u8        q15, q1, d16
        vaddw.u8        q1,  q1, d17            @ this destroys q1
        vqmovun.s16     d6,  q13
        vqmovun.s16     d7,  q14
        vqmovun.s16     d16, q15
        vqmovun.s16     d17, q1
        vst1.64         {d6},  [r0,:64], r1
        vst1.64         {d7},  [r0,:64], r1
        vst1.64         {d16}, [r0,:64], r1
        vst1.64         {d17}, [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_8x4_dc_neon, export=1
        ldrsh           r2, [r2]                @ int dc = block[0];

        vld1.64         {d0}, [r0,:64], r1
        vld1.64         {d1}, [r0,:64], r1
        vld1.64         {d4}, [r0,:64], r1
        vld1.64         {d5}, [r0,:64], r1

        add             r2, r2, r2, lsl #1      @ dc = (3 * dc + 1) >> 1;

        sub             r0, r0, r1, lsl #2      @ restore r0 to original value

        add             r2, r2, #1
        asr             r2, r2, #1

        add             r2, r2, r2, lsl #4      @ dc = (17 * dc + 64) >> 7;
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1, r2                  @ dc

        vaddw.u8        q3,  q1, d0
        vaddw.u8        q8,  q1, d1
        vaddw.u8        q9,  q1, d4
        vaddw.u8        q10, q1, d5
        vqmovun.s16     d0, q3
        vqmovun.s16     d1, q8
        vqmovun.s16     d4, q9
        vst1.64         {d0}, [r0,:64], r1
        vqmovun.s16     d5, q10
        vst1.64         {d1}, [r0,:64], r1
        vst1.64         {d4}, [r0,:64], r1
        vst1.64         {d5}, [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_4x8_dc_neon, export=1
        ldrsh           r2, [r2]                @ int dc = block[0];

        vld1.32         {d0[]},  [r0,:32], r1
        vld1.32         {d1[]},  [r0,:32], r1
        vld1.32         {d0[1]}, [r0,:32], r1
        vld1.32         {d1[1]}, [r0,:32], r1

        add             r2, r2, r2, lsl #4      @ dc = (17 * dc + 4) >> 3;

        vld1.32         {d4[]},  [r0,:32], r1
        add             r2, r2, #4
        vld1.32         {d5[]},  [r0,:32], r1
        vld1.32         {d4[1]}, [r0,:32], r1
        asr             r2, r2, #3
        vld1.32         {d5[1]}, [r0,:32], r1

        add             r2, r2, r2, lsl #1      @ dc = (12 * dc + 64) >> 7;

        sub             r0, r0, r1, lsl #3      @ restore r0 to original value

        lsl             r2, r2, #2
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1, r2                  @ dc

        vaddw.u8        q3,  q1, d0
        vaddw.u8        q8,  q1, d1
        vaddw.u8        q9,  q1, d4
        vaddw.u8        q10, q1, d5
        vqmovun.s16     d0, q3
        vst1.32         {d0[0]}, [r0,:32], r1
        vqmovun.s16     d1, q8
        vst1.32         {d1[0]}, [r0,:32], r1
        vqmovun.s16     d4, q9
        vst1.32         {d0[1]}, [r0,:32], r1
        vqmovun.s16     d5, q10
        vst1.32         {d1[1]}, [r0,:32], r1
        vst1.32         {d4[0]}, [r0,:32], r1
        vst1.32         {d5[0]}, [r0,:32], r1
        vst1.32         {d4[1]}, [r0,:32], r1
        vst1.32         {d5[1]}, [r0,:32]
        bx              lr
endfunc

function ff_vc1_inv_trans_4x4_dc_neon, export=1
        ldrsh           r2, [r2]                @ int dc = block[0];

        vld1.32         {d0[]},  [r0,:32], r1
        vld1.32         {d1[]},  [r0,:32], r1
        vld1.32         {d0[1]}, [r0,:32], r1
        vld1.32         {d1[1]}, [r0,:32], r1

        add             r2, r2, r2, lsl #4      @ dc = (17 * dc + 4) >> 3;

        sub             r0, r0, r1, lsl #2      @ restore r0 to original value

        add             r2, r2, #4
        asr             r2, r2, #3

        add             r2, r2, r2, lsl #4      @ dc = (17 * dc + 64) >> 7;
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1, r2                  @ dc

        vaddw.u8        q2, q1, d0
        vaddw.u8        q3, q1, d1
        vqmovun.s16     d0, q2
        vst1.32         {d0[0]}, [r0,:32], r1
        vqmovun.s16     d1, q3
        vst1.32         {d1[0]}, [r0,:32], r1
        vst1.32         {d0[1]}, [r0,:32], r1
        vst1.32         {d1[1]}, [r0,:32]
        bx              lr
endfunc