/*
 * VP8 ARMv6 optimisations
 *
 * Copyright (c) 2010 Google Inc.
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * This code was partially ported from libvpx, which uses this license:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 *   * Neither the name of Google nor the names of its contributors may
 *     be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "libavutil/arm/asm.S"

@ idct
@ void vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
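@ Inverse 4x4 Walsh-Hadamard transform of the luma DC block, clearing the
@ dc[] input as it goes.  Each uadd16/usub16 is two 16-bit butterflies at
@ once, so every pass handles a pair of columns (then rows) per
@ instruction.  The rounding bias 3 is folded into element 0 of each row
@ between the passes, making every output (x + 3) >> 3; the
@ strh ..., [r0], #32 stores step over one 16-coefficient subblock
@ (32 bytes) at a time, writing block[i][j][0].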
function ff_vp8_luma_dc_wht_armv6, export=1
        push {r4-r10, lr}
        ldm r1, {r2-r9}
        mov r10, #0
        mov lr, #0
        uadd16 r12, r2, r8           @ t0[0,1]
        usub16 r2, r2, r8            @ t3[0,1]
        stm r1!, {r10, lr}
        uadd16 r8, r4, r6            @ t1[0,1]
        usub16 r4, r4, r6            @ t2[0,1]
        stm r1!, {r10, lr}
        uadd16 r6, r12, r8           @ dc0[0,1]
        usub16 r12, r12, r8          @ dc2[0,1]
        stm r1!, {r10, lr}
        uadd16 r8, r2, r4            @ dc1[0,1]
        usub16 r2, r2, r4            @ dc3[0,1]
        stm r1!, {r10, lr}
        uadd16 lr, r3, r9            @ t0[2,3]
        usub16 r3, r3, r9            @ t3[2,3]
        uadd16 r9, r5, r7            @ t1[2,3]
        usub16 r5, r5, r7            @ t2[2,3]
        uadd16 r7, lr, r9            @ dc0[2,3]
        usub16 lr, lr, r9            @ dc2[2,3]
        uadd16 r9, r3, r5            @ dc1[2,3]
        usub16 r3, r3, r5            @ dc3[2,3]
        mov r1, #3
        orr r1, r1, #0x30000         @ 3 | 3 (round)
        pkhbt r4, r6, r8, lsl #16    @ dc{0,1}[0]
        pkhtb r6, r8, r6, asr #16    @ dc{0,1}[1]
        pkhbt r5, r12, r2, lsl #16   @ dc{2,3}[0]
        pkhtb r12, r2, r12, asr #16  @ dc{2,3}[1]
        pkhbt r8, r7, r9, lsl #16    @ dc{0,1}[2]
        uadd16 r4, r4, r1
        uadd16 r5, r5, r1
        pkhtb r7, r9, r7, asr #16    @ dc{0,1}[3]
        pkhbt r2, lr, r3, lsl #16    @ dc{2,3}[2]
        pkhtb lr, r3, lr, asr #16    @ dc{2,3}[3]
        uadd16 r9, r4, r7            @ t0[0,1]
        uadd16 r3, r5, lr            @ t0[2,3]
        usub16 r4, r4, r7            @ t3[0,1]
        usub16 r5, r5, lr            @ t3[2,3]
        uadd16 r7, r6, r8            @ t1[0,1]
        uadd16 lr, r12, r2           @ t1[2,3]
        usub16 r6, r6, r8            @ t2[0,1]
        usub16 r12, r12, r2          @ t2[2,3]
        uadd16 r8, r9, r7            @ block[0,1][0]
        uadd16 r2, r3, lr            @ block[2,3][0]
        usub16 r9, r9, r7            @ block[0,1][2]
        usub16 r3, r3, lr            @ block[2,3][2]
        uadd16 r7, r4, r6            @ block[0,1][1]
        uadd16 lr, r5, r12           @ block[2,3][1]
        usub16 r4, r4, r6            @ block[0,1][3]
        usub16 r5, r5, r12           @ block[2,3][3]
#if HAVE_ARMV6T2_EXTERNAL
        sbfx r6, r8, #3, #13
        sbfx r12, r7, #3, #13
        sbfx r1, r9, #3, #13
        sbfx r10, r4, #3, #13
#else
        sxth r6, r8
        sxth r12, r7
        sxth r1, r9
        sxth r10, r4
        asr r6, #3                   @ block[0][0]
        asr r12, #3                  @ block[0][1]
        asr r1, #3                   @ block[0][2]
        asr r10, #3                  @ block[0][3]
#endif
        strh r6, [r0], #32
        asr r8, r8, #19              @ block[1][0]
        strh r12, [r0], #32
        asr r7, r7, #19              @ block[1][1]
        strh r1, [r0], #32
        asr r9, r9, #19              @ block[1][2]
        strh r10, [r0], #32
        asr r4, r4, #19              @ block[1][3]
        strh r8, [r0], #32
        asr r6, r2, #19              @ block[3][0]
        strh r7, [r0], #32
        asr r12, lr, #19             @ block[3][1]
        strh r9, [r0], #32
        asr r1, r3, #19              @ block[3][2]
        strh r4, [r0], #32
        asr r10, r5, #19             @ block[3][3]
#if HAVE_ARMV6T2_EXTERNAL
        sbfx r2, r2, #3, #13
        sbfx lr, lr, #3, #13
        sbfx r3, r3, #3, #13
        sbfx r5, r5, #3, #13
#else
        sxth r2, r2
        sxth lr, lr
        sxth r3, r3
        sxth r5, r5
        asr r2, #3                   @ block[2][0]
        asr lr, #3                   @ block[2][1]
        asr r3, #3                   @ block[2][2]
        asr r5, #3                   @ block[2][3]
#endif
        strh r2, [r0], #32
        strh lr, [r0], #32
        strh r3, [r0], #32
        strh r5, [r0], #32
        strh r6, [r0], #32
        strh r12, [r0], #32
        strh r1, [r0], #32
        strh r10, [r0], #32
        pop {r4-r10, pc}
endfunc

@ void vp8_luma_dc_wht_dc(int16_t block[4][4][16], int16_t dc[16])
function ff_vp8_luma_dc_wht_dc_armv6, export=1
        ldrsh r2, [r1]
        mov r3, #0
        add r2, r2, #3
        strh r3, [r1]
        asr r2, r2, #3
        .rept 16
        strh r2, [r0], #32
        .endr
        bx lr
endfunc

@ void vp8_idct_add(uint8_t *dst, int16_t block[16], int stride)
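@ 4x4 inverse DCT using the VP8 constants 20091/65536 ~ sqrt(2)*cos(pi/8)-1
@ and 35468/65536 ~ sqrt(2)*sin(pi/8) (Q16 fractions applied with smulw*).
@ Pass 1 transforms two columns per iteration into a 32-byte scratch area
@ on the stack, zeroing the coefficients as it goes; pass 2 pops two rows
@ at a time and adds the result to dst with uxtab16/usat16.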
function ff_vp8_idct_add_armv6, export=1
        push {r4-r12, lr}
        sub sp, sp, #32
        movw r3, #20091              @ cospi8sqrt2minus1
        movw r4, #35468              @ sinpi8sqrt2
        mov r5, sp
1:
        ldr r6, [r1, #8]             @ i5 | i4 = block1[1] | block1[0]
        ldr lr, [r1, #16]            @ i9 | i8 = block2[1] | block2[0]
        ldr r12, [r1, #24]           @ i13 | i12 = block3[1] | block3[0]
        smulwt r9, r3, r6            @ ip[5] * cospi8sqrt2minus1
        smulwb r7, r3, r6            @ ip[4] * cospi8sqrt2minus1
        smulwt r10, r4, r6           @ ip[5] * sinpi8sqrt2
        smulwb r8, r4, r6            @ ip[4] * sinpi8sqrt2
        pkhbt r7, r7, r9, lsl #16    @ 5c | 4c
        smulwt r11, r3, r12          @ ip[13] * cospi8sqrt2minus1
        pkhbt r8, r8, r10, lsl #16   @ 5s | 4s = t2 first half
        uadd16 r6, r6, r7            @ 5c+5 | 4c+4 = t3 first half
        smulwb r9, r3, r12           @ ip[12] * cospi8sqrt2minus1
        smulwt r7, r4, r12           @ ip[13] * sinpi8sqrt2
        smulwb r10, r4, r12          @ ip[12] * sinpi8sqrt2
        pkhbt r9, r9, r11, lsl #16   @ 13c | 12c
        ldr r11, [r1]                @ i1 | i0
        pkhbt r10, r10, r7, lsl #16  @ 13s | 12s = t3 second half
        uadd16 r7, r12, r9           @ 13c+13 | 12c+12 = t2 2nd half
        uadd16 r6, r6, r10           @ d = t3
        uadd16 r10, r11, lr          @ a = t0
        usub16 r7, r8, r7            @ c = t2
        usub16 r8, r11, lr           @ b = t1
        uadd16 r9, r10, r6           @ a+d = tmp{0,1}[0]
        usub16 r10, r10, r6          @ a-d = tmp{0,1}[3]
        uadd16 r6, r8, r7            @ b+c = tmp{0,1}[1]
        usub16 r7, r8, r7            @ b-c = tmp{0,1}[2]
        mov r8, #0
        cmp sp, r5
        str r6, [r5, #8]             @ o5 | o4
        str r7, [r5, #16]            @ o9 | o8
        str r10, [r5, #24]           @ o13 | o12
        str r9, [r5], #4             @ o1 | o0
        str r8, [r1, #8]
        str r8, [r1, #16]
        str r8, [r1, #24]
        str r8, [r1], #4
        beq 1b
        mov r5, #2
2:
        pop {r1, r6, r12, lr}
        smulwt r9, r3, r12           @ ip[5] * cospi8sqrt2minus1
        smulwt r7, r3, r1            @ ip[1] * cospi8sqrt2minus1
        smulwt r10, r4, r12          @ ip[5] * sinpi8sqrt2
        smulwt r8, r4, r1            @ ip[1] * sinpi8sqrt2
        pkhbt r11, r1, r12, lsl #16  @ i4 | i0 = t0/t1 first half
        pkhtb r1, r12, r1, asr #16   @ i5 | i1
        pkhbt r7, r7, r9, lsl #16    @ 5c | 1c
        pkhbt r8, r8, r10, lsl #16   @ 5s | 1s = t2 first half
        pkhbt r9, r6, lr, lsl #16    @ i6 | i2 = t0/t1 second half
        pkhtb r12, lr, r6, asr #16   @ i7 | i3
        uadd16 r1, r7, r1            @ 5c+5 | 1c+1 = t3 first half
        uadd16 r10, r11, r9          @ a = t0
        usub16 r9, r11, r9           @ b = t1
        smulwt r7, r3, r12           @ ip[7] * cospi8sqrt2minus1
        smulwb lr, r3, r12           @ ip[3] * cospi8sqrt2minus1
        smulwt r11, r4, r12          @ ip[7] * sinpi8sqrt2
        smulwb r6, r4, r12           @ ip[3] * sinpi8sqrt2
        subs r5, r5, #1
        pkhbt r7, lr, r7, lsl #16    @ 7c | 3c
        pkhbt r11, r6, r11, lsl #16  @ 7s | 3s = t3 second half
        mov r6, #0x4
        orr r6, r6, #0x40000
        uadd16 r12, r7, r12          @ 7c+7 | 3c+3 = t2 second half
        uadd16 r10, r10, r6          @ t0 + 4
        uadd16 r9, r9, r6            @ t1 + 4
        usub16 lr, r8, r12           @ c (o5 | o1) = t2
        uadd16 r12, r11, r1          @ d (o7 | o3) = t3
        usub16 r1, r9, lr            @ b-c = dst{0,1}[2]
        uadd16 r7, r10, r12          @ a+d = dst{0,1}[0]
        usub16 r12, r10, r12         @ a-d = dst{0,1}[3]
        uadd16 r10, r9, lr           @ b+c = dst{0,1}[1]
        asr lr, r1, #3               @ o[1][2]
        asr r9, r12, #3              @ o[1][3]
        pkhtb r8, lr, r7, asr #19    @ o[1][0,2]
        pkhtb r11, r9, r10, asr #19  @ o[1][1,3]
        ldr lr, [r0]
        sxth r12, r12
        ldr r9, [r0, r2]
        sxth r1, r1
#if HAVE_ARMV6T2_EXTERNAL
        sbfx r7, r7, #3, #13
        sbfx r10, r10, #3, #13
#else
        sxth r7, r7
        sxth r10, r10
        asr r7, #3                   @ o[0][0]
        asr r10, #3                  @ o[0][1]
#endif
        pkhbt r7, r7, r1, lsl #13    @ o[0][0,2]
        pkhbt r10, r10, r12, lsl #13 @ o[0][1,3]
        uxtab16 r7, r7, lr
        uxtab16 r10, r10, lr, ror #8
        uxtab16 r8, r8, r9
        uxtab16 r11, r11, r9, ror #8
        usat16 r7, #8, r7
        usat16 r10, #8, r10
        usat16 r8, #8, r8
        usat16 r11, #8, r11
        orr r7, r7, r10, lsl #8
        orr r8, r8, r11, lsl #8
        str r8, [r0, r2]
        str_post r7, r0, r2, lsl #1
        bne 2b
        pop {r4-r12, pc}
endfunc

@ void vp8_idct_dc_add(uint8_t *dst, int16_t block[16], int stride)
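@ Splats the rounded DC term ((dc + 4) >> 3) into both halfwords of r3 and
@ adds it to all 16 pixels using unsigned add-and-saturate (uxtab16/usat16).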
function ff_vp8_idct_dc_add_armv6, export=1
        push {r4-r6, lr}
        add r6, r0, r2, lsl #1
        ldrsh r3, [r1]
        mov r4, #0
        add r3, r3, #4
        strh r4, [r1], #32
        asr r3, #3
        ldr r5, [r0]
        ldr r4, [r0, r2]
        pkhbt r3, r3, r3, lsl #16
        uxtab16 lr, r3, r5           @ a1+2 | a1+0
        uxtab16 r5, r3, r5, ror #8   @ a1+3 | a1+1
        uxtab16 r12, r3, r4
        uxtab16 r4, r3, r4, ror #8
        usat16 lr, #8, lr
        usat16 r5, #8, r5
        usat16 r12, #8, r12
        usat16 r4, #8, r4
        orr lr, lr, r5, lsl #8
        ldr r5, [r6]
        orr r12, r12, r4, lsl #8
        ldr r4, [r6, r2]
        str lr, [r0]
        uxtab16 lr, r3, r5
        str r12, [r0, r2]
        uxtab16 r5, r3, r5, ror #8
        uxtab16 r12, r3, r4
        uxtab16 r4, r3, r4, ror #8
        usat16 lr, #8, lr
        usat16 r5, #8, r5
        usat16 r12, #8, r12
        usat16 r4, #8, r4
        orr lr, lr, r5, lsl #8
        orr r12, r12, r4, lsl #8
        str lr, [r6]
        str r12, [r6, r2]
        pop {r4-r6, pc}
endfunc

@ void vp8_idct_dc_add4uv(uint8_t *dst, int16_t block[4][16], int stride)
function ff_vp8_idct_dc_add4uv_armv6, export=1
        push {r4, lr}
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, r2, lsl #2
        sub r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        pop {r4, pc}
endfunc

@ void vp8_idct_dc_add4y(uint8_t *dst, int16_t block[4][16], int stride)
function ff_vp8_idct_dc_add4y_armv6, export=1
        push {r4, lr}
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        add r0, r0, #4
        bl ff_vp8_idct_dc_add_armv6
        pop {r4, pc}
endfunc

@ loopfilter
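@ Transpose a 4x4 block of bytes: \i0-\i3 hold four rows on input,
@ \o0-\o3 receive the four columns.  uxtb16 splits even/odd bytes into
@ 16-bit lanes, which orr/pkhbt/pkhtb then regroup.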
.macro transpose o3, o2, o1, o0, i0, i1, i2, i3
        uxtb16 \o1, \i1              @ xx 12 xx 10
        uxtb16 \o0, \i0              @ xx 02 xx 00
        uxtb16 \o3, \i3              @ xx 32 xx 30
        uxtb16 \o2, \i2              @ xx 22 xx 20
        orr \o1, \o0, \o1, lsl #8    @ 12 02 10 00
        orr \o3, \o2, \o3, lsl #8    @ 32 22 30 20
        uxtb16 \i1, \i1, ror #8      @ xx 13 xx 11
        uxtb16 \i3, \i3, ror #8      @ xx 33 xx 31
        uxtb16 \i0, \i0, ror #8      @ xx 03 xx 01
        uxtb16 \i2, \i2, ror #8      @ xx 23 xx 21
        orr \i0, \i0, \i1, lsl #8    @ 13 03 11 01
        orr \i2, \i2, \i3, lsl #8    @ 33 23 31 21
        pkhtb \o2, \o3, \o1, asr #16 @ 32 22 12 02
        pkhbt \o0, \o1, \o3, lsl #16 @ 30 20 10 00
        pkhtb \o3, \i2, \i0, asr #16 @ 33 23 13 03
        pkhbt \o1, \i0, \i2, lsl #16 @ 31 21 11 01
.endm
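
@ VP8 simple loop filter: the edge is filtered only where
@ abs(p0-q0) * 2 + abs(p1-q1) / 2 <= flimit.  Pixels are XORed with 0x80
@ so the saturating qadd8/qsub8 ops act on signed values; the common term
@ w = clamp(p1-q1 + 3*(q0-p0)) is then applied as q0 -= clamp(w+4) >> 3,
@ p0 += clamp(w+3) >> 3, the shifts done as three signed per-byte
@ halvings (shadd8 against zero).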
.macro simple_filter
        uqsub8 r7, r3, r6            @ p1 - q1
        uqsub8 r8, r6, r3            @ q1 - p1
        uqsub8 r10, r4, r5           @ p0 - q0
        uqsub8 r9, r5, r4            @ q0 - p0
        orr r7, r7, r8               @ abs(p1 - q1)
        orr r9, r9, r10              @ abs(p0 - q0)
        uhadd8 r7, r7, lr            @ abs(p1 - q1) >> 1
        uqadd8 r9, r9, r9            @ abs(p0 - q0) * 2
        uqadd8 r7, r7, r9            @ abs(p0 - q0)*2 + abs(p1-q1)/2
        mvn r8, #0
        usub8 r10, r12, r7           @ compare to flimit
        sel r10, r8, lr              @ filter mask: F or 0
        cmp r10, #0
        beq 2f
        eor r3, r3, r2               @ ps1
        eor r6, r6, r2               @ qs1
        eor r4, r4, r2               @ ps0
        eor r5, r5, r2               @ qs0
        qsub8 r3, r3, r6             @ vp8_filter = p1 - q1
        qsub8 r6, r5, r4             @ q0 - p0
        qadd8 r3, r3, r6             @ += q0 - p0
        lsr r7, r2, #5               @ 0x04040404
        qadd8 r3, r3, r6             @ += q0 - p0
        sub r9, r7, r2, lsr #7       @ 0x03030303
        qadd8 r3, r3, r6             @ vp8_filter = p1-q1 + 3*(q0-p0)
        and r3, r3, r10              @ vp8_filter &= mask
        qadd8 r9, r3, r9             @ Filter2 = vp8_filter + 3
        qadd8 r3, r3, r7             @ Filter1 = vp8_filter + 4
        shadd8 r9, r9, lr
        shadd8 r3, r3, lr
        shadd8 r9, r9, lr
        shadd8 r3, r3, lr
        shadd8 r9, r9, lr            @ Filter2 >>= 3
        shadd8 r3, r3, lr            @ Filter1 >>= 3
        qadd8 r4, r4, r9             @ u = p0 + Filter2
        qsub8 r5, r5, r3             @ u = q0 - Filter1
        eor r4, r4, r2               @ *op0 = u ^ 0x80
        eor r5, r5, r2               @ *oq0 = u ^ 0x80
.endm

@ void vp8_v_loop_filter16_simple(uint8_t *dst, int stride, int flim)
function ff_vp8_v_loop_filter16_simple_armv6, export=1
        push {r4-r11, lr}
        orr r2, r2, r2, lsl #16
        mov r11, #4
        mov lr, #0
        orr r12, r2, r2, lsl #8
        mov32 r2, 0x80808080
1:
        ldr_nreg r3, r0, r1, lsl #1  @ p1
        ldr_nreg r4, r0, r1          @ p0
        ldr r5, [r0]                 @ q0
        ldr r6, [r0, r1]             @ q1
        simple_filter
T       sub r7, r0, r1
        str r5, [r0]                 @ oq0
A       str r4, [r0, -r1]            @ op0
T       str r4, [r7]
2:
        subs r11, r11, #1
        add r0, r0, #4
        bne 1b
        pop {r4-r11, pc}
endfunc
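
@ Masks for the normal loop filter.  The filter_mask_* macros accumulate
@ saturating compares in lr: |p3-p2|, |p2-p1|, |p1-p0| (and the q-side
@ equivalents) against the interior limit, and abs(p0-q0)*2 +
@ abs(p1-q1)/2 against the edge limit (flimit).  A pixel column is
@ filtered only where every compare gave zero; |p1-p0| and |q1-q0| are
@ additionally compared against thresh for the high-edge-variance mask.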
.macro filter_mask_p
        uqsub8 r6, r9, r10           @ p3 - p2
        uqsub8 r7, r10, r9           @ p2 - p3
        uqsub8 r8, r10, r11          @ p2 - p1
        uqsub8 r10, r11, r10         @ p1 - p2
        orr r6, r6, r7               @ abs(p3-p2)
        orr r8, r8, r10              @ abs(p2-p1)
        uqsub8 lr, r6, r2            @ compare to limit
        uqsub8 r8, r8, r2            @ compare to limit
        uqsub8 r6, r11, r12          @ p1 - p0
        orr lr, lr, r8
        uqsub8 r7, r12, r11          @ p0 - p1
        orr r6, r6, r7               @ abs(p1-p0)
        uqsub8 r7, r6, r2            @ compare to limit
        uqsub8 r8, r6, r3            @ compare to thresh
        orr lr, lr, r7
.endm

.macro filter_mask_pq
        uqsub8 r6, r11, r10          @ p1 - q1
        uqsub8 r7, r10, r11          @ q1 - p1
        uqsub8 r11, r12, r9          @ p0 - q0
        uqsub8 r12, r9, r12          @ q0 - p0
        orr r6, r6, r7               @ abs(p1-q1)
        orr r12, r11, r12            @ abs(p0-q0)
        mov32 r7, 0x7f7f7f7f
        uqadd8 r12, r12, r12         @ abs(p0-q0) * 2
        and r6, r7, r6, lsr #1       @ abs(p1-q1) / 2
        uqadd8 r12, r12, r6          @ abs(p0-q0) * 2 + abs(p1-q1)/2
.endm

.macro filter_mask_v
        filter_mask_p
        ldr r10, [r0, r1]            @ q1
        ldr_post r9, r0, r1, lsl #1  @ q0
        filter_mask_pq
        ldr r11, [r0]                @ q2
        uqsub8 r7, r9, r10           @ q0 - q1
        uqsub8 r6, r10, r9           @ q1 - q0
        uqsub8 r12, r12, r4          @ compare to flimit
        uqsub8 r9, r11, r10          @ q2 - q1
        uqsub8 r10, r10, r11         @ q1 - q2
        orr lr, lr, r12
        ldr r12, [r0, r1]            @ q3
        orr r6, r7, r6               @ abs(q1-q0)
        orr r10, r9, r10             @ abs(q2-q1)
        uqsub8 r9, r12, r11          @ q3 - q2
        uqsub8 r11, r11, r12         @ q2 - q3
        uqsub8 r7, r6, r2            @ compare to limit
        uqsub8 r10, r10, r2          @ compare to limit
        uqsub8 r6, r6, r3            @ compare to thresh
        orr r9, r9, r11              @ abs(q3-q2)
        orr lr, lr, r7
        orr lr, lr, r10
        uqsub8 r9, r9, r2            @ compare to limit
        orr lr, lr, r9
        mov r12, #0
        usub8 lr, r12, lr
        mvn r11, #0
        sel lr, r11, r12             @ filter mask
        sub r0, r0, r1, lsl #1
.endm

.macro filter_mask_h
        transpose r12, r11, r10, r9, r6, r7, r8, lr
        filter_mask_p
        stm sp, {r8, r11, r12, lr}
        sub r0, r0, r1, lsl #2
        add r0, r0, #4
        ldr r7, [r0, r1]
        ldr_post r6, r0, r1, lsl #1
        ldr lr, [r0, r1]
        ldr r8, [r0]
        transpose r12, r11, r10, r9, r6, r7, r8, lr
        uqsub8 r8, r12, r11          @ q3 - q2
        uqsub8 lr, r11, r12          @ q2 - q3
        uqsub8 r7, r9, r10           @ q0 - q1
        uqsub8 r6, r10, r9           @ q1 - q0
        uqsub8 r12, r11, r10         @ q2 - q1
        uqsub8 r11, r10, r11         @ q1 - q2
        orr r8, r8, lr               @ abs(q3-q2)
        orr r6, r7, r6               @ abs(q1-q0)
        orr r11, r12, r11            @ abs(q2-q1)
        ldr lr, [sp, #12]            @ load back (f)limit accumulator
        uqsub8 r8, r8, r2            @ compare to limit
        uqsub8 r7, r6, r2            @ compare to limit
        uqsub8 r11, r11, r2          @ compare to limit
        orr lr, lr, r8
        uqsub8 r8, r6, r3            @ compare to thresh
        orr lr, lr, r7
        ldr r12, [sp, #8]            @ p1
        orr lr, lr, r11
        ldr r11, [sp, #4]            @ p0
        filter_mask_pq
        mov r10, #0
        uqsub8 r12, r12, r4          @ compare to flimit
        mvn r11, #0
        orr lr, lr, r12
        usub8 lr, r10, lr
        sel lr, r11, r10             @ filter mask
.endm
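
@ Shared filter kernel (pixels biased by 0x80):
@   w = clamp(clamp(ps1 - qs1) + 3 * clamp(qs0 - ps0)) & mask
@ (the p1 - q1 term is pre-masked by hev for the inner filter, while the
@ macroblock filter applies hev afterwards and keeps the raw w in r12);
@ Filter1 = clamp(w + 4) >> 3 and Filter2 = clamp(w + 3) >> 3, the shifts
@ done as three signed per-byte halvings (shadd8 against zero).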
.macro filter inner
        mov32 r12, 0x80808080
        eor r11, r7, r12             @ ps1
        eor r8, r8, r12              @ ps0
        eor r9, r9, r12              @ qs0
        eor r10, r10, r12            @ qs1
        stm sp, {r8-r11}
        qsub8 r7, r11, r10           @ vp8_signed_char_clamp(ps1-qs1)
        qsub8 r8, r9, r8             @ vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0))
.if \inner
        and r7, r7, r6               @ vp8_filter &= hev
.endif
        qadd8 r7, r7, r8
        lsr r10, r12, #5             @ 0x04040404
        qadd8 r7, r7, r8
        sub r9, r10, r12, lsr #7     @ 0x03030303
        qadd8 r7, r7, r8
        and r7, r7, lr               @ vp8_filter &= mask
.if !\inner
        mov r12, r7                  @ Filter2
        and r7, r7, r6               @ Filter2 &= hev
.endif
        qadd8 lr, r7, r9             @ Filter2 = vp8_signed_char_clamp(vp8_filter+3)
        qadd8 r7, r7, r10            @ Filter1 = vp8_signed_char_clamp(vp8_filter+4)
        mov r9, #0
        shadd8 lr, lr, r9            @ Filter2 >>= 3
        shadd8 r7, r7, r9            @ Filter1 >>= 3
        shadd8 lr, lr, r9
        shadd8 r7, r7, r9
        shadd8 lr, lr, r9            @ Filter2
        shadd8 r7, r7, r9            @ Filter1
.endm

.macro filter_v inner
        orr r10, r6, r8              @ calculate vp8_hevmask
        ldr_nreg r7, r0, r1, lsl #1  @ p1
        usub8 r10, r12, r10
        ldr_nreg r8, r0, r1          @ p0
        sel r6, r12, r11             @ obtain vp8_hevmask
        ldr r9, [r0]                 @ q0
        ldr r10, [r0, r1]            @ q1
        filter \inner
.endm

.macro filter_h inner
        orr r9, r6, r8
        usub8 r9, r12, r9
        sel r6, r12, r11             @ hev mask
        stm sp, {r6, lr}
        ldr_nreg r12, r0, r1, lsl #1
        ldr_nreg r11, r0, r1
        ldr r6, [r0]
        ldr lr, [r0, r1]
        transpose r10, r9, r8, r7, r12, r11, r6, lr
        ldm sp, {r6, lr}
        filter \inner
.endm

.macro filter_inner
        ldm sp, {r8, r9}
        lsr r10, r10, #2             @ 0x01010101
        qadd8 r8, r8, lr             @ u = vp8_signed_char_clamp(ps0 + Filter2)
        mov lr, #0
        qsub8 r9, r9, r7             @ u = vp8_signed_char_clamp(qs0 - Filter1)
        sadd8 r7, r7, r10            @ vp8_filter += 1
        ldr r10, [sp, #8]            @ qs1
        shadd8 r7, r7, lr            @ vp8_filter >>= 1
        eor r8, r8, r12              @ *op0 = u ^ 0x80
        bic r7, r7, r6               @ vp8_filter &= ~hev
        qadd8 r11, r11, r7           @ u = vp8_signed_char_clamp(ps1 + vp8_filter)
        eor r9, r9, r12              @ *oq0 = u ^ 0x80
        qsub8 r10, r10, r7           @ u = vp8_signed_char_clamp(qs1 - vp8_filter)
        eor r11, r11, r12            @ *op1 = u ^ 0x80
        eor r10, r10, r12            @ *oq1 = u ^ 0x80
.endm
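
@ Macroblock-edge refinement: u = clamp((c0 * Filter2 + 63) >> 7), with
@ c0 = 27, 18 and 9 for the p0/q0, p1/q1 and p2/q2 pairs respectively
@ (filter_1/filter_2/filter_3 below); u is added to the p pixel and
@ subtracted from the q pixel.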
.macro filter_x c0
        mov lr, \c0
        mov r7, #63
        sxtb16 r6, r12
        sxtb16 r10, r12, ror #8
        smlabb r8, r6, lr, r7
        smlatb r6, r6, lr, r7
        smlabb r7, r10, lr, r7
        smultb r10, r10, lr
        ssat r8, #8, r8, asr #7
        ssat r6, #8, r6, asr #7
        add r10, r10, #63
        ssat r7, #8, r7, asr #7
        ssat r10, #8, r10, asr #7
        pkhbt r6, r8, r6, lsl #16
        pkhbt r10, r7, r10, lsl #16
        uxtb16 r6, r6
        uxtb16 r10, r10
        mov32 lr, 0x80808080
        orr r10, r6, r10, lsl #8     @ u = vp8_signed_char_clamp((63 + Filter2 * \c0) >> 7)
        qsub8 r8, r9, r10            @ s = vp8_signed_char_clamp(qs0 - u)
        qadd8 r10, r11, r10          @ s = vp8_signed_char_clamp(ps0 + u)
        eor r8, r8, lr               @ *oq0 = s ^ 0x80
        eor r10, r10, lr             @ *op0 = s ^ 0x80
.endm

.macro filter_1
        ldm sp, {r8, r9}
        qadd8 r11, r8, lr
        qsub8 r9, r9, r7
        bic r12, r12, r6             @ vp8_filter &= ~hev
        filter_x #27
.endm

.macro filter_2
        ldr r9, [sp, #8]             @ qs1
        ldr r11, [sp, #12]           @ ps1
        filter_x #18
.endm

.macro filter_3
        eor r9, r9, lr
        eor r11, r11, lr
        filter_x #9
.endm
function vp8_v_loop_filter_inner_armv6
        mov r5, #4
        sub sp, sp, #16
        orr r2, r2, r2, lsl #16
        orr r3, r3, r3, lsl #16
        orr r6, r6, r6, lsl #16
        orr r4, r2, r2, lsl #8       @ flimE
        orr r2, r3, r3, lsl #8       @ flimI
        orr r3, r6, r6, lsl #8       @ thresh
1:
        sub r0, r0, r1, lsl #2
        ldr r10, [r0, r1]            @ p2
        ldr_post r9, r0, r1, lsl #1  @ p3
        ldr r12, [r0, r1]            @ p0
        ldr_post r11, r0, r1, lsl #1 @ p1
        filter_mask_v
        cmp lr, #0
        beq 2f
        filter_v inner=1
        filter_inner
A       str r11, [r0, -r1, lsl #1]   @ op1
A       str r8, [r0, -r1]            @ op0
T       sub r0, r0, r1, lsl #1
T       str r8, [r0, r1]
T       str_post r11, r0, r1, lsl #1
        str r9, [r0]                 @ oq0
        str r10, [r0, r1]            @ oq1
2:
        add r0, r0, #4
        cmp r5, #3
        it eq
        ldreq r0, [sp, #16]
        subs r5, r5, #1
        bne 1b
        add sp, sp, #16
        pop {r0, r4-r11, pc}
endfunc

function ff_vp8_v_loop_filter16_inner_armv6, export=1
        push {r4-r11, lr}
        add r12, r0, #8
        push {r12}
        ldr r6, [sp, #40]
        orr r2, r2, r2, lsl #16
        b vp8_v_loop_filter_inner_armv6
endfunc

function ff_vp8_v_loop_filter8uv_inner_armv6, export=1
        push {r1, r4-r11, lr}
        mov r1, r2
        orr r2, r3, r3, lsl #16
        ldr r3, [sp, #40]
        ldr r6, [sp, #44]
        b vp8_v_loop_filter_inner_armv6
endfunc

function vp8_v_loop_filter_armv6
        mov r5, #4
        sub sp, sp, #16
        orr r3, r3, r3, lsl #16
        orr r6, r6, r6, lsl #16
        orr r4, r2, r2, lsl #8       @ flimE
        orr r2, r3, r3, lsl #8       @ flimI
        orr r3, r6, r6, lsl #8       @ thresh
1:
        sub r0, r0, r1, lsl #2
        ldr r10, [r0, r1]            @ p2
        ldr_post r9, r0, r1, lsl #1  @ p3
        ldr r12, [r0, r1]            @ p0
        ldr_post r11, r0, r1, lsl #1 @ p1
        filter_mask_v
        cmp lr, #0
        beq 2f
        filter_v inner=0
        filter_1
        str r8, [r0]                 @ *oq0
A       str r10, [r0, -r1]           @ *op0
T       sub r0, r0, r1, lsl #1
T       str r10, [r0, r1]
        filter_2
A       str r10, [r0, -r1, lsl #1]   @ *op1
T       str_post r10, r0, r1, lsl #1
        str r8, [r0, r1]             @ *oq1
        ldr r9, [r0, r1, lsl #1]     @ q2
        add r0, r0, r1
A       ldr r11, [r0, -r1, lsl #2]   @ p2
T       ldr_dpre r11, r0, r1, lsl #2
        filter_3
A       str r10, [r0, -r1, lsl #2]   @ *op2
T       str_post r10, r0, r1, lsl #2
        str r8, [r0, r1]             @ *oq2
        sub r0, r0, r1
2:
        add r0, r0, #4
        cmp r5, #3
        it eq
        ldreq r0, [sp, #16]
        subs r5, r5, #1
        bne 1b
        add sp, sp, #16
        pop {r0, r4-r11, pc}
endfunc

function ff_vp8_v_loop_filter16_armv6, export=1
        push {r4-r11, lr}
        add r12, r0, #8
        push {r12}
        ldr r6, [sp, #40]
        orr r2, r2, r2, lsl #16
        b vp8_v_loop_filter_armv6
endfunc

function ff_vp8_v_loop_filter8uv_armv6, export=1
        push {r1, r4-r11, lr}
        mov r1, r2
        orr r2, r3, r3, lsl #16
        ldr r3, [sp, #40]
        ldr r6, [sp, #44]
        b vp8_v_loop_filter_armv6
endfunc
@ void vp8_h_loop_filter16_simple(uint8_t *dst, int stride, int flim)
function ff_vp8_h_loop_filter16_simple_armv6, export=1
        push {r4-r11, lr}
        orr r12, r2, r2, lsl #16
        mov32 r2, 0x80808080
        orr r12, r12, r12, lsl #8
        mov lr, #0
        mov r11, #4
1:
        sub r0, r0, #2
        ldr r8, [r0, r1]
        ldr_post r7, r0, r1, lsl #1
        ldr r10, [r0, r1]
        ldr_post r9, r0, r1, lsl #1
        add r0, r0, #2
        transpose r6, r5, r4, r3, r7, r8, r9, r10
        simple_filter
        sub r0, r0, r1, lsl #2
        sub r0, r0, #1
        uxtb16 r6, r4
        uxtb16 r8, r5
        uxtb16 r7, r4, ror #8
        uxtb16 r9, r5, ror #8
        orr r6, r6, r8, lsl #8
        orr r7, r7, r9, lsl #8
        lsr r4, r6, #16
        lsr r5, r7, #16
        strh_post r6, r0, r1
        strh_post r7, r0, r1
        strh_post r4, r0, r1
        strh_post r5, r0, r1
        add r0, r0, #1
2:
        subs r11, r11, #1
        bne 1b
        pop {r4-r11, pc}
endfunc
function vp8_h_loop_filter_inner_armv6
        mov r5, #4
        sub sp, sp, #16
        orr r3, r3, r3, lsl #16
        orr r9, r9, r9, lsl #16
        orr r4, r2, r2, lsl #8       @ flimE
        orr r2, r3, r3, lsl #8       @ flimI
        orr r3, r9, r9, lsl #8       @ thresh
        sub r0, r0, #4
1:
        ldr r7, [r0, r1]
        ldr_post r6, r0, r1, lsl #1
        ldr lr, [r0, r1]
        ldr_post r8, r0, r1, lsl #1
        filter_mask_h
        cmp lr, #0
        sub r0, r0, #2
        beq 2f
        ldr r6, [sp]
        filter_h inner=1
        filter_inner
        transpose lr, r12, r7, r6, r11, r8, r9, r10
A       str r6, [r0, -r1, lsl #1]
A       str r7, [r0, -r1]
T       sub r0, r0, r1, lsl #1
T       str r7, [r0, r1]
T       str_post r6, r0, r1, lsl #1
        str r12, [r0]
        str lr, [r0, r1]
2:
        sub r0, r0, #2
        add r0, r0, r1, lsl #1
        cmp r5, #3
        it eq
        ldreq r0, [sp, #16]
        subs r5, r5, #1
        bne 1b
        add sp, sp, #16
        pop {r0, r4-r11, pc}
endfunc

function ff_vp8_h_loop_filter16_inner_armv6, export=1
        push {r4-r11, lr}
        add r12, r0, r1, lsl #3
        sub r12, r12, #4
        push {r12}
        ldr r9, [sp, #40]
        orr r2, r2, r2, lsl #16
        b vp8_h_loop_filter_inner_armv6
endfunc

function ff_vp8_h_loop_filter8uv_inner_armv6, export=1
        sub r1, r1, #4
        push {r1, r4-r11, lr}
        mov r1, r2
        orr r2, r3, r3, lsl #16
        ldr r3, [sp, #40]
        ldr r9, [sp, #44]
        b vp8_h_loop_filter_inner_armv6
endfunc
function vp8_h_loop_filter_armv6
        mov r5, #4
        sub sp, sp, #16
        orr r3, r3, r3, lsl #16
        orr r9, r9, r9, lsl #16
        orr r4, r2, r2, lsl #8       @ flimE
        orr r2, r3, r3, lsl #8       @ flimI
        orr r3, r9, r9, lsl #8       @ thresh
1:
        sub r0, r0, #4
        ldr r7, [r0, r1]
        ldr_post r6, r0, r1, lsl #1
        ldr lr, [r0, r1]
        ldr_post r8, r0, r1, lsl #1
        filter_mask_h
        cmp lr, #0
        it eq
        addeq r0, r0, r1, lsl #1
        beq 2f
        ldr r6, [sp]
        sub r0, r0, #2
        filter_h inner=0
        filter_1
        sub r0, r0, r1, lsl #1
        uxtb16 r6, r10
        uxtb16 r7, r8
        uxtb16 r10, r10, ror #8
        uxtb16 r8, r8, ror #8
        orr r6, r6, r7, lsl #8
        orr r10, r10, r8, lsl #8
        lsr r7, r6, #16
        lsr r8, r10, #16
        add r0, r0, #1
        strh_post r6, r0, r1
        strh_post r10, r0, r1
        strh_post r7, r0, r1
        strh_post r8, r0, r1
        filter_2
        sub r0, r0, r1, lsl #2
        add r0, r0, #3
        ldrb r11, [r0, #-5]          @ p2 for 1/7th difference
        strb r10, [r0, #-4]          @ op1
        strb r8, [r0, #-1]           @ oq1
        ldrb_post r9, r0, r1         @ q2 for 1/7th difference
        lsr r10, r10, #8
        lsr r8, r8, #8
        ldrb r6, [r0, #-5]
        strb r10, [r0, #-4]
        strb r8, [r0, #-1]
        ldrb_post r7, r0, r1
        lsr r10, r10, #8
        lsr r8, r8, #8
        orr r11, r11, r6, lsl #8
        orr r9, r9, r7, lsl #8
        ldrb r6, [r0, #-5]
        strb r10, [r0, #-4]
        strb r8, [r0, #-1]
        ldrb_post r7, r0, r1
        lsr r10, r10, #8
        lsr r8, r8, #8
        orr r11, r11, r6, lsl #16
        orr r9, r9, r7, lsl #16
        ldrb r6, [r0, #-5]
        strb r10, [r0, #-4]
        strb r8, [r0, #-1]
        ldrb_post r7, r0, r1
        orr r11, r11, r6, lsl #24
        orr r9, r9, r7, lsl #24
        filter_3
        sub r0, r0, r1, lsl #2
        strb r10, [r0, #-5]
        strb_post r8, r0, r1
        lsr r10, r10, #8
        lsr r8, r8, #8
        strb r10, [r0, #-5]
        strb_post r8, r0, r1
        lsr r10, r10, #8
        lsr r8, r8, #8
        strb r10, [r0, #-5]
        strb_post r8, r0, r1
        lsr r10, r10, #8
        lsr r8, r8, #8
        strb r10, [r0, #-5]
        strb_post r8, r0, r1
        sub r0, r0, #2
2:
        cmp r5, #3
        it eq
        ldreq r0, [sp, #16]
        subs r5, r5, #1
        bne 1b
        add sp, sp, #16
        pop {r0, r4-r11, pc}
endfunc

function ff_vp8_h_loop_filter16_armv6, export=1
        push {r4-r11, lr}
        add r12, r0, r1, lsl #3
        push {r12}
        ldr r9, [sp, #40]
        orr r2, r2, r2, lsl #16
        b vp8_h_loop_filter_armv6
endfunc

function ff_vp8_h_loop_filter8uv_armv6, export=1
        push {r1, r4-r11, lr}
        mov r1, r2
        orr r2, r3, r3, lsl #16
        ldr r3, [sp, #40]
        ldr r9, [sp, #44]
        b vp8_h_loop_filter_armv6
endfunc
.ltorg

@ MC

@ void put_vp8_pixels16(uint8_t *dst, int dststride, uint8_t *src,
@                       int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels16_armv6, export=1
        push {r4-r11}
        ldr r12, [sp, #32]           @ h
1:
        subs r12, r12, #2
        ldr r5, [r2, #4]
        ldr r6, [r2, #8]
        ldr r7, [r2, #12]
        ldr_post r4, r2, r3
        ldr r9, [r2, #4]
        ldr r10, [r2, #8]
        ldr r11, [r2, #12]
        ldr_post r8, r2, r3
        strd r6, r7, [r0, #8]
        strd_post r4, r5, r0, r1
        strd r10, r11, [r0, #8]
        strd_post r8, r9, r0, r1
        bgt 1b
        pop {r4-r11}
        bx lr
endfunc

@ void put_vp8_pixels8(uint8_t *dst, int dststride, uint8_t *src,
@                      int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels8_armv6, export=1
        push {r4-r11}
        ldr r12, [sp, #32]           @ h
1:
        subs r12, r12, #4
        ldr r5, [r2, #4]
        ldr_post r4, r2, r3
        ldr r7, [r2, #4]
        ldr_post r6, r2, r3
        ldr r9, [r2, #4]
        ldr_post r8, r2, r3
        ldr r11, [r2, #4]
        ldr_post r10, r2, r3
        strd_post r4, r5, r0, r1
        strd_post r6, r7, r0, r1
        strd_post r8, r9, r0, r1
        strd_post r10, r11, r0, r1
        bgt 1b
        pop {r4-r11}
        bx lr
endfunc

@ void put_vp8_pixels4(uint8_t *dst, int dststride, uint8_t *src,
@                      int srcstride, int h, int mx, int my)
function ff_put_vp8_pixels4_armv6, export=1
        ldr r12, [sp, #0]            @ h
        push {r4-r6, lr}
1:
        subs r12, r12, #4
        ldr_post r4, r2, r3
        ldr_post r5, r2, r3
        ldr_post r6, r2, r3
        ldr_post lr, r2, r3
        str_post r4, r0, r1
        str_post r5, r0, r1
        str_post r6, r0, r1
        str_post lr, r0, r1
        bgt 1b
        pop {r4-r6, pc}
endfunc
@ note: worst-case sum of all 6-tap filter values * 255 is 0x7f80, so 16-bit
@ arithmetic can be used to apply the filters
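@ The table names encode the storage order: the six taps of each filter
@ are stored as 1, 3, 2, 4, 5, 6 (plus two zeros of padding) so that
@ smlad can multiply-accumulate packed sample pairs directly.  Only the
@ even subpel positions 2, 4 and 6 need all six taps; the odd positions
@ use the 4-tap table below, stored in 1, 3, 2, 4 order.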
const sixtap_filters_13245600, align=4
        .short 2, 108, -11, 36, -8, 1, 0, 0
        .short 3, 77, -16, 77, -16, 3, 0, 0
        .short 1, 36, -8, 108, -11, 2, 0, 0
endconst

const fourtap_filters_1324, align=4
        .short -6, 12, 123, -1
        .short -9, 50, 93, -6
        .short -6, 93, 50, -9
        .short -1, 123, 12, -6
endconst
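
@ Wrappers for the single-pass MC cases.  vp8_mc_1 rearranges the public
@ arguments into the internal calling convention documented before
@ vp8_put_epel_h6 below, then tail-calls the inner function.  The "+ 4"
@ branches past the inner function's initial 4-byte push: the same
@ registers have already been pushed here.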
.macro vp8_mc_1 name, size, hv
function ff_put_vp8_\name\size\()_\hv\()_armv6, export=1
        sub r1, r1, #\size
        mov r12, sp
        push {r1, r4-r11, lr}
        ldm r12, {r5-r7}
        mov r4, #\size
        stm r12, {r4, r5}
        orr r12, r6, r7
        b vp8_put_\name\()_\hv\()_armv6 + 4
endfunc
.endm

vp8_mc_1 epel,  16, h6
vp8_mc_1 epel,  16, v6
vp8_mc_1 epel,  8,  h6
vp8_mc_1 epel,  8,  v6
vp8_mc_1 epel,  8,  h4
vp8_mc_1 epel,  8,  v4
vp8_mc_1 epel,  4,  h6
vp8_mc_1 epel,  4,  v6
vp8_mc_1 epel,  4,  h4
vp8_mc_1 epel,  4,  v4
vp8_mc_1 bilin, 16, h
vp8_mc_1 bilin, 16, v
vp8_mc_1 bilin, 8,  h
vp8_mc_1 bilin, 8,  v
vp8_mc_1 bilin, 4,  h
vp8_mc_1 bilin, 4,  v

/* True relational expressions have the value -1 in the GNU assembler,
   +1 in Apple's. */
#ifdef __APPLE__
# define TMPSIZE \size * (8 + 8*(\size > 4) + \ytaps - 1)
#else
# define TMPSIZE \size * (8 - 8*(\size > 4) + \ytaps - 1)
#endif
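
@ TMPSIZE works out to \size * (maxh + \ytaps - 1) bytes of intermediate
@ rows (maxh being 16 for widths above 4, 8 otherwise); the relational
@ term is written so both assemblers produce the same value.  The
@ two-pass wrapper runs the horizontal filter into this stack buffer,
@ then the vertical filter from the buffer into dst.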
.macro vp8_mc_hv name, size, h, v, ytaps
function ff_put_vp8_\name\size\()_\h\v\()_armv6, export=1
        push {r0, r1, r4, lr}
        add r0, sp, #16
        sub sp, sp, #TMPSIZE+16
        ldm r0, {r0, r12}
        mov r4, #\size
        add lr, r0, #\ytaps-1
.if \ytaps > 2
        sub r2, r2, r3, lsl #\ytaps >> 1 & 1
.endif
        stm sp, {r4, lr}
        add r0, sp, #16
        mov r1, #0
        bl vp8_put_\name\()_\h\()_armv6
        add r0, sp, #TMPSIZE+16
        ldr lr, [sp, #TMPSIZE+16+16]
        ldm r0, {r0, r1}
        mov r3, #\size
        ldr r12, [sp, #TMPSIZE+16+16+8]
        str lr, [sp, #4]
        add r2, sp, #16 + \size * (\ytaps / 2 - 1)
        sub r1, r1, #\size
        bl vp8_put_\name\()_\v\()_armv6
        add sp, sp, #TMPSIZE+16+8
        pop {r4, pc}
endfunc
.endm

vp8_mc_hv epel,  16, h6, v6, 6
vp8_mc_hv epel,  8,  h6, v6, 6
vp8_mc_hv epel,  8,  h4, v6, 6
vp8_mc_hv epel,  8,  h6, v4, 4
vp8_mc_hv epel,  8,  h4, v4, 4
vp8_mc_hv epel,  4,  h6, v6, 6
vp8_mc_hv epel,  4,  h4, v6, 6
vp8_mc_hv epel,  4,  h6, v4, 4
vp8_mc_hv epel,  4,  h4, v4, 4
vp8_mc_hv bilin, 16, h,  v,  2
vp8_mc_hv bilin, 8,  h,  v,  2
vp8_mc_hv bilin, 4,  h,  v,  2
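
@ Saturate four 16-bit filter sums (still scaled by 128, with the 0x40
@ rounding bias already added) to unsigned bytes and pack them into \r0.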
.macro sat4 r0, r1, r2, r3
        asr \r0, \r0, #7
        asr \r1, \r1, #7
        pkhbt \r0, \r0, \r2, lsl #9
        pkhbt \r1, \r1, \r3, lsl #9
        usat16 \r0, #8, \r0
        usat16 \r1, #8, \r1
        orr \r0, \r0, \r1, lsl #8
.endm

@ Calling convention for the inner MC functions:
@       r0      dst
@       r1      dst_stride - block_width
@       r2      src
@       r3      src_stride
@       r4      block_width
@       r12     filter_index
@       [sp]    block_width
@       [sp+4]  height
@       [sp+8]  scratch

function vp8_put_epel_h6_armv6
        push {r1, r4-r11, lr}
        sub r2, r2, #2
        movrel lr, sixtap_filters_13245600 - 16
        add lr, lr, r12, lsl #3
        sub r3, r3, r4
        str r3, [sp, #48]
        ldm lr, {r1, r3, lr}
1:
        ldr r7, [r2, #5]             @ src[5-8]
        ldr r6, [r2, #2]             @ src[2-5]
        ldr r5, [r2], #4             @ src[0-3]
        pkhtb r7, r7, r7, asr #8     @ src[8,7,7,6]
        uxtb16 r9, r6, ror #8        @ src[5] | src[3]
        uxtb16 r6, r6                @ src[4] | src[2]
        uxtb16 r8, r5, ror #8        @ src[3] | src[1]
        uxtb16 r11, r7, ror #8       @ src[8] | src[7]
        uxtb16 r7, r7                @ src[7] | src[6]
        uxtb16 r5, r5                @ src[2] | src[0]
        mov r10, #0x40
        smlad r5, r5, r1, r10        @ filter[0][0]
        smlad r11, r11, lr, r10      @ filter[3][2]
        smlad r12, r7, lr, r10       @ filter[2][2]
        smlad r10, r8, r1, r10       @ filter[1][0]
        smlad r5, r8, r3, r5         @ filter[0][1]
        smlad r11, r9, r1, r11       @ filter[3][0]
        smlad r12, r9, r3, r12       @ filter[2][1]
        pkhtb r9, r9, r6, asr #16    @ src[5] | src[4]
        smlad r10, r6, r3, r10       @ filter[1][1]
        pkhbt r7, r9, r7, lsl #16    @ src[6] | src[4]
        smlad r5, r9, lr, r5         @ filter[0][2]
        pkhtb r8, r7, r9, asr #16    @ src[6] | src[5]
        smlad r11, r7, r3, r11       @ filter[3][1]
        smlad r9, r8, lr, r10        @ filter[1][2]
        smlad r7, r6, r1, r12        @ filter[2][0]
        subs r4, r4, #4
        sat4 r5, r9, r7, r11
        str r5, [r0], #4
        bne 1b
        add r4, sp, #40
        ldm r4, {r4, r5, r12}
        ldr r6, [sp]
        subs r5, r5, #1
        add r2, r2, r12
        str r5, [sp, #44]
        add r0, r0, r6
        bne 1b
        pop {r1, r4-r11, pc}
endfunc

function vp8_put_epel_v6_armv6
        push {r1, r4-r11, lr}
        movrel lr, sixtap_filters_13245600 - 16
        add lr, lr, r12, lsl #3
        str r3, [sp, #48]
1:
        add r1, r3, r3, lsl #1       @ stride * 3
        ldr_nreg r5, r2, r3          @ src[0,1,2,3 + stride * 1]
        ldr r6, [r2, r3]             @ src[0,1,2,3 + stride * 3]
        ldr r7, [r2, r3, lsl #1]     @ src[0,1,2,3 + stride * 4]
        ldr r8, [r2, r1]             @ src[0,1,2,3 + stride * 5]
        uxtb16 r9, r5, ror #8        @ src[3 + s*1] | src[1 + s*1]
        uxtb16 r10, r6, ror #8       @ src[3 + s*3] | src[1 + s*3]
        uxtb16 r11, r7, ror #8       @ src[3 + s*4] | src[1 + s*4]
        uxtb16 r12, r8, ror #8       @ src[3 + s*5] | src[1 + s*5]
        uxtb16 r5, r5                @ src[2 + s*1] | src[0 + s*1]
        uxtb16 r6, r6                @ src[2 + s*3] | src[0 + s*3]
        uxtb16 r7, r7                @ src[2 + s*4] | src[0 + s*4]
        uxtb16 r8, r8                @ src[2 + s*5] | src[0 + s*5]
        pkhbt r1, r9, r10, lsl #16   @ src[1 + s*3] | src[1 + s*1]
        pkhtb r9, r10, r9, asr #16   @ src[3 + s*3] | src[3 + s*1]
        pkhbt r10, r11, r12, lsl #16 @ src[1 + s*5] | src[1 + s*4]
        pkhtb r11, r12, r11, asr #16 @ src[3 + s*5] | src[3 + s*4]
        pkhbt r12, r5, r6, lsl #16   @ src[0 + s*3] | src[0 + s*1]
        pkhtb r5, r6, r5, asr #16    @ src[2 + s*3] | src[2 + s*1]
        pkhbt r6, r7, r8, lsl #16    @ src[0 + s*5] | src[0 + s*4]
        pkhtb r7, r8, r7, asr #16    @ src[2 + s*5] | src[2 + s*4]
        ldr r8, [lr, #4]
        mov r3, #0x40
        smlad r12, r12, r8, r3       @ filter[0][1]
        smlad r1, r1, r8, r3         @ filter[1][1]
        smlad r5, r5, r8, r3         @ filter[2][1]
        smlad r9, r9, r8, r3         @ filter[3][1]
        ldr r8, [lr, #8]
        ldr r3, [sp, #48]
        smlad r12, r6, r8, r12       @ filter[0][2]
        smlad r1, r10, r8, r1        @ filter[1][2]
        ldr_nreg r6, r2, r3, lsl #1  @ src[0,1,2,3 + stride * 0]
        ldr r10, [r2], #4            @ src[0,1,2,3 + stride * 2]
        smlad r5, r7, r8, r5         @ filter[2][2]
        smlad r9, r11, r8, r9        @ filter[3][2]
        uxtb16 r7, r6, ror #8        @ src[3 + s*0] | src[1 + s*0]
        uxtb16 r11, r10, ror #8      @ src[3 + s*2] | src[1 + s*2]
        uxtb16 r6, r6                @ src[2 + s*0] | src[0 + s*0]
        uxtb16 r10, r10              @ src[2 + s*2] | src[0 + s*2]
        pkhbt r8, r7, r11, lsl #16   @ src[1 + s*2] | src[1 + s*0]
        pkhtb r7, r11, r7, asr #16   @ src[3 + s*2] | src[3 + s*0]
        pkhbt r11, r6, r10, lsl #16  @ src[0 + s*2] | src[0 + s*0]
        pkhtb r6, r10, r6, asr #16   @ src[2 + s*2] | src[2 + s*0]
        ldr r10, [lr]
        subs r4, r4, #4
        smlad r12, r11, r10, r12     @ filter[0][0]
        smlad r1, r8, r10, r1        @ filter[1][0]
        smlad r5, r6, r10, r5        @ filter[2][0]
        smlad r9, r7, r10, r9        @ filter[3][0]
        sat4 r12, r1, r5, r9
        str r12, [r0], #4
        bne 1b
        ldrd r4, r5, [sp, #40]
        ldr r6, [sp]
        subs r5, r5, #1
        sub r2, r2, r4
        str r5, [sp, #44]
        add r0, r0, r6
        add r2, r2, r3
        bne 1b
        pop {r1, r4-r11, pc}
endfunc

function vp8_put_epel_h4_armv6
        push {r1, r4-r11, lr}
        subs r2, r2, #1
        movrel lr, fourtap_filters_1324 - 4
        add lr, lr, r12, lsl #2
        sub r3, r3, r4
        ldm lr, {r5, r6}
        ldr lr, [sp, #44]
1:
        ldr r9, [r2, #3]
        ldr r8, [r2, #2]
        ldr r7, [r2], #4
        uxtb16 r9, r9, ror #8        @ src[6] | src[4]
        uxtb16 r10, r8, ror #8       @ src[5] | src[3]
        uxtb16 r8, r8                @ src[4] | src[2]
        uxtb16 r11, r7, ror #8       @ src[3] | src[1]
        uxtb16 r7, r7                @ src[2] | src[0]
        mov r12, #0x40
        smlad r9, r9, r6, r12        @ filter[3][1]
        smlad r7, r7, r5, r12        @ filter[0][0]
        smlad r9, r10, r5, r9        @ filter[3][0]
        smlad r10, r10, r6, r12      @ filter[2][1]
        smlad r12, r11, r5, r12      @ filter[1][0]
        smlad r7, r11, r6, r7        @ filter[0][1]
        smlad r10, r8, r5, r10       @ filter[2][0]
        smlad r12, r8, r6, r12       @ filter[1][1]
        subs r4, r4, #4
        sat4 r7, r12, r10, r9
        str r7, [r0], #4
        bne 1b
        subs lr, lr, #1
        ldr r4, [sp, #40]
        add r2, r2, r3
        add r0, r0, r1
        bne 1b
        pop {r1, r4-r11, pc}
endfunc

function vp8_put_epel_v4_armv6
        push {r1, r4-r11, lr}
        movrel lr, fourtap_filters_1324 - 4
        add lr, lr, r12, lsl #2
        ldm lr, {r5, r6}
        str r3, [sp, #48]
1:
        ldr lr, [r2, r3, lsl #1]
        ldr r12, [r2, r3]
        ldr_nreg r7, r2, r3
        ldr r11, [r2], #4
        uxtb16 r8, lr, ror #8        @ src[3 + s*3] | src[1 + s*3]
        uxtb16 r9, r12, ror #8       @ src[3 + s*2] | src[1 + s*2]
        uxtb16 r3, r7, ror #8        @ src[3 + s*0] | src[1 + s*0]
        uxtb16 r1, r11, ror #8       @ src[3 + s*1] | src[1 + s*1]
        uxtb16 lr, lr                @ src[2 + s*3] | src[0 + s*3]
        uxtb16 r12, r12              @ src[2 + s*2] | src[0 + s*2]
        uxtb16 r7, r7                @ src[2 + s*0] | src[0 + s*0]
        uxtb16 r11, r11              @ src[2 + s*1] | src[0 + s*1]
        pkhbt r10, r1, r8, lsl #16   @ src[1 + s*3] | src[1 + s*1]
        pkhtb r1, r8, r1, asr #16    @ src[3 + s*3] | src[3 + s*1]
        pkhbt r8, r3, r9, lsl #16    @ src[1 + s*2] | src[1 + s*0]
        pkhtb r3, r9, r3, asr #16    @ src[3 + s*2] | src[3 + s*0]
        pkhbt r9, r11, lr, lsl #16   @ src[0 + s*3] | src[0 + s*1]
        pkhtb r11, lr, r11, asr #16  @ src[2 + s*3] | src[2 + s*1]
        pkhbt lr, r7, r12, lsl #16   @ src[0 + s*2] | src[0 + s*0]
        pkhtb r7, r12, r7, asr #16   @ src[2 + s*2] | src[2 + s*0]
        mov r12, #0x40
        smlad r9, r9, r6, r12        @ filter[0][1]
        smlad r10, r10, r6, r12      @ filter[1][1]
        smlad r11, r11, r6, r12      @ filter[2][1]
        smlad r1, r1, r6, r12        @ filter[3][1]
        smlad r9, lr, r5, r9         @ filter[0][0]
        smlad r10, r8, r5, r10       @ filter[1][0]
        smlad r11, r7, r5, r11       @ filter[2][0]
        smlad r1, r3, r5, r1         @ filter[3][0]
        subs r4, r4, #4
        ldr r3, [sp, #48]
        sat4 r9, r10, r11, r1
        str r9, [r0], #4
        bne 1b
        ldr r4, [sp, #40]
        ldr r12, [sp, #44]
        add r2, r2, r3
        ldr r9, [sp, #0]
        subs r12, r12, #1
        sub r2, r2, r4
        str r12, [sp, #44]
        add r0, r0, r9
        bne 1b
        pop {r1, r4-r11, pc}
endfunc

function vp8_put_bilin_h_armv6
        push {r1, r4-r11, lr}
        rsb r5, r12, r12, lsl #16
        ldr r12, [sp, #44]
        sub r3, r3, r4
        add r5, r5, #8
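@ r5 = mx << 16 | (8 - mx): bilinear weights packed for smlad, so each
@ sum below is (8 - mx) * src[i] + mx * src[i+1] + 4, completed by the
@ >> 3 in the packing stage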
1:
        ldrb r6, [r2], #1
        ldrb r7, [r2], #1
        ldrb r8, [r2], #1
        ldrb r9, [r2], #1
        ldrb lr, [r2]
        pkhbt r6, r6, r7, lsl #16    @ src[1] | src[0]
        pkhbt r7, r7, r8, lsl #16    @ src[2] | src[1]
        pkhbt r8, r8, r9, lsl #16    @ src[3] | src[2]
        pkhbt r9, r9, lr, lsl #16    @ src[4] | src[3]
        mov r10, #4
        smlad r6, r6, r5, r10
        smlad r7, r7, r5, r10
        smlad r8, r8, r5, r10
        smlad r9, r9, r5, r10
        subs r4, r4, #4
        asr r6, #3
        asr r7, #3
        pkhbt r6, r6, r8, lsl #13
        pkhbt r7, r7, r9, lsl #13
        orr r6, r6, r7, lsl #8
        str r6, [r0], #4
        bne 1b
        ldr r4, [sp, #40]
        subs r12, r12, #1
        add r2, r2, r3
        add r0, r0, r1
        bne 1b
        pop {r1, r4-r11, pc}
endfunc

function vp8_put_bilin_v_armv6
        push {r1, r4-r11, lr}
        rsb r5, r12, r12, lsl #16
        ldr r12, [sp, #44]
        add r5, r5, #8
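@ same weight packing as in vp8_put_bilin_h, with my in place of mx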
1:
        ldrb r10, [r2, r3]
        ldrb r6, [r2], #1
        ldrb r11, [r2, r3]
        ldrb r7, [r2], #1
        ldrb lr, [r2, r3]
        ldrb r8, [r2], #1
        ldrb r9, [r2, r3]
        pkhbt r6, r6, r10, lsl #16
        ldrb r10, [r2], #1
        pkhbt r7, r7, r11, lsl #16
        pkhbt r8, r8, lr, lsl #16
        pkhbt r9, r10, r9, lsl #16
        mov r10, #4
        smlad r6, r6, r5, r10
        smlad r7, r7, r5, r10
        smlad r8, r8, r5, r10
        smlad r9, r9, r5, r10
        subs r4, r4, #4
        asr r6, #3
        asr r7, #3
        pkhbt r6, r6, r8, lsl #13
        pkhbt r7, r7, r9, lsl #13
        orr r6, r6, r7, lsl #8
        str r6, [r0], #4
        bne 1b
        ldr r4, [sp, #40]
        subs r12, r12, #1
        add r2, r2, r3
        add r0, r0, r1
        sub r2, r2, r4
        bne 1b
        pop {r1, r4-r11, pc}
endfunc