
;******************************************************************************
;* MMX optimized discrete wavelet transform
;* Copyright (c) 2010 David Conrad
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "x86inc.asm"

SECTION_RODATA
pw_1:    times 8 dw 1
pw_2:    times 8 dw 2
pw_8:    times 8 dw 8
pw_16:   times 8 dw 16
pw_1991: times 4 dw 9,-1

section .text

; %1 -= (%2 + %3 + 2)>>2    %4 is pw_2
%macro COMPOSE_53iL0 4
    paddw   %2, %3
    paddw   %2, %4
    psraw   %2, 2
    psubw   %1, %2
%endm
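
; Scalar sketch of the lifting step this macro vectorizes (derived from the
; formula above; operand names are illustrative, not from the source):
;     dst[i] -= (a[i] + b[i] + 2) >> 2;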

; m1 = %1 + (-m0 + 9*m1 + 9*%2 - %3 + 8)>>4
; if %4 is supplied, %1 is loaded unaligned from there
; m2: clobbered    m3: pw_8    m4: pw_1991
%macro COMPOSE_DD97iH0 3-4
    paddw   m0, %3
    paddw   m1, %2
    psubw   m0, m3
    mova    m2, m1
    punpcklwd m1, m0
    punpckhwd m2, m0
    pmaddwd m1, m4
    pmaddwd m2, m4
%if %0 > 3
    movu    %1, %4
%endif
    psrad   m1, 4
    psrad   m2, 4
    packssdw m1, m2
    paddw   m1, %1
%endm
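
; The 9*x - y terms above are formed by interleaving the (x, y) word pairs with
; punpcklwd/punpckhwd and multiplying by pw_1991 (9,-1) via pmaddwd, which
; yields 9*x - y in each 32-bit lane; psrad/packssdw then narrow back to words.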

%macro COMPOSE_VERTICAL 1
; void vertical_compose53iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
;                            int width)
cglobal vertical_compose53iL0_%1, 4,4,1, b0, b1, b2, width
    mova    m2, [pw_2]
.loop:
    sub     widthd, mmsize/2
    mova    m1, [b0q+2*widthq]
    mova    m0, [b1q+2*widthq]
    COMPOSE_53iL0 m0, m1, [b2q+2*widthq], m2
    mova    [b1q+2*widthq], m0
    jg      .loop
    REP_RET
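
; Per-element effect of the loop above (a sketch): b1[i] -= (b0[i] + b2[i] + 2) >> 2;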

; void vertical_compose_dirac53iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
;                                  int width)
cglobal vertical_compose_dirac53iH0_%1, 4,4,1, b0, b1, b2, width
    mova    m1, [pw_1]
.loop:
    sub     widthd, mmsize/2
    mova    m0, [b0q+2*widthq]
    paddw   m0, [b2q+2*widthq]
    paddw   m0, m1
    psraw   m0, 1
    paddw   m0, [b1q+2*widthq]
    mova    [b1q+2*widthq], m0
    jg      .loop
    REP_RET
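
; Per-element effect of the loop above (a sketch): b1[i] += (b0[i] + b2[i] + 1) >> 1;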

; void vertical_compose_dd97iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
;                               IDWTELEM *b3, IDWTELEM *b4, int width)
cglobal vertical_compose_dd97iH0_%1, 6,6,5, b0, b1, b2, b3, b4, width
    mova    m3, [pw_8]
    mova    m4, [pw_1991]
.loop:
    sub     widthd, mmsize/2
    mova    m0, [b0q+2*widthq]
    mova    m1, [b1q+2*widthq]
    COMPOSE_DD97iH0 [b2q+2*widthq], [b3q+2*widthq], [b4q+2*widthq]
    mova    [b2q+2*widthq], m1
    jg      .loop
    REP_RET
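
; Per-element effect of the loop above (a sketch):
;     b2[i] += (9*(b1[i] + b3[i]) - b0[i] - b4[i] + 8) >> 4;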

; void vertical_compose_dd137iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
;                                IDWTELEM *b3, IDWTELEM *b4, int width)
cglobal vertical_compose_dd137iL0_%1, 6,6,6, b0, b1, b2, b3, b4, width
    mova    m3, [pw_16]
    mova    m4, [pw_1991]
.loop:
    sub     widthd, mmsize/2
    mova    m0, [b0q+2*widthq]
    mova    m1, [b1q+2*widthq]
    mova    m5, [b2q+2*widthq]
    paddw   m0, [b4q+2*widthq]
    paddw   m1, [b3q+2*widthq]
    psubw   m0, m3
    mova    m2, m1
    punpcklwd m1, m0
    punpckhwd m2, m0
    pmaddwd m1, m4
    pmaddwd m2, m4
    psrad   m1, 5
    psrad   m2, 5
    packssdw m1, m2
    psubw   m5, m1
    mova    [b2q+2*widthq], m5
    jg      .loop
    REP_RET
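
; Per-element effect of the loop above (a sketch):
;     b2[i] -= (9*(b1[i] + b3[i]) - b0[i] - b4[i] + 16) >> 5;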

; void vertical_compose_haar(IDWTELEM *b0, IDWTELEM *b1, int width)
cglobal vertical_compose_haar_%1, 3,4,3, b0, b1, width
    mova    m3, [pw_1]
.loop:
    sub     widthd, mmsize/2
    mova    m1, [b1q+2*widthq]
    mova    m0, [b0q+2*widthq]
    mova    m2, m1
    paddw   m1, m3
    psraw   m1, 1
    psubw   m0, m1
    mova    [b0q+2*widthq], m0
    paddw   m2, m0
    mova    [b1q+2*widthq], m2
    jg      .loop
    REP_RET
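
; Per-element effect of the loop above (a sketch, applied in this order):
;     b0[i] -= (b1[i] + 1) >> 1;  b1[i] += b0[i];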
%endmacro

; extend the left and right edges of the tmp array by %1 and %2 respectively
%macro EDGE_EXTENSION 3
    mov     %3, [tmpq]
%assign %%i 1
%rep %1
    mov     [tmpq-2*%%i], %3
    %assign %%i %%i+1
%endrep
    mov     %3, [tmpq+2*w2q-2]
%assign %%i 0
%rep %2
    mov     [tmpq+2*w2q+2*%%i], %3
    %assign %%i %%i+1
%endrep
%endmacro
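
; Scalar sketch of the edge extension above (%3 is a 16-bit scratch register;
; the loop variable is illustrative):
;     for (i = 1; i <= %1; i++) tmp[-i]     = tmp[0];
;     for (i = 0; i <  %2; i++) tmp[w2 + i] = tmp[w2 - 1];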

%macro HAAR_HORIZONTAL 2
; void horizontal_compose_haari(IDWTELEM *b, IDWTELEM *tmp, int width)
cglobal horizontal_compose_haar%2i_%1, 3,6,4, b, tmp, w, x, w2, b_w2
    mov     w2d, wd
    xor     xq, xq
    shr     w2d, 1
    lea     b_w2q, [bq+wq]
    mova    m3, [pw_1]
.lowpass_loop:
    movu    m1, [b_w2q + 2*xq]
    mova    m0, [bq + 2*xq]
    paddw   m1, m3
    psraw   m1, 1
    psubw   m0, m1
    mova    [tmpq + 2*xq], m0
    add     xq, mmsize/2
    cmp     xq, w2q
    jl      .lowpass_loop

    xor     xq, xq
    and     w2q, ~(mmsize/2 - 1)
    cmp     w2q, mmsize/2
    jl      .end
.highpass_loop:
    movu    m1, [b_w2q + 2*xq]
    mova    m0, [tmpq + 2*xq]
    paddw   m1, m0

    ; shift and interleave
%if %2 == 1
    paddw   m0, m3
    paddw   m1, m3
    psraw   m0, 1
    psraw   m1, 1
%endif
    mova    m2, m0
    punpcklwd m0, m1
    punpckhwd m2, m1
    mova    [bq+4*xq], m0
    mova    [bq+4*xq+mmsize], m2
    add     xq, mmsize/2
    cmp     xq, w2q
    jl      .highpass_loop
.end:
    REP_RET
%endmacro
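
; Scalar sketch of horizontal_compose_haar%2i above (%1 is the cpu suffix, %2
; selects the final >>1 rounding; the last few output pairs are left for the
; caller's C code; variable names are illustrative):
;     for (x = 0; x < w2; x++)
;         tmp[x] = b[x] - ((b[x + w2] + 1) >> 1);
;     for (x = 0; x < w2; x++) {
;         int lo = tmp[x], hi = b[x + w2] + tmp[x];
;         if (%2) { lo = (lo + 1) >> 1; hi = (hi + 1) >> 1; }
;         b[2*x] = lo;  b[2*x + 1] = hi;
;     }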

INIT_XMM
; void horizontal_compose_dd97i(IDWTELEM *b, IDWTELEM *tmp, int width)
cglobal horizontal_compose_dd97i_ssse3, 3,6,8, b, tmp, w, x, w2, b_w2
    mov     w2d, wd
    xor     xd, xd
    shr     w2d, 1
    lea     b_w2q, [bq+wq]
    movu    m4, [bq+wq]
    mova    m7, [pw_2]
    pslldq  m4, 14
.lowpass_loop:
    movu    m1, [b_w2q + 2*xq]
    mova    m0, [bq + 2*xq]
    mova    m2, m1
    palignr m1, m4, 14
    mova    m4, m2
    COMPOSE_53iL0 m0, m1, m2, m7
    mova    [tmpq + 2*xq], m0
    add     xd, mmsize/2
    cmp     xd, w2d
    jl      .lowpass_loop

    EDGE_EXTENSION 1, 2, xw
    ; leave the last up to 7 (sse) or 3 (mmx) values for C
    xor     xd, xd
    and     w2d, ~(mmsize/2 - 1)
    cmp     w2d, mmsize/2
    jl      .end

    mova    m7, [tmpq-mmsize]
    mova    m0, [tmpq]
    mova    m5, [pw_1]
    mova    m3, [pw_8]
    mova    m4, [pw_1991]
.highpass_loop:
    mova    m6, m0
    palignr m0, m7, 14
    mova    m7, [tmpq + 2*xq + 16]
    mova    m1, m7
    mova    m2, m7
    palignr m1, m6, 2
    palignr m2, m6, 4
    COMPOSE_DD97iH0 m0, m6, m2, [b_w2q + 2*xq]
    mova    m0, m7
    mova    m7, m6

    ; shift and interleave
    paddw   m6, m5
    paddw   m1, m5
    psraw   m6, 1
    psraw   m1, 1
    mova    m2, m6
    punpcklwd m6, m1
    punpckhwd m2, m1
    mova    [bq+4*xq], m6
    mova    [bq+4*xq+mmsize], m2
    add     xd, mmsize/2
    cmp     xd, w2d
    jl      .highpass_loop
.end:
    REP_RET
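
; Scalar sketch of the routine above (derived from the code; the x-1 neighbour
; is clamped at the left edge, and the last few output pairs are left for the
; caller's C code):
;     for (x = 0; x < w2; x++)                        // low-pass into tmp
;         tmp[x] = b[x] - ((b[w2 + x - 1] + b[w2 + x] + 2) >> 2);
;     // tmp is then edge-extended by 1 on the left and 2 on the right
;     for (x = 0; x < w2; x++) {                      // high-pass + interleave
;         int hi = b[w2 + x]
;                + ((9*(tmp[x] + tmp[x+1]) - tmp[x-1] - tmp[x+2] + 8) >> 4);
;         b[2*x]     = (tmp[x] + 1) >> 1;
;         b[2*x + 1] = (hi + 1) >> 1;
;     }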

%if ARCH_X86_64 == 0
INIT_MMX
COMPOSE_VERTICAL mmx
HAAR_HORIZONTAL mmx, 0
HAAR_HORIZONTAL mmx, 1
%endif

INIT_XMM
COMPOSE_VERTICAL sse2
HAAR_HORIZONTAL sse2, 0
HAAR_HORIZONTAL sse2, 1