;*****************************************************************************
;* MMX optimized DSP utils
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text
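
; DIFF_PIXELS_1 %1(dst), %2(scratch), %3(pix1 address), %4(pix2 address)
; Leaves the signed word differences pix1 - pix2 in %1. The interleave trick
; (%1 = p1*257, %2 = p1*256 + p2, so %1 - %2 = p1 - p2 per word) avoids the
; need for a zero register when widening bytes to words.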
%macro DIFF_PIXELS_1 4
    movh            %1, %3
    movh            %2, %4
    punpcklbw       %2, %1
    punpcklbw       %1, %1
    psubw           %1, %2
%endmacro

; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires $mmsize stack space (aligned) on %6 (except on SSE+x86-64)
%macro DIFF_PIXELS_8 6
    DIFF_PIXELS_1   m0, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m1, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    add             %1, %5
    add             %2, %5
    DIFF_PIXELS_1   m3, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m4, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    DIFF_PIXELS_1   m6, m7, [%1+%5  +%3], [%2+%5  +%3]
%ifdef m8
    DIFF_PIXELS_1   m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else
    mova          [%6], m0
    DIFF_PIXELS_1   m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
    mova            m0, [%6]
%endif
    sub             %1, %5
    sub             %2, %5
%endmacro
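
; HADAMARD8: in-place 8-point Hadamard (butterfly) transform across m0-m7,
; one transform per word lane, i.e. vertically over the 8 rows of differences.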
%macro HADAMARD8 0
    SUMSUB_BADC w, 0, 1, 2, 3
    SUMSUB_BADC w, 4, 5, 6, 7
    SUMSUB_BADC w, 0, 2, 1, 3
    SUMSUB_BADC w, 4, 6, 5, 7
    SUMSUB_BADC w, 0, 4, 1, 5
    SUMSUB_BADC w, 2, 6, 3, 7
%endmacro

%macro ABS1_SUM 3
    ABS1            %1, %2
    paddusw         %3, %1
%endmacro

%macro ABS2_SUM 6
    ABS2            %1, %2, %3, %4
    paddusw         %5, %1
    paddusw         %6, %2
%endmacro
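
; ABS_SUM_8x8_*: sum |m0|..|m7| into m0 with unsigned saturation. The _64
; variant uses m8/m9 as scratch (x86-64 only); the _32 variant spills m7 to
; the stack slot passed in %1 so it fits in 8 vector registers.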
%macro ABS_SUM_8x8_64 1
    ABS2            m0, m1, m8, m9
    ABS2_SUM        m2, m3, m8, m9, m0, m1
    ABS2_SUM        m4, m5, m8, m9, m0, m1
    ABS2_SUM        m6, m7, m8, m9, m0, m1
    paddusw         m0, m1
%endmacro

%macro ABS_SUM_8x8_32 1
    mova          [%1], m7
    ABS1            m0, m7
    ABS1            m1, m7
    ABS1_SUM        m2, m7, m0
    ABS1_SUM        m3, m7, m1
    ABS1_SUM        m4, m7, m0
    ABS1_SUM        m5, m7, m1
    ABS1_SUM        m6, m7, m0
    mova            m2, [%1]
    ABS1_SUM        m2, m7, m1
    paddusw         m0, m1
%endmacro

; FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
; about 100k on extreme inputs. But that's very unlikely to occur in natural video,
; and it's even more unlikely to not have any alternative mvs/modes with lower cost.
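; HSUM %1(input, clobbered), %2(scratch), %3(GPR destination):
; horizontally add the words in %1 with unsigned saturation; the sum ends up
; in the low word of %1 and is moved to %3 (hence the 'and ..., 0xFFFF' after
; each use).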
%macro HSUM_MMX 3
    mova            %2, %1
    psrlq           %1, 32
    paddusw         %1, %2
    mova            %2, %1
    psrlq           %1, 16
    paddusw         %1, %2
    movd            %3, %1
%endmacro

%macro HSUM_MMX2 3
    pshufw          %2, %1, 0xE
    paddusw         %1, %2
    pshufw          %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%endmacro

%macro HSUM_SSE2 3
    movhlps         %2, %1
    paddusw         %1, %2
    pshuflw         %2, %1, 0xE
    paddusw         %1, %2
    pshuflw         %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%endmacro
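
; STORE4/LOAD4: move four vector registers to/from four consecutive
; mmsize-byte slots starting at address %1.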
%macro STORE4 5
    mova [%1+mmsize*0], %2
    mova [%1+mmsize*1], %3
    mova [%1+mmsize*2], %4
    mova [%1+mmsize*3], %5
%endmacro

%macro LOAD4 5
    mova            %2, [%1+mmsize*0]
    mova            %3, [%1+mmsize*1]
    mova            %4, [%1+mmsize*2]
    mova            %5, [%1+mmsize*3]
%endmacro
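
; hadamard8_16_wrapper %1(cpu suffix), %2(xmm regs used), %3(stack slots needed)
; Emits hadamard8_diff_%1 (a single 8x8 block) and hadamard8_diff16_%1 (16x8 or
; 16x16 depending on h, done as 2 or 4 calls to the hadamard8x8_diff_%1 core).
; When m8 is not available, roughly %3*mmsize bytes of stack are reserved,
; padded so the core's scratch area stays mmsize-aligned.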
%macro hadamard8_16_wrapper 3
cglobal hadamard8_diff_%1, 4, 4, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif
    call hadamard8x8_diff_%1
%ifndef m8
    ADD             rsp, pad
%endif
    RET

cglobal hadamard8_diff16_%1, 5, 6, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif

    call hadamard8x8_diff_%1
    mov             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax

    cmp             r4d, 16
    jne .done

    lea             r1, [r1+r3*8-8]
    lea             r2, [r2+r3*8-8]
    call hadamard8x8_diff_%1
    add             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax

.done:
    mov             eax, r5d
%ifndef m8
    ADD             rsp, pad
%endif
    RET
%endmacro
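
; The MMX core handles the 8x8 block as two 4x8 halves (columns 0-3 and 4-7),
; since an mm register only holds 4 words: each half is differenced,
; transformed, transposed in 4x4 tiles through the stack, then the transposed
; halves are transformed again and their absolute values accumulated.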
%macro HADAMARD8_DIFF_MMX 1
ALIGN 16
; int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2,
;                          int stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so 16x16
; can simply call this 2x2 times (and that's why we access rsp+gprsize
; everywhere, which is the rsp of the calling function)
hadamard8x8_diff_%1:
    lea             r0, [r3*3]

    ; first 4x8 pixels
    DIFF_PIXELS_8   r1, r2, 0, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W    0, 1, 2, 3, 7
    STORE4          rsp+gprsize, m0, m1, m2, m3
    mova            m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W    4, 5, 6, 7, 0
    STORE4          rsp+gprsize+0x40, m4, m5, m6, m7

    ; second 4x8 pixels
    DIFF_PIXELS_8   r1, r2, 4, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W    0, 1, 2, 3, 7
    STORE4          rsp+gprsize+0x20, m0, m1, m2, m3
    mova            m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W    4, 5, 6, 7, 0
    LOAD4           rsp+gprsize+0x40, m0, m1, m2, m3

    HADAMARD8
    ABS_SUM_8x8_32  rsp+gprsize+0x60
    mova [rsp+gprsize+0x60], m0

    LOAD4           rsp+gprsize, m0, m1, m2, m3
    LOAD4           rsp+gprsize+0x20, m4, m5, m6, m7

    HADAMARD8
    ABS_SUM_8x8_32  rsp+gprsize
    paddusw         m0, [rsp+gprsize+0x60]

    HSUM            m0, m1, eax
    and             rax, 0xFFFF
    ret

hadamard8_16_wrapper %1, 0, 14
%endmacro
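
; The SSE2/SSSE3 core keeps all 8x8 word differences in m0-m7, so one pass
; with a single full 8x8 transpose suffices; on x86-32 the transpose spills
; through two stack slots because only 8 xmm registers are available.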
%macro HADAMARD8_DIFF_SSE2 2
hadamard8x8_diff_%1:
    lea             r0, [r3*3]
    DIFF_PIXELS_8   r1, r2, 0, r3, r0, rsp+gprsize
    HADAMARD8
%if ARCH_X86_64
    TRANSPOSE8x8W    0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W    0, 1, 2, 3, 4, 5, 6, 7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
    HADAMARD8
    ABS_SUM_8x8     rsp+gprsize
    HSUM_SSE2       m0, m1, eax
    and             eax, 0xFFFF
    ret

hadamard8_16_wrapper %1, %2, 3
%endmacro

INIT_MMX
%define ABS1 ABS1_MMX
%define HSUM HSUM_MMX
HADAMARD8_DIFF_MMX mmx

%define ABS1 ABS1_MMX2
%define HSUM HSUM_MMX2
HADAMARD8_DIFF_MMX mmx2

INIT_XMM
%define ABS2 ABS2_MMX2
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif
HADAMARD8_DIFF_SSE2 sse2, 10

%define ABS2        ABS2_SSSE3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF_SSE2 ssse3, 9

INIT_XMM
; sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
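; Sum of squared differences over a 16-pixel-wide block: two rows per loop
; iteration (h is expected to be even); absolute byte differences are widened
; to words, squared and accumulated as dwords via pmaddwd.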
cglobal sse16_sse2, 5, 5, 8
    shr             r4d, 1
    pxor            m0, m0          ; mm0 = 0
    pxor            m7, m7          ; mm7 holds the sum

.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu            m1, [r1   ]     ; mm1 = pix1[0][0-15]
    movu            m2, [r2   ]     ; mm2 = pix2[0][0-15]
    movu            m3, [r1+r3]     ; mm3 = pix1[1][0-15]
    movu            m4, [r2+r3]     ; mm4 = pix2[1][0-15]

    ; todo: mm1-mm2, mm3-mm4
    ; algo: subtract mm1 from mm2 with saturation and vice versa
    ;       OR the result to get the absolute difference
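    ;       e.g. a=3, b=10: (3-10) saturates to 0, (10-3) = 7, 0|7 = 7 = |a-b|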
    mova            m5, m1
    mova            m6, m3
    psubusb         m1, m2
    psubusb         m3, m4
    psubusb         m2, m5
    psubusb         m4, m6

    por             m2, m1
    por             m4, m3

    ; now convert to 16-bit vectors so we can square them
    mova            m1, m2
    mova            m3, m4

    punpckhbw       m2, m0
    punpckhbw       m4, m0
    punpcklbw       m1, m0          ; mm1 now spread over (mm1, mm2)
    punpcklbw       m3, m0          ; mm4 now spread over (mm3, mm4)
    pmaddwd         m2, m2
    pmaddwd         m4, m4
    pmaddwd         m1, m1
    pmaddwd         m3, m3

    lea             r1, [r1+r3*2]   ; pix1 += 2*line_size
    lea             r2, [r2+r3*2]   ; pix2 += 2*line_size

    paddd           m1, m2
    paddd           m3, m4
    paddd           m7, m1
    paddd           m7, m3

    dec             r4
    jnz .next2lines

    mova            m1, m7
    psrldq          m7, 8           ; shift hi qword to lo
    paddd           m7, m1
    mova            m1, m7
    psrldq          m7, 4           ; shift hi dword to lo
    paddd           m7, m1
    movd            eax, m7         ; return value
    RET