;*****************************************************************************
;* MMX optimized DSP utils
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text
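; DIFF_PIXELS_1 computes one row of signed 16-bit differences
; pix1[i] - pix2[i] without needing a zero register: punpcklbw %2, %1 makes
; each word of %2 equal pix2[i] + (pix1[i] << 8), punpcklbw %1, %1 makes
; each word of %1 equal pix1[i] * 0x101, so the final psubw leaves exactly
; pix1[i] - pix2[i] in each word.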
%macro DIFF_PIXELS_1 4
    movh            %1, %3
    movh            %2, %4
    punpcklbw       %2, %1
    punpcklbw       %1, %1
    psubw           %1, %2
%endmacro
; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires $mmsize stack space (aligned) on %6 (except on SSE+x86-64)
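; Loads the 8 rows of the difference block into m0-m7: rows 0-2 via stride
; multiples, rows 3-6 after advancing both pointers by stride*3, and row 7
; at stride*4 past that. When no spare register m8 exists, m0 is spilled to
; [%6] so m7 can be computed, then restored; the pointers are restored by
; the trailing subs.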
%macro DIFF_PIXELS_8 6
    DIFF_PIXELS_1   m0, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m1, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    add             %1, %5
    add             %2, %5
    DIFF_PIXELS_1   m3, m7, [%1     +%3], [%2     +%3]
    DIFF_PIXELS_1   m4, m7, [%1+%4  +%3], [%2+%4  +%3]
    DIFF_PIXELS_1   m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
    DIFF_PIXELS_1   m6, m7, [%1+%5  +%3], [%2+%5  +%3]
%ifdef m8
    DIFF_PIXELS_1   m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else
    mova          [%6], m0
    DIFF_PIXELS_1   m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
    mova            m0, [%6]
%endif
    sub             %1, %5
    sub             %2, %5
%endmacro
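; HADAMARD8 applies a three-stage butterfly network to the eight word
; vectors m0-m7: each SUMSUB_BADC w, a, b, c, d replaces the pairs (a, b)
; and (c, d) with their sum and difference (sign convention per SUMSUB_BA
; in x86util.asm). With pair distances 1, 2 and 4 this is an unnormalized
; 8-point Walsh-Hadamard transform in every 16-bit lane, which is all that
; matters here since only absolute values are summed afterwards.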
%macro HADAMARD8 0
    SUMSUB_BADC w, 0, 1, 2, 3
    SUMSUB_BADC w, 4, 5, 6, 7
    SUMSUB_BADC w, 0, 2, 1, 3
    SUMSUB_BADC w, 4, 6, 5, 7
    SUMSUB_BADC w, 0, 4, 1, 5
    SUMSUB_BADC w, 2, 6, 3, 7
%endmacro
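; ABS1_SUM / ABS2_SUM take the elementwise absolute value of one / two
; registers (ABS1/ABS2 clobber their extra arguments as temporaries) and
; accumulate with saturating unsigned adds, so each lane sticks at 0xFFFF
; instead of wrapping.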
%macro ABS1_SUM 3
    ABS1            %1, %2
    paddusw         %3, %1
%endmacro

%macro ABS2_SUM 6
    ABS2            %1, %2, %3, %4
    paddusw         %5, %1
    paddusw         %6, %2
%endmacro

%macro ABS_SUM_8x8_64 1
    ABS2            m0, m1, m8, m9
    ABS2_SUM        m2, m3, m8, m9, m0, m1
    ABS2_SUM        m4, m5, m8, m9, m0, m1
    ABS2_SUM        m6, m7, m8, m9, m0, m1
    paddusw         m0, m1
%endmacro

%macro ABS_SUM_8x8_32 1
    mova          [%1], m7
    ABS1            m0, m7
    ABS1            m1, m7
    ABS1_SUM        m2, m7, m0
    ABS1_SUM        m3, m7, m1
    ABS1_SUM        m4, m7, m0
    ABS1_SUM        m5, m7, m1
    ABS1_SUM        m6, m7, m0
    mova            m2, [%1]
    ABS1_SUM        m2, m7, m1
    paddusw         m0, m1
%endmacro
; FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can reach
; about 100k on extreme inputs. But that is very unlikely to occur in natural
; video, and it is even more unlikely that no alternative mvs/modes with lower
; cost exist.
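; The HSUM_* variants horizontally add the word lanes of %1, using %2 as
; scratch: each step folds the upper half of the remaining data onto the
; lower half with a shift/shuffle and a saturating add, so the reduction
; takes log2(lanes) steps. The total ends up in the low word of %3, which
; is why the callers mask the result with 0xFFFF.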
%macro HSUM_MMX 3
    mova            %2, %1
    psrlq           %1, 32
    paddusw         %1, %2
    mova            %2, %1
    psrlq           %1, 16
    paddusw         %1, %2
    movd            %3, %1
%endmacro

%macro HSUM_MMXEXT 3
    pshufw          %2, %1, 0xE
    paddusw         %1, %2
    pshufw          %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%endmacro

%macro HSUM_SSE2 3
    movhlps         %2, %1
    paddusw         %1, %2
    pshuflw         %2, %1, 0xE
    paddusw         %1, %2
    pshuflw         %2, %1, 0x1
    paddusw         %1, %2
    movd            %3, %1
%endmacro
%macro STORE4 5
    mova [%1+mmsize*0], %2
    mova [%1+mmsize*1], %3
    mova [%1+mmsize*2], %4
    mova [%1+mmsize*3], %5
%endmacro

%macro LOAD4 5
    mova %2, [%1+mmsize*0]
    mova %3, [%1+mmsize*1]
    mova %4, [%1+mmsize*2]
    mova %5, [%1+mmsize*3]
%endmacro
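; hadamard8_16_wrapper generates the two public entry points (8x8 and 16x16)
; around a hadamard8x8_diff_* helper that uses %2 xmm registers. If no spare
; register m8 exists, the helper needs %3 aligned mmsize-sized scratch slots
; on the stack; the pad expression sizes the SUB so that the scratch area at
; rsp+gprsize inside the callee ends up mmsize-aligned (the 4 covers the
; return address pushed by call, stack_offset is x86inc's record of prior
; rsp adjustment).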
%macro hadamard8_16_wrapper 3
cglobal hadamard8_diff_%1, 4, 4, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif
    call hadamard8x8_diff_%1
%ifndef m8
    ADD             rsp, pad
%endif
    RET

cglobal hadamard8_diff16_%1, 5, 6, %2
%ifndef m8
    %assign pad %3*mmsize-(4+stack_offset&(mmsize-1))
    SUB             rsp, pad
%endif

    call hadamard8x8_diff_%1
    mov             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax

    cmp             r4d, 16
    jne .done

    lea             r1, [r1+r3*8-8]
    lea             r2, [r2+r3*8-8]
    call hadamard8x8_diff_%1
    add             r5d, eax

    add             r1, 8
    add             r2, 8
    call hadamard8x8_diff_%1
    add             r5d, eax
.done:
    mov             eax, r5d

%ifndef m8
    ADD             rsp, pad
%endif
    RET
%endmacro
%macro HADAMARD8_DIFF_MMX 1
ALIGN 16
; int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2,
;                          int stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; Note that r1, r2 and r3 are not clobbered by this function, so the 16x16
; version can simply call it four times (2x2); that is also why everything
; is addressed via rsp+gprsize, which is the rsp of the calling function.
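; The body below works on the 8x8 difference block in two halves of four
; columns each (an MMX register holds only four words). Each half is
; differenced (DIFF_PIXELS_8), transformed along one axis (HADAMARD8) and
; transposed in 4x4 word blocks through the stack; the reassembled halves
; are then transformed along the other axis and the absolute values summed,
; giving sum |WHT(pix1 - pix2)| as the cost.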
hadamard8x8_diff_%1:
    lea             r0, [r3*3]

    ; first 4x8 pixels
    DIFF_PIXELS_8   r1, r2,  0, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W    0,  1,  2,  3,  7
    STORE4          rsp+gprsize, m0, m1, m2, m3
    mova            m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W    4,  5,  6,  7,  0
    STORE4          rsp+gprsize+0x40, m4, m5, m6, m7

    ; second 4x8 pixels
    DIFF_PIXELS_8   r1, r2,  4, r3, r0, rsp+gprsize+0x60
    HADAMARD8
    mova [rsp+gprsize+0x60], m7
    TRANSPOSE4x4W    0,  1,  2,  3,  7
    STORE4          rsp+gprsize+0x20, m0, m1, m2, m3
    mova            m7, [rsp+gprsize+0x60]
    TRANSPOSE4x4W    4,  5,  6,  7,  0
    LOAD4           rsp+gprsize+0x40, m0, m1, m2, m3

    HADAMARD8
    ABS_SUM_8x8_32  rsp+gprsize+0x60
    mova [rsp+gprsize+0x60], m0

    LOAD4           rsp+gprsize     , m0, m1, m2, m3
    LOAD4           rsp+gprsize+0x20, m4, m5, m6, m7

    HADAMARD8
    ABS_SUM_8x8_32  rsp+gprsize
    paddusw         m0, [rsp+gprsize+0x60]

    HSUM            m0, m1, eax
    and             rax, 0xFFFF
    ret

hadamard8_16_wrapper %1, 0, 14
%endmacro
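; The SSE2/SSSE3 version holds a full 8-pixel row of word differences per
; xmm register, so a single 8x8 word transpose (in-register on x86-64,
; through two stack slots on x86-32) replaces the 4x4 block shuffling above.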
%macro HADAMARD8_DIFF_SSE2 2
hadamard8x8_diff_%1:
    lea             r0, [r3*3]
    DIFF_PIXELS_8   r1, r2,  0, r3, r0, rsp+gprsize
    HADAMARD8
%if ARCH_X86_64
    TRANSPOSE8x8W    0,  1,  2,  3,  4,  5,  6,  7,  8
%else
    TRANSPOSE8x8W    0,  1,  2,  3,  4,  5,  6,  7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
    HADAMARD8
    ABS_SUM_8x8     rsp+gprsize
    HSUM_SSE2       m0, m1, eax
    and             eax, 0xFFFF
    ret

hadamard8_16_wrapper %1, %2, 3
%endmacro
INIT_MMX
%define ABS1 ABS1_MMX
%define HSUM HSUM_MMX
HADAMARD8_DIFF_MMX mmx

%define ABS1 ABS1_MMXEXT
%define HSUM HSUM_MMXEXT
HADAMARD8_DIFF_MMX mmxext

INIT_XMM
%define ABS2 ABS2_MMXEXT
%if ARCH_X86_64
    %define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
    %define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif
HADAMARD8_DIFF_SSE2 sse2, 10

%define ABS2        ABS2_SSSE3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF_SSE2 ssse3, 9

INIT_XMM
; sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
cglobal sse16_sse2, 5, 5, 8
    shr             r4d, 1
    pxor            m0, m0          ; mm0 = 0
    pxor            m7, m7          ; mm7 holds the sum

.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu            m1, [r1   ]     ; mm1 = pix1[0][0-15]
    movu            m2, [r2   ]     ; mm2 = pix2[0][0-15]
    movu            m3, [r1+r3]     ; mm3 = pix1[1][0-15]
    movu            m4, [r2+r3]     ; mm4 = pix2[1][0-15]

    ; todo: mm1-mm2, mm3-mm4
    ; algo: subtract mm1 from mm2 with saturation and vice versa
    ;       OR the result to get the absolute difference
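    ; e.g. for bytes a = 3, b = 10: (3 -us 10) = 0 and (10 -us 3) = 7, and
    ; 0|7 = 7 = |3 - 10|; one saturating subtraction is always 0 and the
    ; other is |a-b|, so the OR merges them into the absolute difference.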
    mova            m5, m1
    mova            m6, m3
    psubusb         m1, m2
    psubusb         m3, m4
    psubusb         m2, m5
    psubusb         m4, m6

    por             m2, m1
    por             m4, m3

    ; now convert to 16-bit vectors so we can square them
    mova            m1, m2
    mova            m3, m4

    punpckhbw       m2, m0
    punpckhbw       m4, m0
    punpcklbw       m1, m0          ; mm1 now spread over (mm1,mm2)
    punpcklbw       m3, m0          ; mm4 now spread over (mm3,mm4)

    pmaddwd         m2, m2          ; square each word, add adjacent pairs into dwords
    pmaddwd         m4, m4
    pmaddwd         m1, m1
    pmaddwd         m3, m3

    lea             r1, [r1+r3*2]   ; pix1 += 2*line_size
    lea             r2, [r2+r3*2]   ; pix2 += 2*line_size

    paddd           m1, m2
    paddd           m3, m4
    paddd           m7, m1
    paddd           m7, m3

    dec             r4
    jnz .next2lines

    mova            m1, m7
    psrldq          m7, 8           ; shift hi qword to lo
    paddd           m7, m1
    mova            m1, m7
    psrldq          m7, 4           ; shift hi dword to lo
    paddd           m7, m1
    movd            eax, m7         ; return value
    RET
  295. RET