/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"
#include "mangle.h" /* provides MANGLE(), used below; assumed to sit next to this file as in the rest of the i386 code */
/* per-16-bit-word rounding constants: 0, 1 and 2 in every word lane */
static const __attribute__ ((aligned(8))) UINT64 round_tab[3]={
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

/* 0x01 in every byte; used to correct the rounding bias of cascaded pavgb */
static __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL;
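
/*
 * Reference model (not part of the original file; a minimal scalar sketch
 * for readability): every kernel below accumulates the sum of absolute
 * differences (SAD) over an 8-pixel-wide block of 1<<h rows.
 */
static inline int sad8_ref(const UINT8 *blk1, const UINT8 *blk2, int stride, int h)
{
    int x, y, sum= 0;
    for(y=0; y<(1<<h); y++){
        for(x=0; x<8; x++){
            int d= blk1[x] - blk2[x];
            sum += d<0 ? -d : d;
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}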
/*
 * SAD of an 8-pixel-wide block over 1<<h rows, plain MMX. The loop runs a
 * negative index up to zero so a single addl doubles as pointer advance and
 * loop condition; callers must have cleared mm7 (the zero register for
 * unpacking) and mm6 (the accumulator).
 */
static inline void sad8_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm4        \n\t"
        "addl %3, %%eax                 \n\t"
        "psubusb %%mm0, %%mm2           \n\t" /* max(blk2-blk1, 0) */
        "psubusb %%mm4, %%mm0           \n\t" /* max(blk1-blk2, 0) */
        "movq (%1, %%eax), %%mm1        \n\t" /* second row of the pair */
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq (%2, %%eax), %%mm5        \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t" /* |blk1-blk2| per byte */
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t" /* widen bytes to words */
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t" /* accumulate in mm6 */
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
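
/*
 * MMX has no per-byte absolute-difference instruction, so sad8_mmx builds
 * one from two saturated subtractions: of max(a-b,0) and max(b-a,0) at most
 * one is nonzero, so OR-ing them yields |a-b|. Scalar sketch (illustrative,
 * not from the original file):
 */
static inline UINT8 absdiff_u8_sketch(UINT8 a, UINT8 b)
{
    UINT8 d1= a>b ? a-b : 0;    /* psubusb: a-b with unsigned saturation */
    UINT8 d2= b>a ? b-a : 0;    /* psubusb: b-a with unsigned saturation */
    return d1|d2;               /* por: one operand is always zero */
}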
/* SAD of an 8-pixel-wide block, using the MMX2 psadbw instruction. */
static inline void sad8_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t" /* mm0 = SAD of 8 byte pairs */
        "addl %3, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t" /* accumulate in mm6 */
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
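
/*
 * psadbw (available with the MMX2/SSE integer extensions) collapses the
 * whole unpack/subtract/accumulate cascade of sad8_mmx into one
 * instruction: it sums |a_i - b_i| over the 8 bytes of a quadword into a
 * single 16-bit result (worst case 8*255 = 2040). Scalar model (a sketch,
 * not from the original file):
 */
static inline int psadbw_sketch(const UINT8 *a, const UINT8 *b)
{
    int i, sum= 0;
    for(i=0; i<8; i++)
        sum += a[i]>b[i] ? a[i]-b[i] : b[i]-a[i];
    return sum;
}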
/* SAD against the rounded average of two source blocks (half-pel), MMX2. */
static inline void sad8_2_mmx2(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "pavgb %%mm2, %%mm0             \n\t" /* (blk1a+blk1b+1)>>1 per byte */
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "pavgb %%mm1, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
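
/*
 * Half-pel SAD: the candidate is the byte-wise rounded average of two
 * source blocks; pavgb computes (a+b+1)>>1 per byte. Scalar model of one
 * pixel (a sketch, not from the original file):
 */
static inline int sad_halfpel_pixel_sketch(UINT8 a, UINT8 b, UINT8 ref)
{
    int avg= (a + b + 1)>>1;            /* pavgb */
    return avg>ref ? avg-ref : ref-avg; /* |avg - ref| */
}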
/* SAD against the 4-pixel average (2D half-pel), MMX2 approximation. */
static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{ //FIXME reuse src
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq 1(%1, %%eax), %%mm1       \n\t"
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "pavgb %%mm2, %%mm0             \n\t" /* vertical averages at x ... */
        "pavgb %%mm1, %%mm3             \n\t" /* ... and at x+1 */
        "psubusb %%mm5, %%mm3           \n\t" /* -1 per byte: correct rounding bias */
        "pavgb %%mm3, %%mm0             \n\t" /* ~ (a+b+c+d+2)>>2 */
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t"
        "movq 1(%2, %%eax), %%mm4       \n\t"
        "pavgb %%mm3, %%mm1             \n\t"
        "pavgb %%mm4, %%mm2             \n\t"
        "psubusb %%mm5, %%mm2           \n\t"
        "pavgb %%mm1, %%mm2             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm2            \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
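
/*
 * The 2D half-pel case wants (a+b+c+d+2)>>2 per pixel. Cascading two
 * round-up pavgb averages would bias the result upward, so one intermediate
 * average is first decremented by 1 per byte (psubusb with "bone"); the
 * result still only approximates the exact rounding. Scalar model of one
 * pixel (a sketch, not from the original file):
 */
static inline int avg4_approx_sketch(UINT8 a, UINT8 b, UINT8 c, UINT8 d)
{
    int ab= (a + b + 1)>>1;     /* pavgb, column x */
    int cd= (c + d + 1)>>1;     /* pavgb, column x+1 */
    cd= cd>0 ? cd-1 : 0;        /* psubusb bone: undo one rounding bias */
    return (ab + cd + 1)>>1;    /* final pavgb */
}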
/* Half-pel SAD without pavgb: the rounded average is computed in 16-bit
 * lanes; callers preload mm5 with round_tab[1] (1 in every word). */
static inline void sad8_2_mmx(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq (%1, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t" /* widen both sources to words */
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "paddw %%mm5, %%mm1             \n\t" /* + rounding constant */
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t" /* (a+b+1)>>1 */
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t" /* back to bytes */
        "psubusb %%mm1, %%mm4           \n\t" /* |avg - blk2| via the  */
        "psubusb %%mm2, %%mm1           \n\t" /*   psubusb/por trick   */
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
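
/*
 * Without pavgb the rounded average above is emulated in 16-bit lanes:
 * widen, add, add the rounding word the caller preloaded into mm5, shift
 * right and repack. Per-pixel model (illustrative):
 *
 *   avg = (a + b + 1) >> 1;   // paddw + paddw mm5 + psrlw $1
 */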
/* 2D half-pel SAD without pavgb, computed exactly in 16-bit lanes;
 * callers preload mm5 with round_tab[2] (2 in every word). */
static inline void sad8_4_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "movq %%mm1, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t" /* a+b (low/high halves) */
        "paddw %%mm2, %%mm4             \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t" /* the x+1 columns */
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "movq %%mm2, %%mm1              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm0, %%mm2             \n\t"
        "paddw %%mm4, %%mm1             \n\t"
        "movq %%mm3, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm3, %%mm2             \n\t" /* a+b+c+d */
        "paddw %%mm4, %%mm1             \n\t"
        "movq (%3, %%eax), %%mm3        \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "paddw %%mm5, %%mm2             \n\t" /* +2 */
        "paddw %%mm5, %%mm1             \n\t"
        "psrlw $2, %%mm2                \n\t" /* (a+b+c+d+2)>>2 */
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm2          \n\t"
        "psubusb %%mm2, %%mm3           \n\t"
        "psubusb %%mm4, %%mm2           \n\t"
        "por %%mm3, %%mm2               \n\t" /* |avg - blk2| */
        "movq %%mm2, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
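
/*
 * Note that this plain-MMX path computes the exact rounding
 * (a+b+c+d+2)>>2 in 16-bit lanes, whereas sad8_4_mmx2 above only
 * approximates it with cascaded pavgb.
 */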
/* Fold the four 16-bit partial sums in mm6 into one scalar result. */
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}
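
/*
 * Folding model (illustrative): with mm6 = [s0 s1 s2 s3] as four 16-bit
 * partial sums, the two shift/add steps above leave s0+s1+s2+s3 in the low
 * word. 16 bits suffice: the worst-case 16x16 SAD is 16*16*255 = 65280.
 *
 *   return (s0 + s1 + s2 + s3) & 0xFFFF;
 */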
static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0                 \n\t" /* psadbw already left a full sum in the low word */
        : "=r" (ret)
    );
    return ret;
}
/*
 * Instantiate the exported pix_abs functions for one instruction-set
 * suffix. Each function clears mm7 (the zero register for unpacking) and
 * mm6 (the accumulator); the half-pel variants also preload the rounding
 * constant into mm5. h=3 selects 8 rows, h=4 selects 16 rows.
 */
#define PIX_SAD(suf)\
int pix_abs8x8_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1])\
                 );\
\
    sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1])\
                 );\
\
    sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2])\
                 );\
\
    sad8_4_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1])\
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+1, blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1])\
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+stride  , blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+stride+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2])\
                 );\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)
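
/*
 * Usage sketch (an assumption, not from this file): these functions are
 * typically installed at init time through the dsputil function pointers
 * once the CPU capabilities are known; the exact flag and pointer names
 * below are illustrative.
 *
 *   if (mm_flags & MM_MMX)    pix_abs16x16 = pix_abs16x16_mmx;
 *   if (mm_flags & MM_MMXEXT) pix_abs16x16 = pix_abs16x16_mmx2;
 */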