/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Gerard Lantau.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */

#include "../dsputil.h"
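
/* Rounding constants used by the half-pel averaging code below: four
 * packed 16-bit words holding 0, 1 or 2, added before the >>1 (x2/y2)
 * or >>2 (xy2) shift. The PIX_SAD wrappers load the matching entry into
 * %mm5; only the plain-MMX variants actually consume it, since pavgb
 * rounds internally. */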
static const __attribute__ ((aligned(8))) UINT64 round_tab[3]={
    0x0000000000000000,
    0x0001000100010001,
    0x0002000200020002,
};
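
/* SAD of an 8-pixel-wide block over 2^h rows, plain MMX. MMX has no
 * absolute-difference instruction, so |a-b| is built per byte from two
 * saturating subtractions OR'ed together, then widened against the zero
 * register %mm7 and accumulated as packed 16-bit words in %mm6. The
 * caller must clear %mm6 and %mm7 first; two rows are processed per
 * loop iteration. */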
static inline void sad8_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm2      \n\t"
        "movq (%2, %%eax), %%mm4      \n\t"
        "addl %3, %%eax               \n\t"
        "psubusb %%mm0, %%mm2         \n\t"
        "psubusb %%mm4, %%mm0         \n\t"
        "movq (%1, %%eax), %%mm1      \n\t"
        "movq (%2, %%eax), %%mm3      \n\t"
        "movq (%2, %%eax), %%mm5      \n\t"
        "psubusb %%mm1, %%mm3         \n\t"
        "psubusb %%mm5, %%mm1         \n\t"
        "por %%mm2, %%mm0             \n\t"
        "por %%mm1, %%mm3             \n\t"
        "movq %%mm0, %%mm1            \n\t"
        "movq %%mm3, %%mm2            \n\t"
        "punpcklbw %%mm7, %%mm0       \n\t"
        "punpckhbw %%mm7, %%mm1       \n\t"
        "punpcklbw %%mm7, %%mm3       \n\t"
        "punpckhbw %%mm7, %%mm2       \n\t"
        "paddw %%mm1, %%mm0           \n\t"
        "paddw %%mm3, %%mm2           \n\t"
        "paddw %%mm2, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %3, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
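
/* Same loop using the MMX2 psadbw instruction, which sums the absolute
 * differences of 8 byte pairs in a single step, leaving the result in
 * the low word of the destination register. */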
static inline void sad8_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm2      \n\t"
        "psadbw %%mm2, %%mm0          \n\t"
        "addl %3, %%eax               \n\t"
        "movq (%1, %%eax), %%mm1      \n\t"
        "movq (%2, %%eax), %%mm3      \n\t"
        "psadbw %%mm1, %%mm3          \n\t"
        "paddw %%mm3, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %3, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
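
/* SAD against the rounded average of two source blocks (horizontal or
 * vertical half-pel interpolation), MMX2 version: pavgb computes
 * (a+b+1)>>1 per byte and psadbw the absolute-difference sum. */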
static inline void sad8_2_mmx2(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm2      \n\t"
        "pavgb %%mm2, %%mm0           \n\t"
        "movq (%3, %%eax), %%mm2      \n\t"
        "psadbw %%mm2, %%mm0          \n\t"
        "addl %4, %%eax               \n\t"
        "movq (%1, %%eax), %%mm1      \n\t"
        "movq (%2, %%eax), %%mm3      \n\t"
        "pavgb %%mm1, %%mm3           \n\t"
        "movq (%3, %%eax), %%mm1      \n\t"
        "psadbw %%mm1, %%mm3          \n\t"
        "paddw %%mm3, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %4, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
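
/* SAD against the 2x2 (xy half-pel) average, MMX2 version. The 4-point
 * average is approximated by chaining three pavgb operations, which is
 * fast but rounds slightly differently than the exact (a+b+c+d+2)>>2
 * computed by sad8_4_mmx below. */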
static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{ //FIXME reuse src
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm2      \n\t"
        "movq 1(%1, %%eax), %%mm1     \n\t"
        "movq 1(%2, %%eax), %%mm3     \n\t"
        "pavgb %%mm2, %%mm0           \n\t"
        "pavgb %%mm1, %%mm3           \n\t"
        "pavgb %%mm3, %%mm0           \n\t"
        "movq (%3, %%eax), %%mm2      \n\t"
        "psadbw %%mm2, %%mm0          \n\t"
        "addl %4, %%eax               \n\t"
        "movq (%1, %%eax), %%mm1      \n\t"
        "movq (%2, %%eax), %%mm3      \n\t"
        "movq 1(%1, %%eax), %%mm2     \n\t"
        "movq 1(%2, %%eax), %%mm4     \n\t"
        "pavgb %%mm3, %%mm1           \n\t"
        "pavgb %%mm4, %%mm2           \n\t"
        "pavgb %%mm1, %%mm2           \n\t"
        "movq (%3, %%eax), %%mm1      \n\t"
        "psadbw %%mm1, %%mm2          \n\t"
        "paddw %%mm2, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %4, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
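
/* Plain-MMX fallback for the two-block average: both inputs are widened
 * to 16 bits, summed, rounded with the round_tab[1] constant in %mm5
 * and shifted right by 1 before the byte-wise absolute difference
 * against blk2 is accumulated. */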
static inline void sad8_2_mmx(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm1      \n\t"
        "movq (%1, %%eax), %%mm2      \n\t"
        "movq (%2, %%eax), %%mm3      \n\t"
        "punpcklbw %%mm7, %%mm0       \n\t"
        "punpcklbw %%mm7, %%mm1       \n\t"
        "punpckhbw %%mm7, %%mm2       \n\t"
        "punpckhbw %%mm7, %%mm3       \n\t"
        "paddw %%mm0, %%mm1           \n\t"
        "paddw %%mm2, %%mm3           \n\t"
        "movq (%3, %%eax), %%mm4      \n\t"
        "movq (%3, %%eax), %%mm2      \n\t"
        "paddw %%mm5, %%mm1           \n\t"
        "paddw %%mm5, %%mm3           \n\t"
        "psrlw $1, %%mm1              \n\t"
        "psrlw $1, %%mm3              \n\t"
        "packuswb %%mm3, %%mm1        \n\t"
        "psubusb %%mm1, %%mm4         \n\t"
        "psubusb %%mm2, %%mm1         \n\t"
        "por %%mm4, %%mm1             \n\t"
        "movq %%mm1, %%mm0            \n\t"
        "punpcklbw %%mm7, %%mm0       \n\t"
        "punpckhbw %%mm7, %%mm1       \n\t"
        "paddw %%mm1, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %4, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
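
/* Plain-MMX fallback for the 2x2 average: the four neighbouring pixels
 * are widened and summed exactly, rounded with round_tab[2] in %mm5 and
 * shifted right by 2, giving the true (a+b+c+d+2)>>2 average. */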
static inline void sad8_4_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movq (%1, %%eax), %%mm0      \n\t"
        "movq (%2, %%eax), %%mm1      \n\t"
        "movq %%mm0, %%mm4            \n\t"
        "movq %%mm1, %%mm2            \n\t"
        "punpcklbw %%mm7, %%mm0       \n\t"
        "punpcklbw %%mm7, %%mm1       \n\t"
        "punpckhbw %%mm7, %%mm4       \n\t"
        "punpckhbw %%mm7, %%mm2       \n\t"
        "paddw %%mm1, %%mm0           \n\t"
        "paddw %%mm2, %%mm4           \n\t"
        "movq 1(%1, %%eax), %%mm2     \n\t"
        "movq 1(%2, %%eax), %%mm3     \n\t"
        "movq %%mm2, %%mm1            \n\t"
        "punpcklbw %%mm7, %%mm2       \n\t"
        "punpckhbw %%mm7, %%mm1       \n\t"
        "paddw %%mm0, %%mm2           \n\t"
        "paddw %%mm4, %%mm1           \n\t"
        "movq %%mm3, %%mm4            \n\t"
        "punpcklbw %%mm7, %%mm3       \n\t"
        "punpckhbw %%mm7, %%mm4       \n\t"
        "paddw %%mm3, %%mm2           \n\t"
        "paddw %%mm4, %%mm1           \n\t"
        "movq (%3, %%eax), %%mm3      \n\t"
        "movq (%3, %%eax), %%mm4      \n\t"
        "paddw %%mm5, %%mm2           \n\t"
        "paddw %%mm5, %%mm1           \n\t"
        "psrlw $2, %%mm2              \n\t"
        "psrlw $2, %%mm1              \n\t"
        "packuswb %%mm1, %%mm2        \n\t"
        "psubusb %%mm2, %%mm3         \n\t"
        "psubusb %%mm4, %%mm2         \n\t"
        "por %%mm3, %%mm2             \n\t"
        "movq %%mm2, %%mm0            \n\t"
        "punpcklbw %%mm7, %%mm0       \n\t"
        "punpckhbw %%mm7, %%mm2       \n\t"
        "paddw %%mm2, %%mm0           \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "addl %4, %%eax               \n\t"
        " js 1b                       \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
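
/* Horizontal sum of the four 16-bit partial sums accumulated in %mm6.
 * Only the low 16 bits are kept; a 16x16 SAD is at most 255*256 = 65280,
 * so the truncated result is still exact. */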
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0            \n\t"
        "psrlq $32, %%mm6             \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "movq %%mm6, %%mm0            \n\t"
        "psrlq $16, %%mm6             \n\t"
        "paddw %%mm0, %%mm6           \n\t"
        "movd %%mm6, %0               \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}
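
/* With psadbw the low word of %mm6 already holds the complete sum, so a
 * single movd suffices. */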
static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0               \n\t"
        : "=r" (ret)
    );
    return ret;
}
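
/* Instantiates the exported entry points for one instruction set: plain
 * 8x8/16x16 SAD plus the x2/y2/xy2 half-pel variants. Each wrapper
 * clears the %mm6 accumulator (and the %mm7 zero register), loads the
 * matching rounding constant into %mm5 where needed, runs the 8-wide
 * kernels and reads the result back with sum_<suf>(). None of the
 * wrappers executes emms; that is left to the caller. */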
#define PIX_SAD(suf)\
int pix_abs8x8_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+1, blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+stride  , blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+stride+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)