/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Gerard Lantau.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"
#include "../mangle.h"
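
/* Rounding constants for the half-pel averaging code: round_tab[1] is the
 * +1 rounder used before >>1 (2-point average), round_tab[2] the +2 rounder
 * used before >>2 (4-point average). bone has a 1 in every byte; it is
 * subtracted before the last pavgb in sad8_4_mmx2 to reduce the rounding
 * bias of the cascaded averages. */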
static const __attribute__ ((aligned(8))) UINT64 round_tab[3]={
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

static __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101ULL;
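
/* Plain-MMX SAD of an 8-pixel-wide block over 2^h rows. |a-b| is built as
 * psubusb(a,b) | psubusb(b,a), widened to words and accumulated into mm6.
 * The caller must zero mm6/mm7 first and read the total back via sum_mmx().
 * eax runs from -(stride<<h) up to 0, two rows per iteration. */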
static inline void sad8_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm4        \n\t"
        "addl %3, %%eax                 \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq (%2, %%eax), %%mm5        \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
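
/* MMX2 variant: psadbw sums the absolute byte differences of a whole
 * quadword in one instruction, so no unpacking is needed. */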
static inline void sad8_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %3, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}
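
/* MMX2 SAD against the average of two source blocks, used for the x and y
 * half-pel positions: pavgb forms the rounded 2-point average, psadbw the
 * SAD, with the per-row results accumulated into mm6. */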
static inline void sad8_2_mmx2(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "pavgb %%mm2, %%mm0             \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "pavgb %%mm1, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
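
/* MMX2 SAD against the 4-point (xy half-pel) average. The exact
 * (a+b+c+d+2)>>2 is approximated with cascaded pavgb; subtracting bone from
 * one intermediate average compensates for most of the double rounding. */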
static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{ //FIXME reuse src
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq 1(%1, %%eax), %%mm1       \n\t"
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "pavgb %%mm2, %%mm0             \n\t"
        "pavgb %%mm1, %%mm3             \n\t"
        "psubusb %%mm5, %%mm3           \n\t"
        "pavgb %%mm3, %%mm0             \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t"
        "movq 1(%2, %%eax), %%mm4       \n\t"
        "pavgb %%mm3, %%mm1             \n\t"
        "pavgb %%mm4, %%mm2             \n\t"
        "psubusb %%mm5, %%mm2           \n\t"
        "pavgb %%mm1, %%mm2             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm2            \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
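
/* Plain-MMX fallback for the 2-point half-pel average: both sources are
 * widened to words, summed with the rounder the caller preloaded into mm5,
 * shifted and repacked, then the SAD is formed with psubusb/por as above. */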
static inline void sad8_2_mmx(UINT8 *blk1a, UINT8 *blk1b, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq (%1, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}
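
/* Plain-MMX 4-point (xy half-pel) average: all four neighbours are summed
 * in word precision, the +2 rounder in mm5 is added and the result shifted
 * right by 2, so unlike the MMX2 version this average is exact. */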
static inline void sad8_4_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "movq %%mm1, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm2, %%mm4             \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t"
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "movq %%mm2, %%mm1              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm0, %%mm2             \n\t"
        "paddw %%mm4, %%mm1             \n\t"
        "movq %%mm3, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm4, %%mm1             \n\t"
        "movq (%3, %%eax), %%mm3        \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "paddw %%mm5, %%mm2             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "psrlw $2, %%mm2                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm2          \n\t"
        "psubusb %%mm2, %%mm3           \n\t"
        "psubusb %%mm4, %%mm2           \n\t"
        "por %%mm3, %%mm2               \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}
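
/* Fold the four word lanes of the mm6 accumulator into one scalar with two
 * shift/paddw steps; the total fits in 16 bits, hence the final mask. */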
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}
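
/* With psadbw the running total already sits in the low word of mm6. */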
static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}
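
/* Instantiate the public pix_abs* entry points (full-pel, x, y and xy
 * half-pel; 8x8 and 16x16) once per instruction set. Each wrapper zeroes
 * the mm7 constant and the mm6 accumulator, loads the rounder needed by the
 * plain-MMX averaging code into mm5, runs the inner loop(s) and folds the
 * result. No emms is executed here, so the caller must restore the FPU
 * state itself. */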
#define PIX_SAD(suf)\
int pix_abs8x8_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs8x8_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
\
int pix_abs16x16_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_x2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+1, blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_y2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+stride,   blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+stride+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
int pix_abs16x16_xy2_ ## suf(UINT8 *blk2, UINT8 *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\

PIX_SAD(mmx)
PIX_SAD(mmx2)