/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM
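
/* Rounding constants: round_tab[1] provides the +1 rounding used by the
 * averaged (x2/y2) paths, round_tab[2] the +2 rounding of the xy2 path;
 * bone (all bytes 0x01) corrects the rounding of the chained pavgb in
 * sad8_4_mmx2(). */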
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
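
/* SAD of one 8xh block with plain MMX: |blk1 - blk2| is built from two
 * saturated subtractions, widened to words against the zeroed %mm7 and
 * accumulated in %mm6. Two rows are handled per loop iteration. */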
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
}
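
/* Same SAD, but using the MMXEXT psadbw instruction, which sums the eight
 * byte differences of a row directly; per-row results accumulate in %mm6. */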
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
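
/* 16-wide SAD with SSE2: unaligned loads from blk1, psadbw against blk2,
 * then the two 64-bit partial sums in %xmm2 are folded with movhlps. */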
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile (
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg) stride));
    return ret;
}
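
/* SAD against the horizontally half-pel interpolated reference: pavgb
 * averages each source byte with its right neighbour before psadbw. */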
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
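
/* Vertical half-pel variant: each row is averaged with the row below it,
 * carrying the lower row over in %mm0 between loop iterations. */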
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile (
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
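
/* Diagonal (xy) half-pel variant: horizontal averages of adjacent rows are
 * averaged again vertically; subtracting bone from one intermediate roughly
 * cancels the upward rounding bias of the chained pavgb, so the result is
 * close to, but not bit-exact with, the C reference. */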
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile (
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1, %3), %%mm2         \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
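
/* Plain-MMX SAD against the rounded average of two reference blocks
 * (blk1a and blk1b); %mm5 must hold round_tab[1] and %mm7 must be zero.
 * sad8_x2a_mmx() and sad8_y2a_mmx() below map the half-pel cases onto this. */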
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg) stride));
}
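
/* Plain-MMX xy half-pel SAD: sums of four neighbouring pixels are kept as
 * words, rounded with round_tab[2] (byte offset 16) and shifted right by two
 * before the absolute difference against blk2 is accumulated in %mm6. */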
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg) stride));
}
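
/* Horizontal reduction of the %mm6 accumulator. The plain-MMX version folds
 * the four word lanes and masks the result to 16 bits; the MMXEXT version
 * can simply read the low dword, because psadbw already leaves a single
 * scalar sum per block. */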
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile (
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret & 0xFFFF;
}

static inline int sum_mmx2(void)
{
    int ret;
    __asm__ volatile (
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret;
}
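
/* On plain MMX the half-pel x2/y2 cases are expressed as a SAD against the
 * average of two shifted references, so they reuse sad8_2_mmx(). */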
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
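
/* PIX_SAD() instantiates the DSPContext-facing entry points for one
 * instruction-set suffix: it zeroes %mm7 (unpack zero) and %mm6 (accumulator),
 * loads round_tab[1] into %mm5 where the half-pel kernels need it, runs the
 * 8-wide kernels (twice, side by side, for the 16-wide versions) and returns
 * the reduced sum. */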
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5    \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5    \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t":);\
\
    sad8_1_ ## suf(blk1,     blk2,     stride, h);\
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5    \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_x2a_ ## suf(blk1,     blk2,     stride, h);\
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5    \n\t"\
                     :: "m" (round_tab[1]));\
\
    sad8_y2a_ ## suf(blk1,     blk2,     stride, h);\
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1,     blk2,     stride, h);\
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)

#endif /* HAVE_INLINE_ASM */
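
/* Install the SAD implementations into the DSPContext according to the CPU
 * flags. The half-pel MMXEXT variants round differently from the C reference,
 * so they are only used when bit-exact output is not requested. */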
void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0] = sad16_mmx2;
        c->sad[1] = sad8_mmx2;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) &&
        avctx->codec_id != AV_CODEC_ID_SNOW) {
        c->sad[0] = sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}