/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
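
/*
 * Plain MMX 8-pixel-wide SAD core: |blk1 - blk2| is built from two
 * saturated byte subtractions OR'ed together, widened to 16 bits against
 * the zero register %mm7 and accumulated in %mm6.  The caller is expected
 * to clear %mm6/%mm7 beforehand and read the total back via sum_mmx().
 * Two rows are processed per loop iteration.
 */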
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
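
/* MMXEXT variant of the 8-wide SAD: psadbw computes the byte-wise absolute
 * differences and their horizontal sum in one instruction; the running
 * total is kept in %mm6 and read back via sum_mmxext(). */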
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
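
/* 16-pixel-wide SAD using SSE2: movdqu tolerates an unaligned blk1, psadbw
 * produces one partial sum per 64-bit half of %xmm2, and movhlps folds the
 * two halves together before the scalar result is extracted. */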
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile (
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}
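
/* SAD against blk1 interpolated at the x half-pel position: pavgb with the
 * bytes one pixel to the right forms the rounded average before psadbw. */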
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
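
/* SAD against blk1 interpolated at the y half-pel position: each row is
 * averaged with the one below it; %mm0 carries the previous row across
 * loop iterations so every input row is loaded only once. */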
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile (
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
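
/* SAD against blk1 interpolated at the x+y half-pel position.  The exact
 * four-point average is approximated by chaining pavgb operations; the
 * psubusb of "bone" (0x01 in every byte) trims the accumulated rounding
 * bias.  Because this is only an approximation, the MMXEXT version is not
 * installed when CODEC_FLAG_BITEXACT is set (see ff_dsputil_init_pix_mmx). */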
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile (
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
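
/* Plain MMX SAD against the rounded average of two source blocks, shared
 * by the x2 and y2 half-pel cases.  The averaging is done at 16-bit
 * precision; the caller preloads the rounding constant round_tab[1]
 * into %mm5. */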
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                              int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg)stride)
    );
}
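
/* Plain MMX SAD against the four-point (x+y half-pel) average.  Sums of
 * horizontally adjacent pixels are kept at 16-bit precision and carried
 * over in %mm0/%mm1 so each input row is read only once; round_tab[2]
 * supplies the +2 rounding term before the shift by two. */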
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile (
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg)stride)
    );
}
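
/* Reduce the per-lane 16-bit totals in %mm6 to a single scalar.  With
 * MMXEXT, psadbw already left a single sum in the low word of %mm6, so
 * sum_mmxext() is just a movd. */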
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile (
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret & 0xFFFF;
}

static inline int sum_mmxext(void)
{
    int ret;
    __asm__ volatile (
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}
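
/* The plain MMX x2/y2 half-pel cases are wrappers around sad8_2_mmx with
 * the second source shifted by one pixel or one line, respectively. */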
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
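
/*
 * PIX_SAD() instantiates the complete set of pix_abs[][] / sad[] callbacks
 * for one instruction-set suffix: each wrapper clears the accumulator
 * registers, loads round_tab[1] into %mm5 where the half-pel average needs
 * it, runs the 8-pixel-wide core once (sad8_*) or twice side by side
 * (sad16_*), and returns the reduced total from sum_*().
 */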
#define PIX_SAD(suf) \
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    av_assert2(h == 8); \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        :); \
 \
    sad8_1_ ## suf(blk1, blk2, stride, 8); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    av_assert2(h == 8); \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        "movq %0, %%mm5    \n\t" \
        :: "m" (round_tab[1])); \
 \
    sad8_x2a_ ## suf(blk1, blk2, stride, 8); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    av_assert2(h == 8); \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        "movq %0, %%mm5    \n\t" \
        :: "m" (round_tab[1])); \
 \
    sad8_y2a_ ## suf(blk1, blk2, stride, 8); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    av_assert2(h == 8); \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        ::); \
 \
    sad8_4_ ## suf(blk1, blk2, stride, 8); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        :); \
 \
    sad8_1_ ## suf(blk1,     blk2,     stride, h); \
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        "movq %0, %%mm5    \n\t" \
        :: "m" (round_tab[1])); \
 \
    sad8_x2a_ ## suf(blk1,     blk2,     stride, h); \
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        "movq %0, %%mm5    \n\t" \
        :: "m" (round_tab[1])); \
 \
    sad8_y2a_ ## suf(blk1,     blk2,     stride, h); \
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
    return sum_ ## suf(); \
} \
 \
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "pxor %%mm6, %%mm6 \n\t" \
        ::); \
 \
    sad8_4_ ## suf(blk1,     blk2,     stride, h); \
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
 \
    return sum_ ## suf(); \
}

PIX_SAD(mmx)
PIX_SAD(mmxext)

#endif /* HAVE_INLINE_ASM */
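
/* Hook the SAD implementations into the DSPContext according to the CPU
 * flags detected at runtime. */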
av_cold void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmxext;
        c->pix_abs[1][0] = sad8_mmxext;

        c->sad[0] = sad16_mmxext;
        c->sad[1] = sad8_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][1] = sad16_x2_mmxext;
            c->pix_abs[0][2] = sad16_y2_mmxext;
            c->pix_abs[0][3] = sad16_xy2_mmxext;
            c->pix_abs[1][1] = sad8_x2_mmxext;
            c->pix_abs[1][2] = sad8_y2_mmxext;
            c->pix_abs[1][3] = sad8_xy2_mmxext;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) &&
        avctx->codec_id != AV_CODEC_ID_SNOW) {
        c->sad[0] = sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}