/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
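/* Rounding constants for the word-precision MMX averaging paths:
 * round_tab[1] (0x0001 per word) rounds the 2-tap (a+b+1)>>1 average and
 * round_tab[2] (0x0002 per word) rounds the 4-tap (a+b+c+d+2)>>2 average.
 * "bone" is a vector of byte 1s used to correct the rounding bias of the
 * chained pavgb approximation in sad8_4_mmx2(). */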
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;
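/* SAD of an 8-pixel-wide block of height h, plain MMX. |blk1 - blk2| is
 * built from two saturating byte subtractions, the bytes are widened
 * against mm7 (zeroed by the caller) and the 16-bit partial sums are
 * accumulated in mm6; two rows are handled per loop iteration. */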
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a"), %%mm0 \n\t"
        "movq (%2, %%"REG_a"), %%mm2 \n\t"
        "movq (%2, %%"REG_a"), %%mm4 \n\t"
        "add %3, %%"REG_a" \n\t"
        "psubusb %%mm0, %%mm2 \n\t"
        "psubusb %%mm4, %%mm0 \n\t"
        "movq (%1, %%"REG_a"), %%mm1 \n\t"
        "movq (%2, %%"REG_a"), %%mm3 \n\t"
        "movq (%2, %%"REG_a"), %%mm5 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm5, %%mm1 \n\t"
        "por %%mm2, %%mm0 \n\t"
        "por %%mm1, %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "punpckhbw %%mm7, %%mm2 \n\t"
        "paddw %%mm1, %%mm0 \n\t"
        "paddw %%mm3, %%mm2 \n\t"
        "paddw %%mm2, %%mm0 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "add %3, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
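/* Same as sad8_1_mmx(), but using the MMXEXT psadbw instruction, which sums
 * the absolute byte differences of a whole row in one step; two rows per
 * iteration, result accumulated in mm6. */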
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "psadbw (%2), %%mm0 \n\t"
        "psadbw (%2, %3), %%mm1 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "lea (%2,%3,2), %2 \n\t"
        "sub $2, %0 \n\t"
        " jg 1b \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
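/* 16-pixel-wide SAD using SSE2: unaligned 16-byte loads and psadbw per row,
 * two rows per iteration; the two 64-bit partial sums in xmm2 are folded
 * with movhlps before the result is moved to a general-purpose register. */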
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1, %4), %%xmm1 \n\t"
        "psadbw (%2), %%xmm0 \n\t"
        "psadbw (%2, %4), %%xmm1 \n\t"
        "paddw %%xmm0, %%xmm2 \n\t"
        "paddw %%xmm1, %%xmm2 \n\t"
        "lea (%1,%4,2), %1 \n\t"
        "lea (%2,%4,2), %2 \n\t"
        "sub $2, %0 \n\t"
        " jg 1b \n\t"
        "movhlps %%xmm2, %%xmm0 \n\t"
        "paddw %%xmm0, %%xmm2 \n\t"
        "movd %%xmm2, %3 \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r"(ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}
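/* MMXEXT SAD against the horizontal half-pel interpolation of blk1: pavgb
 * with the pixel one byte to the right computes (a+b+1)>>1 before the
 * psadbw against blk2. */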
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "pavgb 1(%1), %%mm0 \n\t"
        "pavgb 1(%1, %3), %%mm1 \n\t"
        "psadbw (%2), %%mm0 \n\t"
        "psadbw (%2, %3), %%mm1 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "lea (%2,%3,2), %2 \n\t"
        "sub $2, %0 \n\t"
        " jg 1b \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
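/* MMXEXT SAD against the vertical half-pel interpolation of blk1: each row
 * is averaged with the following row via pavgb, keeping the last row in mm0
 * across iterations so every line is loaded only once. */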
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0 \n\t"
        "add %3, %1 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1), %%mm1 \n\t"
        "movq (%1, %3), %%mm2 \n\t"
        "pavgb %%mm1, %%mm0 \n\t"
        "pavgb %%mm2, %%mm1 \n\t"
        "psadbw (%2), %%mm0 \n\t"
        "psadbw (%2, %3), %%mm1 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "movq %%mm2, %%mm0 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "lea (%2,%3,2), %2 \n\t"
        "sub $2, %0 \n\t"
        " jg 1b \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
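/* MMXEXT SAD against the 2D (x+y) half-pel interpolation, approximated by
 * chaining two pavgb operations (horizontal, then vertical). Subtracting
 * "bone" (1 per byte) from the intermediate row reduces the accumulated
 * round-up bias; the result is not bit-exact with the (a+b+c+d+2)>>2
 * reference filter, which is why this variant is only installed when
 * bit-exact operation is not requested. */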
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5 \n\t"
        "movq (%1), %%mm0 \n\t"
        "pavgb 1(%1), %%mm0 \n\t"
        "add %3, %1 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1), %%mm1 \n\t"
        "movq (%1,%3), %%mm2 \n\t"
        "pavgb 1(%1), %%mm1 \n\t"
        "pavgb 1(%1,%3), %%mm2 \n\t"
        "psubusb %%mm5, %%mm1 \n\t"
        "pavgb %%mm1, %%mm0 \n\t"
        "pavgb %%mm2, %%mm1 \n\t"
        "psadbw (%2), %%mm0 \n\t"
        "psadbw (%2,%3), %%mm1 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "movq %%mm2, %%mm0 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "lea (%2,%3,2), %2 \n\t"
        "sub $2, %0 \n\t"
        " jg 1b \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}
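/* Plain-MMX SAD against the rounded average of two source blocks, computed
 * in 16-bit precision as (blk1a + blk1b + 1) >> 1, with the rounding word
 * expected in mm5 (round_tab[1], loaded by the caller). Used for the x2 and
 * y2 half-pel cases via the wrappers further below. */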
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a"), %%mm0 \n\t"
        "movq (%2, %%"REG_a"), %%mm1 \n\t"
        "movq (%1, %%"REG_a"), %%mm2 \n\t"
        "movq (%2, %%"REG_a"), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpckhbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "movq (%3, %%"REG_a"), %%mm4 \n\t"
        "movq (%3, %%"REG_a"), %%mm2 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psrlw $1, %%mm1 \n\t"
        "psrlw $1, %%mm3 \n\t"
        "packuswb %%mm3, %%mm1 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm2, %%mm1 \n\t"
        "por %%mm4, %%mm1 \n\t"
        "movq %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "paddw %%mm1, %%mm0 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "add %4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
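/* Plain-MMX SAD against the 2D half-pel interpolation, computed exactly in
 * 16-bit precision as (a+b+c+d+2)>>2 with the rounding constant taken from
 * round_tab[2] (offset 16 into the table); the horizontal pair sum of the
 * previous row is carried in mm0/mm1 across iterations. */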
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0 \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm2, %%mm0 \n\t"
        "paddw %%mm3, %%mm1 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%2, %%"REG_a"), %%mm2 \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpckhbw %%mm7, %%mm5 \n\t"
        "paddw %%mm4, %%mm2 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0 \n\t"
        "paddw %%mm3, %%mm1 \n\t"
        "paddw %%mm5, %%mm0 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "movq (%3, %%"REG_a"), %%mm4 \n\t"
        "movq (%3, %%"REG_a"), %%mm5 \n\t"
        "psrlw $2, %%mm0 \n\t"
        "psrlw $2, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "psubusb %%mm0, %%mm4 \n\t"
        "psubusb %%mm5, %%mm0 \n\t"
        "por %%mm4, %%mm0 \n\t"
        "movq %%mm0, %%mm4 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "paddw %%mm4, %%mm6 \n\t"
        "movq %%mm2, %%mm0 \n\t"
        "movq %%mm3, %%mm1 \n\t"
        "add %4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
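/* Fold the four 16-bit partial sums accumulated in mm6 into a single value
 * (plain-MMX path); only the low 16 bits of the result are significant. */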
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}
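/* In the MMXEXT path psadbw already leaves a complete per-row sum in the
 * low word of mm6, so the accumulated total can be read out directly. */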
static inline int sum_mmx2(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0 \n\t"
        : "=r" (ret)
    );
    return ret;
}
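/* Plain-MMX x2/y2 half-pel SAD: average blk1 with its right or lower
 * neighbour by calling the two-source helper above. */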
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}
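/* Instantiate the DSPContext-facing entry points for one instruction-set
 * suffix: full-pel, x2, y2 and xy2 SAD for 8- and 16-pixel-wide blocks.
 * Each wrapper clears mm7 (zero register for unpacking) and mm6 (the
 * accumulator), loads the rounding constant where the helper needs it,
 * runs the inner helper(s) and returns the folded sum. */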
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5 \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5 \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t":);\
\
    sad8_1_ ## suf(blk1 , blk2 , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5 \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1 , blk2 , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     "movq %0, %%mm5 \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1 , blk2 , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7 \n\t"\
                     "pxor %%mm6, %%mm6 \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1 , blk2 , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}
PIX_SAD(mmx)
PIX_SAD(mmx2)
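/* Install the SAD functions into the DSPContext according to the detected
 * CPU features. The MMXEXT half-pel variants are only used when the codec
 * does not request bit-exact results; the SSE2 16-pixel SAD is only used
 * when the CPU does not report 3DNow! and the codec is not Snow. */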
void ff_dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
        c->sad[0]= sad16_sse2;
    }
}