/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"
#include "x86_cpu.h"

/* Rounding constants for the averaging variants: round_tab[1] is added per
 * 16-bit lane before a >>1 (halfpel average), round_tab[2] before a >>2
 * (quarterpel average). */
static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

/* All-ones bytes, used in sad8_4_mmx2() to compensate the upward rounding
 * bias of chained pavgb instructions. */
static attribute_used __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL;

/* Plain 8-wide SAD, two rows per iteration: |a-b| is formed as
 * psubusb(a,b) OR psubusb(b,a) (blk2 is loaded twice to get both copies),
 * then widened against the zero register %mm7 and accumulated into the four
 * 16-bit lanes of %mm6. The caller zeroes %mm6/%mm7 beforehand. */
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride)
    );
}

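/* A minimal scalar sketch (not part of the original file) of the quantity
 * sad8_1_mmx() and sad8_1_mmx2() accumulate, assuming the caller has zeroed
 * the %mm6 accumulator: */
#if 0
static int sad8_1_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, d, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            d = blk1[x] - blk2[x];
            sum += d < 0 ? -d : d;
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}
#endif
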
/* Same SAD using the MMX2 psadbw instruction, which produces the whole
 * 8-pixel absolute-difference sum in a single step; two rows per iteration. */
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* 16-wide SAD with SSE2: unaligned 16-byte loads (movdqu) of blk1 and
 * psadbw against blk2, which leaves one partial sum in each 64-bit half of
 * %xmm6; the second asm block folds the high half onto the low half and
 * extracts the scalar result. */
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    asm volatile(
        "pxor %%xmm6, %%xmm6            \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %3), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %3), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm6           \n\t"
        "paddw %%xmm1, %%xmm6           \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
    asm volatile(
        "movhlps %%xmm6, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm6           \n\t"
        "movd %%xmm6, %0                \n\t"
        : "=r"(ret)
    );
    return ret;
}

/* Horizontal-halfpel SAD (MMX2): pavgb with the one-byte-shifted row gives
 * the rounded average (a+b+1)>>1 before the psadbw. */
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* Vertical-halfpel SAD (MMX2): each row is averaged with the one below it;
 * the last row of the pair is carried over in %mm0 so every source row is
 * loaded only once. */
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* xy-halfpel (quarterpel-position) SAD (MMX2): approximates
 * (a+b+c+d+2)>>2 by chaining pavgb instructions; subtracting `bone` from one
 * intermediate compensates most of the upward rounding bias, but the result
 * is still only an approximation, which is why this variant is guarded by
 * CODEC_FLAG_BITEXACT below. */
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* SAD against the rounded average of two source blocks; the x- and y-halfpel
 * wrappers below pass blk1+1 or blk1+stride as the second source. The caller
 * must zero %mm7 and load round_tab[1] into %mm5 (supplying the +1 before
 * the >>1); blk2 is loaded twice to get both copies for the bidirectional
 * saturated subtract. */
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((long)stride)
    );
}

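/* A minimal scalar sketch (not part of the original file) of the quantity
 * sad8_2_mmx() accumulates; %mm5 = round_tab[1] supplies the +1: */
#if 0
static int sad8_2_ref(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                      int stride, int h)
{
    int x, y, d, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            d = ((blk1a[x] + blk1b[x] + 1) >> 1) - blk2[x];
            sum += d < 0 ? -d : d;
        }
        blk1a += stride;
        blk1b += stride;
        blk2  += stride;
    }
    return sum;
}
#endif
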
/* xy-halfpel SAD (plain MMX), computed exactly: the 16-bit horizontal pair
 * sums of the previous row are kept in %mm0/%mm1 so each source row is
 * widened only once; round_tab[2] (= 2 per lane) is added before the >>2. */
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((long)stride)
    );
}

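/* A minimal scalar sketch (not part of the original file) of the quarterpel
 * average whose SAD sad8_4_mmx() computes exactly (sad8_4_mmx2() only
 * approximates it; see the `bone` note above): */
#if 0
static int sad8_4_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, d, avg, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            avg = (blk1[x]        + blk1[x+1] +
                   blk1[x+stride] + blk1[x+stride+1] + 2) >> 2;
            d   = avg - blk2[x];
            sum += d < 0 ? -d : d;
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}
#endif
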
/* Fold the four 16-bit partial sums in %mm6 into one scalar. */
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

/* With psadbw the sum is already a scalar in the low word of %mm6. */
static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}

/* The plain-MMX halfpel variants reuse sad8_2_mmx() with a shifted second
 * source: +1 for horizontal, +stride for vertical. */
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}

#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)

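/* PIX_SAD(mmx) and PIX_SAD(mmx2) expand to the full set of entry points
 * (sad8_mmx ... sad16_xy2_mmx and sad8_mmx2 ... sad16_xy2_mmx2): each zeroes
 * the accumulator registers, runs the inner routine(s) on one or two 8-wide
 * halves, and folds the result with the matching sum_*(). */
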
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & MM_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & MM_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        /* The pavgb-based halfpel variants (notably the xy2 approximation)
         * are not guaranteed bit-exact, so they are only installed when
         * bit-exact output is not required. */
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    /* Use SSE2 only when 3DNow is absent, presumably to avoid AMD CPUs of
     * the era on which this movdqu-heavy routine was slower than the MMX2
     * one. */
    if ((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)) {
        c->sad[0]= sad16_sse2;
    }
}
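
/* Illustrative usage sketch (not part of the original file): once
 * dsputil_init_pix_mmx() has filled in the tables, a motion estimator scores
 * candidates through the function pointers, e.g.
 *
 *     int score = c->pix_abs[0][0](NULL, blk2, blk1, stride, 16);
 *
 * pix_abs[0][*] are the 16x16 variants and pix_abs[1][*] the 8x8 ones; the
 * second index selects full-pel, x-halfpel, y-halfpel or xy-halfpel, with
 * the interpolation applied to blk1. The first argument is an unused context
 * pointer in these implementations. */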