/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <assert.h> /* assert() is used by the PIX_SAD() wrappers below */

#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mpegvideo.h"
#include "dsputil_x86.h"

#if HAVE_INLINE_ASM
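
/* Rounding constants for the half-pel averaging paths: round_tab[1] and
 * round_tab[2] supply the +1 and +2 rounding terms, and bone is a vector
 * of 0x01 bytes used to adjust the MMXEXT xy2 average. */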
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
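
/* SAD of an 8-pixel-wide strip, plain MMX: |a - b| is built from two
 * saturated subtractions OR'd together, widened to 16-bit words and
 * accumulated in %mm6. The callers zero %mm6 and %mm7 beforehand. */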
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
}
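
/* Same strip SAD using the MMXEXT psadbw instruction, which sums the
 * absolute byte differences of a whole quadword in one step. */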
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
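
/* 16-pixel-wide SAD with unaligned SSE2 loads; the two halves of the
 * %xmm2 accumulator are folded with movhlps and the result is returned
 * directly instead of going through %mm6. */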
static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
                      int stride, int h)
{
    int ret;
    __asm__ volatile (
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg) stride));
    return ret;
}
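
/* MMXEXT SAD against the source averaged with its right neighbour
 * (horizontal half-pel position). */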
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
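
/* MMXEXT SAD against the source averaged with the row below
 * (vertical half-pel position). */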
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile (
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
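
/* MMXEXT SAD against the average of four neighbouring pixels (x+y
 * half-pel). Subtracting bone (0x01 bytes) from one intermediate pavgb
 * result compensates for pavgb rounding up twice, so this is only an
 * approximation of the exact 4-pixel average. */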
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile (
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1, %3), %%mm2         \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg) stride));
}
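
/* Plain-MMX SAD against the rounded average of the two references blk1a
 * and blk1b; %mm5 must hold round_tab[1], which the PIX_SAD() wrappers
 * load. Reused for both horizontal and vertical half-pel positions via
 * sad8_x2a_mmx() and sad8_y2a_mmx() below. */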
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                              int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg) stride));
}
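
/* Plain-MMX SAD against the rounded average of four neighbouring pixels;
 * round_tab[2] (loaded as 16+round_tab) supplies the +2 rounding term
 * before the shift by 2. */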
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(stride * h);
    __asm__ volatile (
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg) stride));
}
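
/* Fold the per-lane word sums accumulated in %mm6 into one scalar. With
 * psadbw (the MMXEXT path) the total already sits in the low word of
 * %mm6, so sum_mmxext() only needs a movd. */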
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile (
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret & 0xFFFF;
}

static inline int sum_mmxext(void)
{
    int ret;
    __asm__ volatile (
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret));
    return ret;
}
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
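
/* Instantiate the pix_abs/sad entry points for one instruction set. Each
 * wrapper zeroes the %mm6 accumulator (and %mm7 for unpacking), loads the
 * rounding constant where needed, runs the 8-pixel-wide kernels and
 * returns the folded sum. */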
#define PIX_SAD(suf)                                                    \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2,               \
                        uint8_t *blk1, int stride, int h)               \
{                                                                       \
    assert(h == 8);                                                     \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, int stride, int h)            \
{                                                                       \
    assert(h == 8);                                                     \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, int stride, int h)            \
{                                                                       \
    assert(h == 8);                                                     \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    assert(h == 8);                                                     \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
                         uint8_t *blk1, int stride, int h)              \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1, blk2, stride, h);                              \
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1, blk2, stride, h);                            \
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, int stride, int h)           \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1, blk2, stride, h);                            \
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
                             uint8_t *blk1, int stride, int h)          \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1, blk2, stride, h);                              \
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \

PIX_SAD(mmx)
PIX_SAD(mmxext)

#endif /* HAVE_INLINE_ASM */
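
/* Select implementations according to the detected CPU flags. The SSE2
 * SAD is skipped when the 3DNow flag is also set, and the approximate
 * half-pel MMXEXT versions are only installed when the codec does not
 * require bit-exact output. */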
av_cold void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int cpu_flags = av_get_cpu_flags();

    if (INLINE_MMX(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;
    }
    if (INLINE_MMXEXT(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmxext;
        c->pix_abs[1][0] = sad8_mmxext;

        c->sad[0] = sad16_mmxext;
        c->sad[1] = sad8_mmxext;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][1] = sad16_x2_mmxext;
            c->pix_abs[0][2] = sad16_y2_mmxext;
            c->pix_abs[0][3] = sad16_xy2_mmxext;
            c->pix_abs[1][1] = sad8_x2_mmxext;
            c->pix_abs[1][2] = sad8_y2_mmxext;
            c->pix_abs[1][3] = sad8_xy2_mmxext;
        }
    }
    if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW)) {
        c->sad[0] = sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}