/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "dsputil.h"
#include "dsputil_mmx.h"
#include "simple_idct.h"
#include "mpegvideo.h"
#include "x86_cpu.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "h263.h"

//#undef NDEBUG
//#include <assert.h>

extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_8 ) = 0x0008000800080008ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) asm volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to access the constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
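
/* All three register-only variants materialize their constant without a
   memory load: pcmpeqd sets every bit, so paddb of the register with
   itself gives 0xFE per byte (MOVQ_BFE); psrlw $15 leaves 0x0001 per
   word, which packuswb narrows to 0x01 per byte (MOVQ_BONE) and
   psllw $1 doubles to 0x0002 per word (MOVQ_WTWO). Under PIC this
   avoids the GOT-relative addressing a constant load would need. */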
// regr is used as a temporary and holds the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */
#define DEF(x) x ## _mmx2
/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    asm volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus the second block passes the
    // pointer via "r" instead of "m"
    asm volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
        :"memory");
}
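
/* A scalar sketch of what the two blocks above compute (illustrative
   only; av_clip_uint8() is assumed to be the libavutil byte-clipping
   helper):

       for (i = 0; i < 8; i++) {
           for (j = 0; j < 8; j++)
               pixels[j] = av_clip_uint8(block[j]); // packuswb saturates
           pixels += line_size;
           block  += 8;
       }
*/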
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        asm volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
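
/* Per pixel this is pixels[x] = clip_uint8(pixels[x] + block[x]):
   punpck{l,h}bw against the zeroed mm7 widens the bytes to words,
   paddsw adds the residual with signed saturation, and packuswb clamps
   the result back to 0..255 on the way out. */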
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size), "r"(3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size), "r"(3L*line_size)
        : "memory"
        );
}
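
/* pavgb computes the rounding average (a + b + 1) >> 1 per byte. Note
   that both SSE2 routines load the source with movdqu but store with
   movdqa, so 'block' must be 16-byte aligned while 'pixels' need not be. */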
static void clear_blocks_mmx(DCTELEM *blocks)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
        );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
        );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((long)w-15)
        );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        asm volatile(
            H263_LOOP_FILTER
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m" (*(uint64_t*)(src - 2*stride)),
              "+m" (*(uint64_t*)(src - 1*stride)),
              "+m" (*(uint64_t*)(src + 0*stride)),
              "+m" (*(uint64_t*)(src + 1*stride))
            : "g" (2*strength), "m"(ff_pb_FC)
            );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
        );
}
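
/* i.e. the plain 4x4 byte transpose dst[y][x] = src[x][y], done with
   two rounds of punpck interleaving instead of 16 scalar loads/stores. */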
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp[4]);
        uint8_t *btemp= (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp , src , 8, stride);
        transpose4x4(btemp+4, src + 4*stride, 8, stride);
        asm volatile(
            H263_LOOP_FILTER // 5 3 4 6
            : "+m" (temp[0]),
              "+m" (temp[1]),
              "+m" (temp[2]),
              "+m" (temp[3])
            : "g" (2*strength), "m"(ff_pb_FC)
            );

        asm volatile(
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0,%2) \n\t"
            "movd %%mm3, (%0,%2,2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0,%3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1,%2) \n\t"
            "movd %%mm6, (%1,%2,2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1,%3) \n\t"
            :: "r" (src),
               "r" (src + 4*stride),
               "r" ((long) stride ),
               "r" ((long)(3*stride))
            );
    }
}
/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 or w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((long)wrap), "r" ((long)width), "r" (ptr + wrap*height)
            );
    }
    else
    {
        asm volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((long)wrap), "r" ((long)width), "r" (ptr + wrap*height)
            );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((long)buf - (long)ptr - w), "r" ((long)-wrap), "r" ((long)-wrap*3), "r" (ptr+width+2*w)
            );
        ptr= last_line + (i + 1) * wrap - w;
        asm volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((long)last_line - (long)ptr - w), "r" ((long)wrap), "r" ((long)wrap*3), "r" (ptr+width+2*w)
            );
    }
}
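
/* Scalar sketch of the border replication done above (illustrative
   only): the first pair of loops extends every line sideways by
   replicating its first and last pixel, then the final loop copies
   whole padded lines upwards and downwards, which also fills the
   corners:

       for (y = 0; y < height; y++)
           for (x = 0; x < w; x++) {
               buf[y*wrap - 1 - x]     = buf[y*wrap];             // left
               buf[y*wrap + width + x] = buf[y*wrap + width - 1]; // right
           }
*/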
#define PAETH(cpu, abs3)\
void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    long i = -bpp;\
    long end = w-3;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((long)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
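
/* Scalar reference for the Paeth predictor, as defined by the PNG
   specification (sketch only; the asm above evaluates it branch-free
   on four pixels at a time):

       int p  = left + top - topleft;
       int pa = abs(p - left);    // == abs(top - topleft),  |mm3|
       int pb = abs(p - top);     // == abs(left - topleft), |mm4|
       int pc = abs(p - topleft); //                         |mm5|
       int pred = (pa <= pb && pa <= pc) ? left
                : (pb <= pc)             ? top : topleft;
       dst[i] = (src[i] + pred) & 0xFF;
*/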
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
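
/* QPEL_V_LOW evaluates one row of the MPEG-4 quarter-pel 6-tap filter.
   With x1..x4 the symmetric pair sums around the output position it
   computes, per word:

       out = clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)

   where rnd is 16 for the rounding and 15 for the no-rounding variants
   (cf. the ROUNDER arguments at the instantiations below). */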
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
        : "memory"\
    );\
}\
\
  1212. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1213. OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
  1214. }\
  1215. \
  1216. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1217. uint64_t temp[8];\
  1218. uint8_t * const half= (uint8_t*)temp;\
  1219. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1220. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1221. }\
  1222. \
  1223. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1224. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
  1225. }\
  1226. \
  1227. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1228. uint64_t temp[8];\
  1229. uint8_t * const half= (uint8_t*)temp;\
  1230. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1231. OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
  1232. }\
  1233. \
  1234. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1235. uint64_t temp[8];\
  1236. uint8_t * const half= (uint8_t*)temp;\
  1237. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1238. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1239. }\
  1240. \
  1241. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1242. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1243. }\
  1244. \
  1245. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1246. uint64_t temp[8];\
  1247. uint8_t * const half= (uint8_t*)temp;\
  1248. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1249. OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
  1250. }\
  1251. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1252. uint64_t half[8 + 9];\
  1253. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1254. uint8_t * const halfHV= ((uint8_t*)half);\
  1255. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1256. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1257. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1258. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1259. }\
  1260. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1261. uint64_t half[8 + 9];\
  1262. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1263. uint8_t * const halfHV= ((uint8_t*)half);\
  1264. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1265. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1266. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1267. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1268. }\
  1269. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1270. uint64_t half[8 + 9];\
  1271. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1272. uint8_t * const halfHV= ((uint8_t*)half);\
  1273. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1274. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1275. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1276. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1277. }\
  1278. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1279. uint64_t half[8 + 9];\
  1280. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1281. uint8_t * const halfHV= ((uint8_t*)half);\
  1282. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1283. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1284. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1285. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1286. }\
  1287. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1288. uint64_t half[8 + 9];\
  1289. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1290. uint8_t * const halfHV= ((uint8_t*)half);\
  1291. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1292. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1293. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1294. }\
  1295. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1296. uint64_t half[8 + 9];\
  1297. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1298. uint8_t * const halfHV= ((uint8_t*)half);\
  1299. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1300. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1301. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1302. }\
  1303. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1304. uint64_t half[8 + 9];\
  1305. uint8_t * const halfH= ((uint8_t*)half);\
  1306. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1307. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1308. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1309. }\
  1310. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1311. uint64_t half[8 + 9];\
  1312. uint8_t * const halfH= ((uint8_t*)half);\
  1313. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1314. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1315. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1316. }\
  1317. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1318. uint64_t half[9];\
  1319. uint8_t * const halfH= ((uint8_t*)half);\
  1320. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1321. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1322. }\
  1323. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1324. OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
  1325. }\
  1326. \
  1327. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1328. uint64_t temp[32];\
  1329. uint8_t * const half= (uint8_t*)temp;\
  1330. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1331. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1332. }\
  1333. \
  1334. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1335. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
  1336. }\
  1337. \
  1338. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1339. uint64_t temp[32];\
  1340. uint8_t * const half= (uint8_t*)temp;\
  1341. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1342. OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
  1343. }\
  1344. \
  1345. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1346. uint64_t temp[32];\
  1347. uint8_t * const half= (uint8_t*)temp;\
  1348. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  1349. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1350. }\
  1351. \
  1352. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1353. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1354. }\
  1355. \
  1356. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1357. uint64_t temp[32];\
  1358. uint8_t * const half= (uint8_t*)temp;\
  1359. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  1360. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
  1361. }\
  1362. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1363. uint64_t half[16*2 + 17*2];\
  1364. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1365. uint8_t * const halfHV= ((uint8_t*)half);\
  1366. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1367. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1368. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1369. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1370. }\
  1371. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1372. uint64_t half[16*2 + 17*2];\
  1373. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1374. uint8_t * const halfHV= ((uint8_t*)half);\
  1375. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1376. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1377. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1378. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1379. }\
  1380. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1381. uint64_t half[16*2 + 17*2];\
  1382. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1383. uint8_t * const halfHV= ((uint8_t*)half);\
  1384. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1385. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1386. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1387. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1388. }\
  1389. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1390. uint64_t half[16*2 + 17*2];\
  1391. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1392. uint8_t * const halfHV= ((uint8_t*)half);\
  1393. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1394. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1395. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1396. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1397. }\
  1398. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1399. uint64_t half[16*2 + 17*2];\
  1400. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1401. uint8_t * const halfHV= ((uint8_t*)half);\
  1402. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1403. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1404. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1405. }\
  1406. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1407. uint64_t half[16*2 + 17*2];\
  1408. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1409. uint8_t * const halfHV= ((uint8_t*)half);\
  1410. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1411. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1412. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1413. }\
  1414. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1415. uint64_t half[17*2];\
  1416. uint8_t * const halfH= ((uint8_t*)half);\
  1417. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1418. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1419. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1420. }\
  1421. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1422. uint64_t half[17*2];\
  1423. uint8_t * const halfH= ((uint8_t*)half);\
  1424. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1425. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1426. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1427. }\
  1428. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1429. uint64_t half[17*2];\
  1430. uint8_t * const halfH= ((uint8_t*)half);\
  1431. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1432. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1433. }
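/* Editor's note: the qpelN_mcXY naming encodes the quarter-pel sampling
 * position: X is the horizontal and Y the vertical quarter-pel offset
 * (0..3).  mc00 is the integer-pel copy, mc20/mc02 the half-pel positions,
 * and the remaining points are built by blending a lowpass-filtered plane
 * with a neighbouring plane via pixels*_l2.  The same (X + 4*Y) indexing
 * is used when these are wired into the pixels_tab arrays further below. */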
  1434. #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
  1435. #define AVG_3DNOW_OP(a,b,temp, size) \
  1436. "mov" #size " " #b ", " #temp " \n\t"\
  1437. "pavgusb " #temp ", " #a " \n\t"\
  1438. "mov" #size " " #a ", " #b " \n\t"
  1439. #define AVG_MMX2_OP(a,b,temp, size) \
  1440. "mov" #size " " #b ", " #temp " \n\t"\
  1441. "pavgb " #temp ", " #a " \n\t"\
  1442. "mov" #size " " #a ", " #b " \n\t"
  1443. QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
  1444. QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
  1445. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  1446. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
  1447. QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
  1448. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  1449. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
  1450. QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
  1451. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
  1452. /***********************************/
  1453. /* bilinear qpel: not compliant with any spec, used only with -lavdopts fast (a scalar sketch of the approximation follows the instantiations below) */
  1454. #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
  1455. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1456. OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
  1457. }
  1458. #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
  1459. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1460. OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
  1461. }
  1462. #define QPEL_2TAP(OPNAME, SIZE, MMX)\
  1463. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
  1464. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
  1465. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
  1466. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
  1467. OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
  1468. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
  1469. OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
  1470. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
  1471. OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
  1472. static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1473. OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
  1474. }\
  1475. static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1476. OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
  1477. }\
  1478. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
  1479. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
  1480. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
  1481. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
  1482. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
  1483. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
  1484. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
  1485. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
  1486. QPEL_2TAP(put_, 16, mmx2)
  1487. QPEL_2TAP(avg_, 16, mmx2)
  1488. QPEL_2TAP(put_, 8, mmx2)
  1489. QPEL_2TAP(avg_, 8, mmx2)
  1490. QPEL_2TAP(put_, 16, 3dnow)
  1491. QPEL_2TAP(avg_, 16, 3dnow)
  1492. QPEL_2TAP(put_, 8, 3dnow)
  1493. QPEL_2TAP(avg_, 8, 3dnow)
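/* Editor's note: a scalar sketch of what the *_l3_* kernels used above are
 * assumed to compute (they are defined elsewhere in this file; the exact
 * rounding of the real MMX code may differ):
 */
#if 0
static void put_2tap_qpel_l3_ref(uint8_t *dst, uint8_t *src, int stride,
                                 int size, int s1, int s2)
{
    int x, y;
    for (y = 0; y < size; y++) {
        for (x = 0; x < size; x++)
            /* blend src with the average of its s1/s2 neighbours, giving
               the roughly 3:1 weighting of a quarter-pel position */
            dst[x] = (src[x] + ((src[x + s1] + src[x + s2] + 1) >> 1) + 1) >> 1;
        dst += stride;
        src += stride;
    }
}
#endif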
  1494. #if 0
  1495. static void just_return(void) { return; }
  1496. #endif
  1497. static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  1498. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
  1499. const int w = 8;
  1500. const int ix = ox>>(16+shift);
  1501. const int iy = oy>>(16+shift);
  1502. const int oxs = ox>>4;
  1503. const int oys = oy>>4;
  1504. const int dxxs = dxx>>4;
  1505. const int dxys = dxy>>4;
  1506. const int dyxs = dyx>>4;
  1507. const int dyys = dyy>>4;
  1508. const uint16_t r4[4] = {r,r,r,r};
  1509. const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
  1510. const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
  1511. const uint64_t shift2 = 2*shift;
  1512. uint8_t edge_buf[(h+1)*stride];
  1513. int x, y;
  1514. const int dxw = (dxx-(1<<(16+shift)))*(w-1);
  1515. const int dyh = (dyy-(1<<(16+shift)))*(h-1);
  1516. const int dxh = dxy*(h-1);
  1517. const int dyw = dyx*(w-1);
  1518. if( // non-constant fullpel offset (3% of blocks)
  1519. ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
  1520. (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
  1521. // uses more than 16 bits of subpel mv (only at huge resolution)
  1522. || (dxx|dxy|dyx|dyy)&15 )
  1523. {
  1524. //FIXME could still use mmx for some of the rows
  1525. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
  1526. return;
  1527. }
  1528. src += ix + iy*stride;
  1529. if( (unsigned)ix >= width-w ||
  1530. (unsigned)iy >= height-h )
  1531. {
  1532. ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
  1533. src = edge_buf;
  1534. }
  1535. asm volatile(
  1536. "movd %0, %%mm6 \n\t"
  1537. "pxor %%mm7, %%mm7 \n\t"
  1538. "punpcklwd %%mm6, %%mm6 \n\t"
  1539. "punpcklwd %%mm6, %%mm6 \n\t"
  1540. :: "r"(1<<shift)
  1541. );
  1542. for(x=0; x<w; x+=4){
  1543. uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
  1544. oxs - dxys + dxxs*(x+1),
  1545. oxs - dxys + dxxs*(x+2),
  1546. oxs - dxys + dxxs*(x+3) };
  1547. uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
  1548. oys - dyys + dyxs*(x+1),
  1549. oys - dyys + dyxs*(x+2),
  1550. oys - dyys + dyxs*(x+3) };
  1551. for(y=0; y<h; y++){
  1552. asm volatile(
  1553. "movq %0, %%mm4 \n\t"
  1554. "movq %1, %%mm5 \n\t"
  1555. "paddw %2, %%mm4 \n\t"
  1556. "paddw %3, %%mm5 \n\t"
  1557. "movq %%mm4, %0 \n\t"
  1558. "movq %%mm5, %1 \n\t"
  1559. "psrlw $12, %%mm4 \n\t"
  1560. "psrlw $12, %%mm5 \n\t"
  1561. : "+m"(*dx4), "+m"(*dy4)
  1562. : "m"(*dxy4), "m"(*dyy4)
  1563. );
  1564. asm volatile(
  1565. "movq %%mm6, %%mm2 \n\t"
  1566. "movq %%mm6, %%mm1 \n\t"
  1567. "psubw %%mm4, %%mm2 \n\t"
  1568. "psubw %%mm5, %%mm1 \n\t"
  1569. "movq %%mm2, %%mm0 \n\t"
  1570. "movq %%mm4, %%mm3 \n\t"
  1571. "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
  1572. "pmullw %%mm5, %%mm3 \n\t" // dx*dy
  1573. "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
  1574. "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
  1575. "movd %4, %%mm5 \n\t"
  1576. "movd %3, %%mm4 \n\t"
  1577. "punpcklbw %%mm7, %%mm5 \n\t"
  1578. "punpcklbw %%mm7, %%mm4 \n\t"
  1579. "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
  1580. "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
  1581. "movd %2, %%mm5 \n\t"
  1582. "movd %1, %%mm4 \n\t"
  1583. "punpcklbw %%mm7, %%mm5 \n\t"
  1584. "punpcklbw %%mm7, %%mm4 \n\t"
  1585. "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
  1586. "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
  1587. "paddw %5, %%mm1 \n\t"
  1588. "paddw %%mm3, %%mm2 \n\t"
  1589. "paddw %%mm1, %%mm0 \n\t"
  1590. "paddw %%mm2, %%mm0 \n\t"
  1591. "psrlw %6, %%mm0 \n\t"
  1592. "packuswb %%mm0, %%mm0 \n\t"
  1593. "movd %%mm0, %0 \n\t"
  1594. : "=m"(dst[x+y*stride])
  1595. : "m"(src[0]), "m"(src[1]),
  1596. "m"(src[stride]), "m"(src[stride+1]),
  1597. "m"(*r4), "m"(shift2)
  1598. );
  1599. src += stride;
  1600. }
  1601. src += 4-h*stride;
  1602. }
  1603. }
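/* Editor's note: per-pixel reference for the bilinear kernel above, with
 * s = 1<<shift and dx/dy the per-column sub-pel fractions kept in mm4/mm5
 * (a sketch of the arithmetic; the fast-path test above guarantees the
 * fullpel offset is constant over the block, matching ff_gmc_c):
 *
 *   dst[x + y*stride] = ( src[0]        * (s-dx) * (s-dy)
 *                       + src[1]        *  dx    * (s-dy)
 *                       + src[stride]   * (s-dx) *  dy
 *                       + src[stride+1] *  dx    *  dy
 *                       + r ) >> (2*shift);
 */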
  1604. #define PREFETCH(name, op) \
  1605. static void name(void *mem, int stride, int h){\
  1606. const uint8_t *p= mem;\
  1607. do{\
  1608. asm volatile(#op" %0" :: "m"(*p));\
  1609. p+= stride;\
  1610. }while(--h);\
  1611. }
  1612. PREFETCH(prefetch_mmx2, prefetcht0)
  1613. PREFETCH(prefetch_3dnow, prefetch)
  1614. #undef PREFETCH
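/* Editor's note: with intrinsics the MMX2 variant would be roughly
 * _mm_prefetch((const char *)p, _MM_HINT_T0) from <xmmintrin.h>; plain
 * inline asm is used so the same macro also covers the 3DNow! "prefetch"
 * opcode without pulling in SSE headers. */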
  1615. #include "h264dsp_mmx.c"
  1616. /* CAVS specific */
  1617. void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
  1618. void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
  1619. void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1620. put_pixels8_mmx(dst, src, stride, 8);
  1621. }
  1622. void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1623. avg_pixels8_mmx(dst, src, stride, 8);
  1624. }
  1625. void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1626. put_pixels16_mmx(dst, src, stride, 16);
  1627. }
  1628. void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1629. avg_pixels16_mmx(dst, src, stride, 16);
  1630. }
  1631. /* VC1 specific */
  1632. void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
  1633. void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
  1634. put_pixels8_mmx(dst, src, stride, 8);
  1635. }
  1636. /* external functions, from idct_mmx.c */
  1637. void ff_mmx_idct(DCTELEM *block);
  1638. void ff_mmxext_idct(DCTELEM *block);
  1639. /* XXX: these functions should be removed as soon as all IDCTs are
  1640. converted */
  1641. #ifdef CONFIG_GPL
  1642. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  1643. {
  1644. ff_mmx_idct (block);
  1645. put_pixels_clamped_mmx(block, dest, line_size);
  1646. }
  1647. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  1648. {
  1649. ff_mmx_idct (block);
  1650. add_pixels_clamped_mmx(block, dest, line_size);
  1651. }
  1652. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  1653. {
  1654. ff_mmxext_idct (block);
  1655. put_pixels_clamped_mmx(block, dest, line_size);
  1656. }
  1657. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  1658. {
  1659. ff_mmxext_idct (block);
  1660. add_pixels_clamped_mmx(block, dest, line_size);
  1661. }
  1662. #endif
  1663. static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
  1664. {
  1665. ff_idct_xvid_mmx (block);
  1666. put_pixels_clamped_mmx(block, dest, line_size);
  1667. }
  1668. static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
  1669. {
  1670. ff_idct_xvid_mmx (block);
  1671. add_pixels_clamped_mmx(block, dest, line_size);
  1672. }
  1673. static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
  1674. {
  1675. ff_idct_xvid_mmx2 (block);
  1676. put_pixels_clamped_mmx(block, dest, line_size);
  1677. }
  1678. static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
  1679. {
  1680. ff_idct_xvid_mmx2 (block);
  1681. add_pixels_clamped_mmx(block, dest, line_size);
  1682. }
  1683. static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
  1684. {
  1685. int i;
  1686. asm volatile("pxor %%mm7, %%mm7":);
  1687. for(i=0; i<blocksize; i+=2) {
  1688. asm volatile(
  1689. "movq %0, %%mm0 \n\t"
  1690. "movq %1, %%mm1 \n\t"
  1691. "movq %%mm0, %%mm2 \n\t"
  1692. "movq %%mm1, %%mm3 \n\t"
  1693. "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
  1694. "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
  1695. "pslld $31, %%mm2 \n\t" // keep only the sign bit
  1696. "pxor %%mm2, %%mm1 \n\t"
  1697. "movq %%mm3, %%mm4 \n\t"
  1698. "pand %%mm1, %%mm3 \n\t"
  1699. "pandn %%mm1, %%mm4 \n\t"
  1700. "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  1701. "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  1702. "movq %%mm3, %1 \n\t"
  1703. "movq %%mm0, %0 \n\t"
  1704. :"+m"(mag[i]), "+m"(ang[i])
  1705. ::"memory"
  1706. );
  1707. }
  1708. asm volatile("femms");
  1709. }
  1710. static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
  1711. {
  1712. int i;
  1713. asm volatile(
  1714. "movaps %0, %%xmm5 \n\t"
  1715. ::"m"(ff_pdw_80000000[0])
  1716. );
  1717. for(i=0; i<blocksize; i+=4) {
  1718. asm volatile(
  1719. "movaps %0, %%xmm0 \n\t"
  1720. "movaps %1, %%xmm1 \n\t"
  1721. "xorps %%xmm2, %%xmm2 \n\t"
  1722. "xorps %%xmm3, %%xmm3 \n\t"
  1723. "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
  1724. "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
  1725. "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
  1726. "xorps %%xmm2, %%xmm1 \n\t"
  1727. "movaps %%xmm3, %%xmm4 \n\t"
  1728. "andps %%xmm1, %%xmm3 \n\t"
  1729. "andnps %%xmm1, %%xmm4 \n\t"
  1730. "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  1731. "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  1732. "movaps %%xmm3, %1 \n\t"
  1733. "movaps %%xmm0, %0 \n\t"
  1734. :"+m"(mag[i]), "+m"(ang[i])
  1735. ::"memory"
  1736. );
  1737. }
  1738. }
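/* Editor's note: the scalar logic both SIMD versions above implement
 * branchlessly, as a reference sketch (compare the plain C version used
 * when no SIMD is available):
 */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] += t;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] -= t;
            }
        }
    }
}
#endif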
  1739. static void vector_fmul_3dnow(float *dst, const float *src, int len){
  1740. long i = (len-4)*4;
  1741. asm volatile(
  1742. "1: \n\t"
  1743. "movq (%1,%0), %%mm0 \n\t"
  1744. "movq 8(%1,%0), %%mm1 \n\t"
  1745. "pfmul (%2,%0), %%mm0 \n\t"
  1746. "pfmul 8(%2,%0), %%mm1 \n\t"
  1747. "movq %%mm0, (%1,%0) \n\t"
  1748. "movq %%mm1, 8(%1,%0) \n\t"
  1749. "sub $16, %0 \n\t"
  1750. "jge 1b \n\t"
  1751. "femms \n\t"
  1752. :"+r"(i)
  1753. :"r"(dst), "r"(src)
  1754. :"memory"
  1755. );
  1756. }
  1757. static void vector_fmul_sse(float *dst, const float *src, int len){
  1758. long i = (len-8)*4;
  1759. asm volatile(
  1760. "1: \n\t"
  1761. "movaps (%1,%0), %%xmm0 \n\t"
  1762. "movaps 16(%1,%0), %%xmm1 \n\t"
  1763. "mulps (%2,%0), %%xmm0 \n\t"
  1764. "mulps 16(%2,%0), %%xmm1 \n\t"
  1765. "movaps %%xmm0, (%1,%0) \n\t"
  1766. "movaps %%xmm1, 16(%1,%0) \n\t"
  1767. "sub $32, %0 \n\t"
  1768. "jge 1b \n\t"
  1769. :"+r"(i)
  1770. :"r"(dst), "r"(src)
  1771. :"memory"
  1772. );
  1773. }
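/* Editor's note: both versions above are, in effect,
 *   for (i = 0; i < len; i++) dst[i] *= src[i];
 * with "i" held as a byte offset counting down from the end.  The 3DNow!
 * loop assumes len is a multiple of 4; the SSE loop assumes a multiple of
 * 8 and 16-byte-aligned pointers (movaps). */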
  1774. static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
  1775. long i = len*4-16;
  1776. asm volatile(
  1777. "1: \n\t"
  1778. "pswapd 8(%1), %%mm0 \n\t"
  1779. "pswapd (%1), %%mm1 \n\t"
  1780. "pfmul (%3,%0), %%mm0 \n\t"
  1781. "pfmul 8(%3,%0), %%mm1 \n\t"
  1782. "movq %%mm0, (%2,%0) \n\t"
  1783. "movq %%mm1, 8(%2,%0) \n\t"
  1784. "add $16, %1 \n\t"
  1785. "sub $16, %0 \n\t"
  1786. "jge 1b \n\t"
  1787. :"+r"(i), "+r"(src1)
  1788. :"r"(dst), "r"(src0)
  1789. );
  1790. asm volatile("femms");
  1791. }
  1792. static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
  1793. long i = len*4-32;
  1794. asm volatile(
  1795. "1: \n\t"
  1796. "movaps 16(%1), %%xmm0 \n\t"
  1797. "movaps (%1), %%xmm1 \n\t"
  1798. "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
  1799. "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
  1800. "mulps (%3,%0), %%xmm0 \n\t"
  1801. "mulps 16(%3,%0), %%xmm1 \n\t"
  1802. "movaps %%xmm0, (%2,%0) \n\t"
  1803. "movaps %%xmm1, 16(%2,%0) \n\t"
  1804. "add $32, %1 \n\t"
  1805. "sub $32, %0 \n\t"
  1806. "jge 1b \n\t"
  1807. :"+r"(i), "+r"(src1)
  1808. :"r"(dst), "r"(src0)
  1809. );
  1810. }
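/* Editor's note: scalar equivalent of the two versions above:
 *   for (i = 0; i < len; i++) dst[i] = src0[i] * src1[len - 1 - i];
 * src1 is read forwards and reversed in registers (pswapd resp. shufps
 * $0x1b) while dst/src0 are walked backwards from the end. */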
  1811. static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
  1812. const float *src2, int src3, int len, int step){
  1813. long i = (len-4)*4;
  1814. if(step == 2 && src3 == 0){
  1815. dst += (len-4)*2;
  1816. asm volatile(
  1817. "1: \n\t"
  1818. "movq (%2,%0), %%mm0 \n\t"
  1819. "movq 8(%2,%0), %%mm1 \n\t"
  1820. "pfmul (%3,%0), %%mm0 \n\t"
  1821. "pfmul 8(%3,%0), %%mm1 \n\t"
  1822. "pfadd (%4,%0), %%mm0 \n\t"
  1823. "pfadd 8(%4,%0), %%mm1 \n\t"
  1824. "movd %%mm0, (%1) \n\t"
  1825. "movd %%mm1, 16(%1) \n\t"
  1826. "psrlq $32, %%mm0 \n\t"
  1827. "psrlq $32, %%mm1 \n\t"
  1828. "movd %%mm0, 8(%1) \n\t"
  1829. "movd %%mm1, 24(%1) \n\t"
  1830. "sub $32, %1 \n\t"
  1831. "sub $16, %0 \n\t"
  1832. "jge 1b \n\t"
  1833. :"+r"(i), "+r"(dst)
  1834. :"r"(src0), "r"(src1), "r"(src2)
  1835. :"memory"
  1836. );
  1837. }
  1838. else if(step == 1 && src3 == 0){
  1839. asm volatile(
  1840. "1: \n\t"
  1841. "movq (%2,%0), %%mm0 \n\t"
  1842. "movq 8(%2,%0), %%mm1 \n\t"
  1843. "pfmul (%3,%0), %%mm0 \n\t"
  1844. "pfmul 8(%3,%0), %%mm1 \n\t"
  1845. "pfadd (%4,%0), %%mm0 \n\t"
  1846. "pfadd 8(%4,%0), %%mm1 \n\t"
  1847. "movq %%mm0, (%1,%0) \n\t"
  1848. "movq %%mm1, 8(%1,%0) \n\t"
  1849. "sub $16, %0 \n\t"
  1850. "jge 1b \n\t"
  1851. :"+r"(i)
  1852. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  1853. :"memory"
  1854. );
  1855. }
  1856. else
  1857. ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
  1858. asm volatile("femms");
  1859. }
  1860. static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
  1861. const float *src2, int src3, int len, int step){
  1862. long i = (len-8)*4;
  1863. if(step == 2 && src3 == 0){
  1864. dst += (len-8)*2;
  1865. asm volatile(
  1866. "1: \n\t"
  1867. "movaps (%2,%0), %%xmm0 \n\t"
  1868. "movaps 16(%2,%0), %%xmm1 \n\t"
  1869. "mulps (%3,%0), %%xmm0 \n\t"
  1870. "mulps 16(%3,%0), %%xmm1 \n\t"
  1871. "addps (%4,%0), %%xmm0 \n\t"
  1872. "addps 16(%4,%0), %%xmm1 \n\t"
  1873. "movss %%xmm0, (%1) \n\t"
  1874. "movss %%xmm1, 32(%1) \n\t"
  1875. "movhlps %%xmm0, %%xmm2 \n\t"
  1876. "movhlps %%xmm1, %%xmm3 \n\t"
  1877. "movss %%xmm2, 16(%1) \n\t"
  1878. "movss %%xmm3, 48(%1) \n\t"
  1879. "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
  1880. "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
  1881. "movss %%xmm0, 8(%1) \n\t"
  1882. "movss %%xmm1, 40(%1) \n\t"
  1883. "movhlps %%xmm0, %%xmm2 \n\t"
  1884. "movhlps %%xmm1, %%xmm3 \n\t"
  1885. "movss %%xmm2, 24(%1) \n\t"
  1886. "movss %%xmm3, 56(%1) \n\t"
  1887. "sub $64, %1 \n\t"
  1888. "sub $32, %0 \n\t"
  1889. "jge 1b \n\t"
  1890. :"+r"(i), "+r"(dst)
  1891. :"r"(src0), "r"(src1), "r"(src2)
  1892. :"memory"
  1893. );
  1894. }
  1895. else if(step == 1 && src3 == 0){
  1896. asm volatile(
  1897. "1: \n\t"
  1898. "movaps (%2,%0), %%xmm0 \n\t"
  1899. "movaps 16(%2,%0), %%xmm1 \n\t"
  1900. "mulps (%3,%0), %%xmm0 \n\t"
  1901. "mulps 16(%3,%0), %%xmm1 \n\t"
  1902. "addps (%4,%0), %%xmm0 \n\t"
  1903. "addps 16(%4,%0), %%xmm1 \n\t"
  1904. "movaps %%xmm0, (%1,%0) \n\t"
  1905. "movaps %%xmm1, 16(%1,%0) \n\t"
  1906. "sub $32, %0 \n\t"
  1907. "jge 1b \n\t"
  1908. :"+r"(i)
  1909. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  1910. :"memory"
  1911. );
  1912. }
  1913. else
  1914. ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
  1915. }
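/* Editor's note: the generic operation is
 *   for (i = 0; i < len; i++) dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
 * the SIMD paths above handle only src3 == 0 with step 1 (contiguous
 * stores) or step 2 (interleaved stores via movd/movss), and defer to
 * ff_vector_fmul_add_add_c for everything else. */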
  1916. static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
  1917. // not bit-exact: pf2id uses different rounding than C and SSE
  1918. int i;
  1919. for(i=0; i<len; i+=4) {
  1920. asm volatile(
  1921. "pf2id %1, %%mm0 \n\t"
  1922. "pf2id %2, %%mm1 \n\t"
  1923. "packssdw %%mm1, %%mm0 \n\t"
  1924. "movq %%mm0, %0 \n\t"
  1925. :"=m"(dst[i])
  1926. :"m"(src[i]), "m"(src[i+2])
  1927. );
  1928. }
  1929. asm volatile("femms");
  1930. }
  1931. static void float_to_int16_sse(int16_t *dst, const float *src, int len){
  1932. int i;
  1933. for(i=0; i<len; i+=4) {
  1934. asm volatile(
  1935. "cvtps2pi %1, %%mm0 \n\t"
  1936. "cvtps2pi %2, %%mm1 \n\t"
  1937. "packssdw %%mm1, %%mm0 \n\t"
  1938. "movq %%mm0, %0 \n\t"
  1939. :"=m"(dst[i])
  1940. :"m"(src[i]), "m"(src[i+2])
  1941. );
  1942. }
  1943. asm volatile("emms");
  1944. }
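/* Editor's note: scalar equivalent, with packssdw supplying the saturation
 * to the int16 range (a sketch):
 *   for (i = 0; i < len; i++)
 *       dst[i] = av_clip(lrintf(src[i]), -32768, 32767);
 * cvtps2pi rounds to nearest (the MXCSR default), matching lrintf, while
 * pf2id truncates -- hence the bit-exactness caveat on the 3DNow! version. */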
  1945. extern void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
  1946. extern void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
  1947. extern void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
  1948. extern void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
  1949. extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
  1950. int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
  1951. extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
  1952. int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
  1953. void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
  1954. {
  1955. mm_flags = mm_support();
  1956. if (avctx->dsp_mask) {
  1957. if (avctx->dsp_mask & FF_MM_FORCE)
  1958. mm_flags |= (avctx->dsp_mask & 0xffff);
  1959. else
  1960. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  1961. }
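/* Editor's note: dsp_mask semantics as implemented here: with FF_MM_FORCE
 * set, the low 16 bits are OR-ed into the detected flags (force-enable
 * features the autodetection missed); without it they are cleared
 * (disable detected features, e.g. to benchmark the C fallbacks). */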
  1962. #if 0
  1963. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  1964. if (mm_flags & MM_MMX)
  1965. av_log(avctx, AV_LOG_INFO, " mmx");
  1966. if (mm_flags & MM_MMXEXT)
  1967. av_log(avctx, AV_LOG_INFO, " mmxext");
  1968. if (mm_flags & MM_3DNOW)
  1969. av_log(avctx, AV_LOG_INFO, " 3dnow");
  1970. if (mm_flags & MM_SSE)
  1971. av_log(avctx, AV_LOG_INFO, " sse");
  1972. if (mm_flags & MM_SSE2)
  1973. av_log(avctx, AV_LOG_INFO, " sse2");
  1974. av_log(avctx, AV_LOG_INFO, "\n");
  1975. #endif
  1976. if (mm_flags & MM_MMX) {
  1977. const int idct_algo= avctx->idct_algo;
  1978. if(avctx->lowres==0){
  1979. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  1980. c->idct_put= ff_simple_idct_put_mmx;
  1981. c->idct_add= ff_simple_idct_add_mmx;
  1982. c->idct = ff_simple_idct_mmx;
  1983. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  1984. #ifdef CONFIG_GPL
  1985. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  1986. if(mm_flags & MM_MMXEXT){
  1987. c->idct_put= ff_libmpeg2mmx2_idct_put;
  1988. c->idct_add= ff_libmpeg2mmx2_idct_add;
  1989. c->idct = ff_mmxext_idct;
  1990. }else{
  1991. c->idct_put= ff_libmpeg2mmx_idct_put;
  1992. c->idct_add= ff_libmpeg2mmx_idct_add;
  1993. c->idct = ff_mmx_idct;
  1994. }
  1995. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  1996. #endif
  1997. }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER) &&
  1998. idct_algo==FF_IDCT_VP3 &&
  1999. avctx->codec->id!=CODEC_ID_THEORA &&
  2000. !(avctx->flags & CODEC_FLAG_BITEXACT)){
  2001. if(mm_flags & MM_SSE2){
  2002. c->idct_put= ff_vp3_idct_put_sse2;
  2003. c->idct_add= ff_vp3_idct_add_sse2;
  2004. c->idct = ff_vp3_idct_sse2;
  2005. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2006. }else{
  2007. ff_vp3_dsp_init_mmx();
  2008. c->idct_put= ff_vp3_idct_put_mmx;
  2009. c->idct_add= ff_vp3_idct_add_mmx;
  2010. c->idct = ff_vp3_idct_mmx;
  2011. c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
  2012. }
  2013. }else if(idct_algo==FF_IDCT_CAVS){
  2014. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2015. }else if(idct_algo==FF_IDCT_XVIDMMX){
  2016. if(mm_flags & MM_MMXEXT){
  2017. c->idct_put= ff_idct_xvid_mmx2_put;
  2018. c->idct_add= ff_idct_xvid_mmx2_add;
  2019. c->idct = ff_idct_xvid_mmx2;
  2020. }else{
  2021. c->idct_put= ff_idct_xvid_mmx_put;
  2022. c->idct_add= ff_idct_xvid_mmx_add;
  2023. c->idct = ff_idct_xvid_mmx;
  2024. }
  2025. }
  2026. }
  2027. c->put_pixels_clamped = put_pixels_clamped_mmx;
  2028. c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
  2029. c->add_pixels_clamped = add_pixels_clamped_mmx;
  2030. c->clear_blocks = clear_blocks_mmx;
  2031. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  2032. c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  2033. c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  2034. c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  2035. c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
  2036. SET_HPEL_FUNCS(put, 0, 16, mmx);
  2037. SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
  2038. SET_HPEL_FUNCS(avg, 0, 16, mmx);
  2039. SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
  2040. SET_HPEL_FUNCS(put, 1, 8, mmx);
  2041. SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
  2042. SET_HPEL_FUNCS(avg, 1, 8, mmx);
  2043. SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
  2044. c->gmc= gmc_mmx;
  2045. c->add_bytes= add_bytes_mmx;
  2046. c->add_bytes_l2= add_bytes_l2_mmx;
  2047. c->draw_edges = draw_edges_mmx;
  2048. if (ENABLE_ANY_H263) {
  2049. c->h263_v_loop_filter= h263_v_loop_filter_mmx;
  2050. c->h263_h_loop_filter= h263_h_loop_filter_mmx;
  2051. }
  2052. c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
  2053. c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
  2054. c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;
  2055. c->h264_idct_dc_add=
  2056. c->h264_idct_add= ff_h264_idct_add_mmx;
  2057. c->h264_idct8_dc_add=
  2058. c->h264_idct8_add= ff_h264_idct8_add_mmx;
  2059. if (mm_flags & MM_SSE2)
  2060. c->h264_idct8_add= ff_h264_idct8_add_sse2;
  2061. if (mm_flags & MM_MMXEXT) {
  2062. c->prefetch = prefetch_mmx2;
  2063. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
  2064. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
  2065. c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
  2066. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
  2067. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
  2068. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
  2069. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
  2070. c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
  2071. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
  2072. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
  2073. c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
  2074. c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
  2075. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2076. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
  2077. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
  2078. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
  2079. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
  2080. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
  2081. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
  2082. }
  2083. #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  2084. c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
  2085. c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
  2086. c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
  2087. c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
  2088. c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
  2089. c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
  2090. c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
  2091. c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
  2092. c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
  2093. c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
  2094. c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
  2095. c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
  2096. c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
  2097. c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
  2098. c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
  2099. c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
  2100. SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
  2101. SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
  2102. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
  2103. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
  2104. SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
  2105. SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
  2106. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
  2107. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
  2108. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
  2109. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
  2110. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
  2111. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
  2112. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
  2113. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
  2114. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
  2115. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
  2116. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
  2117. c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
  2118. c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
  2119. c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
  2120. c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
  2121. c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
  2122. c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
  2123. c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
  2124. c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
  2125. c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
  2126. c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
  2127. c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
  2128. c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
  2129. c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
  2130. c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
  2131. c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
  2132. c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
  2133. c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
  2134. c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
  2135. c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
  2136. c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
  2137. c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
  2138. c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
  2139. c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
  2140. c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
  2141. c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
  2142. c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
  2143. if (ENABLE_CAVS_DECODER)
  2144. ff_cavsdsp_init_mmx2(c, avctx);
  2145. if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
  2146. ff_vc1dsp_init_mmx(c, avctx);
  2147. c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
  2148. } else if (mm_flags & MM_3DNOW) {
  2149. c->prefetch = prefetch_3dnow;
  2150. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  2151. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  2152. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  2153. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  2154. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  2155. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  2156. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  2157. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  2158. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  2159. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  2160. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2161. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  2162. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  2163. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  2164. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  2165. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  2166. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  2167. }
  2168. SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
  2169. SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
  2170. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
  2171. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
  2172. SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
  2173. SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
  2174. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
  2175. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
  2176. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
  2177. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
  2178. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
  2179. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
  2180. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
  2181. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
  2182. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
  2183. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
  2184. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
  2185. c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
  2186. if (ENABLE_CAVS_DECODER)
  2187. ff_cavsdsp_init_3dnow(c, avctx);
  2188. }
  2189. #define H264_QPEL_FUNCS(x, y, CPU)\
  2190. c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
  2191. c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
  2192. c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
  2193. c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
  2194. if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
  2195. // these functions are slower than mmx on AMD, but faster on Intel
  2196. /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
  2197. c->put_pixels_tab[0][0] = put_pixels16_sse2;
  2198. c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
  2199. */
  2200. H264_QPEL_FUNCS(0, 0, sse2);
  2201. }
  2202. if(mm_flags & MM_SSE2){
  2203. H264_QPEL_FUNCS(0, 1, sse2);
  2204. H264_QPEL_FUNCS(0, 2, sse2);
  2205. H264_QPEL_FUNCS(0, 3, sse2);
  2206. H264_QPEL_FUNCS(1, 1, sse2);
  2207. H264_QPEL_FUNCS(1, 2, sse2);
  2208. H264_QPEL_FUNCS(1, 3, sse2);
  2209. H264_QPEL_FUNCS(2, 1, sse2);
  2210. H264_QPEL_FUNCS(2, 2, sse2);
  2211. H264_QPEL_FUNCS(2, 3, sse2);
  2212. H264_QPEL_FUNCS(3, 1, sse2);
  2213. H264_QPEL_FUNCS(3, 2, sse2);
  2214. H264_QPEL_FUNCS(3, 3, sse2);
  2215. }
  2216. #ifdef HAVE_SSSE3
  2217. if(mm_flags & MM_SSSE3){
  2218. H264_QPEL_FUNCS(1, 0, ssse3);
  2219. H264_QPEL_FUNCS(1, 1, ssse3);
  2220. H264_QPEL_FUNCS(1, 2, ssse3);
  2221. H264_QPEL_FUNCS(1, 3, ssse3);
  2222. H264_QPEL_FUNCS(2, 0, ssse3);
  2223. H264_QPEL_FUNCS(2, 1, ssse3);
  2224. H264_QPEL_FUNCS(2, 2, ssse3);
  2225. H264_QPEL_FUNCS(2, 3, ssse3);
  2226. H264_QPEL_FUNCS(3, 0, ssse3);
  2227. H264_QPEL_FUNCS(3, 1, ssse3);
  2228. H264_QPEL_FUNCS(3, 2, ssse3);
  2229. H264_QPEL_FUNCS(3, 3, ssse3);
  2230. c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
  2231. }
  2232. #endif
  2233. #ifdef CONFIG_SNOW_DECODER
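/* Editor's note: the "& 0" below makes the SSE2 test always false, so the
 * SSE2 snow path is compiled but deliberately disabled; only the
 * MMX/MMXEXT branch can be selected. */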
  2234. if(mm_flags & MM_SSE2 & 0){
  2235. c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
  2236. #ifdef HAVE_7REGS
  2237. c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
  2238. #endif
  2239. c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
  2240. }
  2241. else{
  2242. if(mm_flags & MM_MMXEXT){
  2243. c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
  2244. #ifdef HAVE_7REGS
  2245. c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
  2246. #endif
  2247. }
  2248. c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
  2249. }
  2250. #endif
  2251. if(mm_flags & MM_3DNOW){
  2252. c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
  2253. c->vector_fmul = vector_fmul_3dnow;
  2254. if(!(avctx->flags & CODEC_FLAG_BITEXACT))
  2255. c->float_to_int16 = float_to_int16_3dnow;
  2256. }
  2257. if(mm_flags & MM_3DNOWEXT)
  2258. c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
  2259. if(mm_flags & MM_SSE){
  2260. c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
  2261. c->vector_fmul = vector_fmul_sse;
  2262. c->float_to_int16 = float_to_int16_sse;
  2263. c->vector_fmul_reverse = vector_fmul_reverse_sse;
  2264. c->vector_fmul_add_add = vector_fmul_add_add_sse;
  2265. }
  2266. if(mm_flags & MM_3DNOW)
  2267. c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
  2268. }
  2269. if (ENABLE_ENCODERS)
  2270. dsputilenc_init_mmx(c, avctx);
  2271. #if 0
  2272. // for speed testing
  2273. get_pixels = just_return;
  2274. put_pixels_clamped = just_return;
  2275. add_pixels_clamped = just_return;
  2276. pix_abs16x16 = just_return;
  2277. pix_abs16x16_x2 = just_return;
  2278. pix_abs16x16_y2 = just_return;
  2279. pix_abs16x16_xy2 = just_return;
  2280. put_pixels_tab[0] = just_return;
  2281. put_pixels_tab[1] = just_return;
  2282. put_pixels_tab[2] = just_return;
  2283. put_pixels_tab[3] = just_return;
  2284. put_no_rnd_pixels_tab[0] = just_return;
  2285. put_no_rnd_pixels_tab[1] = just_return;
  2286. put_no_rnd_pixels_tab[2] = just_return;
  2287. put_no_rnd_pixels_tab[3] = just_return;
  2288. avg_pixels_tab[0] = just_return;
  2289. avg_pixels_tab[1] = just_return;
  2290. avg_pixels_tab[2] = just_return;
  2291. avg_pixels_tab[3] = just_return;
  2292. avg_no_rnd_pixels_tab[0] = just_return;
  2293. avg_no_rnd_pixels_tab[1] = just_return;
  2294. avg_no_rnd_pixels_tab[2] = just_return;
  2295. avg_no_rnd_pixels_tab[3] = just_return;
  2296. //av_fdct = just_return;
  2297. //ff_idct = just_return;
  2298. #endif
  2299. }