/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "dsputil.h"
#include "dsputil_mmx.h"
#include "simple_idct.h"
#include "mpegvideo.h"
#include "x86_cpu.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "h263.h"
//#undef NDEBUG
//#include <assert.h>

extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4 ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_8 ) = 0x0008000800080008ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_t, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1 ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3 ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
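/* Naming sketch (an assumption from how these constants are used below,
 * not from the original headers): ff_pw_N is the 16-bit value N replicated
 * in every packed word, ff_pb_X is the byte 0xX replicated in every packed
 * byte, and ff_pd_N is the double N replicated in both lanes. */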
#define JUMPALIGN() asm volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) asm volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)
#ifndef PIC
#define MOVQ_BONE(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to generate these constants in
// registers than to access them through memory; pcmpeqd yields -1
#define MOVQ_BONE(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    asm volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
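// A rough value trace of the PIC variants above (shown per 16-bit word):
//     pcmpeqd reg,reg -> 0xFFFF 0xFFFF 0xFFFF 0xFFFF (all ones)
//     psrlw $15, reg  -> 0x0001 0x0001 0x0001 0x0001
// MOVQ_WTWO then doubles each word with psllw $1 (-> 0x0002 per word),
// while MOVQ_BONE packs the words down to bytes with packuswb (-> 0x01
// in every byte), all without touching memory.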
// regr is used as a temporary and holds the output result
// the first argument is unmodified, the second is trashed
// regfe is expected to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */
#define DEF(x) x ## _mmx2
/* pavgb was introduced only with the MMX2 instruction set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    asm volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus we use an "r" (register)
    // operand for the block pointer here instead of "m"
    asm volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
        :"memory");
}
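
/* Reference-only scalar equivalent of the function above (the packuswb
 * instructions perform the saturation of the signed 16-bit coefficients
 * to unsigned bytes):
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++) {
 *             int v = block[i*8 + j];
 *             pixels[i*line_size + j] = v < 0 ? 0 : v > 255 ? 255 : v;
 *         }
 */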
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        asm volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
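
/* Reference-only scalar equivalent of add_pixels_clamped_mmx: widen each
 * destination byte against the zero register mm7, add the coefficient,
 * then saturate back to 0..255 (ignoring the intermediate word-level
 * saturation of paddsw):
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++) {
 *             int v = pixels[i*line_size + j] + block[i*8 + j];
 *             pixels[i*line_size + j] = v < 0 ? 0 : v > 255 ? 255 : v;
 *         }
 */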
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size), "r"(3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    asm volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size), "r"(3L*line_size)
        : "memory"
        );
}
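
/* A note on the two SSE2 routines above: the source rows are loaded with
 * movdqu (no alignment assumed) while the destination is written with
 * movdqa, so block must be 16-byte aligned -- presumably guaranteed by
 * the callers' frame buffers; an unaligned destination would fault. */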
static void clear_blocks_mmx(DCTELEM *blocks)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
        );
}
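
/* clear_blocks_mmx zeroes six 64-coefficient blocks (6*64 DCTELEMs, i.e.
 * 6*128 bytes); a scalar equivalent would simply be
 *
 *     memset(blocks, 0, 6*64*sizeof(DCTELEM));
 *
 * The index register counts from -128*6 up to 0, so the "js 1b" loop test
 * comes for free from the flags set by the preceding add. */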
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];

        asm volatile(
            H263_LOOP_FILTER
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m" (*(uint64_t*)(src - 2*stride)),
              "+m" (*(uint64_t*)(src - 1*stride)),
              "+m" (*(uint64_t*)(src + 0*stride)),
              "+m" (*(uint64_t*)(src + 1*stride))
            : "g" (2*strength), "m"(ff_pb_FC)
        );
    }
}
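
/* Rough scalar sketch of the filter computed by H263_LOOP_FILTER (per
 * pixel; p0..p3 are the four samples across the block edge, strength
 * comes from ff_h263_loop_filter_strength[qscale]); this mirrors the C
 * version of the H.263 deblocking filter, which the MMX code applies to
 * eight pixels at a time:
 *
 *     d = (p0 - p3 + 4*(p2 - p1)) / 8;
 *     if      (d < -2*strength) d1 = 0;
 *     else if (d < -  strength) d1 = -2*strength - d;
 *     else if (d <    strength) d1 = d;
 *     else if (d <  2*strength) d1 = 2*strength - d;
 *     else                      d1 = 0;
 *     p1 = clip_uint8(p1 + d1);
 *     p2 = clip_uint8(p2 - d1);
 *     ad1 = abs(d1) >> 1;
 *     d2  = clip((p0 - p3)/4, -ad1, ad1);
 *     p0 -= d2;
 *     p3 += d2;
 */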
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp[4]);
        uint8_t *btemp= (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp , src , 8, stride);
        transpose4x4(btemp+4, src + 4*stride, 8, stride);
        asm volatile(
            H263_LOOP_FILTER // 5 3 4 6
            : "+m" (temp[0]),
              "+m" (temp[1]),
              "+m" (temp[2]),
              "+m" (temp[3])
            : "g" (2*strength), "m"(ff_pb_FC)
        );

        asm volatile(
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0,%2) \n\t"
            "movd %%mm3, (%0,%2,2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0,%3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1,%2) \n\t"
            "movd %%mm6, (%1,%2,2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1,%3) \n\t"
            :: "r" (src),
               "r" (src + 4*stride),
               "r" ((long) stride ),
               "r" ((long)(3*stride))
        );
    }
}
#define PAETH(cpu, abs3)\
void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    long i = -bpp;\
    long end = w-3;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((long)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
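
/* Reference scalar form of the PNG Paeth predictor that the macro above
 * evaluates four pixels at a time (a = left, b = above, c = upper-left,
 * as in the PNG specification):
 *
 *     static int paeth(int a, int b, int c)
 *     {
 *         int p  = a + b - c;
 *         int pa = abs(p - a), pb = abs(p - b), pc = abs(p - c);
 *         if (pa <= pb && pa <= pc) return a;
 *         if (pb <= pc)             return b;
 *         return c;
 *     }
 *
 * Each generated function then computes, roughly,
 * dst[x] = (src[x] + paeth(dst[x-bpp], top[x], top[x-bpp])) & 0xFF. */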
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
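
/* In scalar form, one output sample of this MPEG-4 qpel lowpass step is
 *
 *     out = clip_uint8((20*(x0+x1) - 6*(x[-1]+x2) + 3*(x[-2]+x3)
 *                       - (x[-3]+x4) + rnd) >> 5)
 *
 * i.e. the 8-tap kernel (-1, 3, -6, 20, 20, -6, 3, -1)/32; rnd is 16 for
 * the rounding variants and 15 for the no_rnd ones (see the ROUNDER
 * arguments where QPEL_BASE/QPEL_OP are instantiated below). */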
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */
#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,        1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,       -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,        stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,  -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,        stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,        stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,  -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)
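/* For reference, a scalar sketch of the half-pel blend the 2tap functions
 * build on (illustrative only, not part of dsputil; off1/off2 select the two
 * neighbouring full-pel samples, e.g. 0 and 1 for _x2_, 0 and stride for _y2_): */
#if 0
static void put_2tap_qpel_scalar(uint8_t *dst, const uint8_t *src,
                                 int stride, int size, int off1, int off2)
{
    int x, y;
    for(y=0; y<size; y++){
        for(x=0; x<size; x++)
            dst[x] = (src[x+off1] + src[x+off2] + 1) >> 1; // rounding average
        src += stride;
        dst += stride;
    }
}
#endif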
#if 0
static void just_return() { return; }
#endif
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    asm volatile(
        "movd %0, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            asm volatile(
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm5 \n\t"
                "paddw %2, %%mm4 \n\t"
                "paddw %3, %%mm5 \n\t"
                "movq %%mm4, %0 \n\t"
                "movq %%mm5, %1 \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            asm volatile(
                "movq %%mm6, %%mm2 \n\t"
                "movq %%mm6, %%mm1 \n\t"
                "psubw %%mm4, %%mm2 \n\t"
                "psubw %%mm5, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd %4, %%mm5 \n\t"
                "movd %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd %2, %%mm5 \n\t"
                "movd %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1 \n\t"
                "paddw %%mm3, %%mm2 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm2, %%mm0 \n\t"

                "psrlw %6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}
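/* Scalar model of the bilinear blend the inner loop performs (s = 1<<shift,
 * r = rounding constant, dx/dy = the 12-bit fractional offsets); a sketch of
 * the same math as ff_gmc_c, not the exact code -- the SIMD version above
 * additionally saturates via packuswb: */
#if 0
static uint8_t gmc_bilinear_scalar(const uint8_t *src, int stride,
                                   int dx, int dy, int s, int r, int shift)
{
    return ((s-dx)*(s-dy)*src[0]      + dx*(s-dy)*src[1] +
            (s-dx)*  dy  *src[stride] + dx*  dy  *src[stride+1] +
            r) >> (2*shift);
}
#endif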
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        asm volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
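/* e.g. PREFETCH(prefetch_mmx2, prefetcht0) expands to a helper that walks h
   rows of the strided block, issuing one prefetcht0 hint per row. */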
#include "h264dsp_mmx.c"

/* CAVS specific */
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);
/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
#ifdef CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    asm volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        asm volatile(
            "movq %0, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld $31, %%mm2 \n\t" // keep only the sign bit
            "pxor %%mm2, %%mm1 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "pand %%mm1, %%mm3 \n\t"
            "pandn %%mm1, %%mm4 \n\t"
            "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq %%mm3, %1 \n\t"
            "movq %%mm0, %0 \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    asm volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    asm volatile(
        "movaps %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        asm volatile(
            "movaps %0, %%xmm0 \n\t"
            "movaps %1, %%xmm1 \n\t"
            "xorps %%xmm2, %%xmm2 \n\t"
            "xorps %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps %%xmm2, %%xmm1 \n\t"
            "movaps %%xmm3, %%xmm4 \n\t"
            "andps %%xmm1, %%xmm3 \n\t"
            "andnps %%xmm1, %%xmm4 \n\t"
            "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps %%xmm3, %1 \n\t"
            "movaps %%xmm0, %0 \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}
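/* Scalar version of the coupling for reference; this is what the branchless
 * SIMD above implements (cf. the C implementation in vorbis.c): */
#if 0
static void vorbis_inverse_coupling_scalar(float *mag, float *ang, int blocksize)
{
    int i;
    for(i=0; i<blocksize; i++) {
        if(mag[i] > 0.0) {
            if(ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i] = mag[i];
                mag[i] += temp;
            }
        } else {
            if(ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i] = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif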
static void vector_fmul_3dnow(float *dst, const float *src, int len){
    long i = (len-4)*4;
    asm volatile(
        "1: \n\t"
        "movq (%1,%0), %%mm0 \n\t"
        "movq 8(%1,%0), %%mm1 \n\t"
        "pfmul (%2,%0), %%mm0 \n\t"
        "pfmul 8(%2,%0), %%mm1 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        "femms \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    long i = (len-8)*4;
    asm volatile(
        "1: \n\t"
        "movaps (%1,%0), %%xmm0 \n\t"
        "movaps 16(%1,%0), %%xmm1 \n\t"
        "mulps (%2,%0), %%xmm0 \n\t"
        "mulps 16(%2,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
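/* Both loops above run backwards over byte offsets (16 or 32 bytes per
 * iteration) and compute the plain in-place product, i.e.: */
#if 0
static void vector_fmul_scalar(float *dst, const float *src, int len){
    int i;
    for(i=0; i<len; i++)
        dst[i] *= src[i];
}
#endif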
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    long i = len*4-16;
    asm volatile(
        "1: \n\t"
        "pswapd 8(%1), %%mm0 \n\t"
        "pswapd (%1), %%mm1 \n\t"
        "pfmul (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq %%mm0, (%2,%0) \n\t"
        "movq %%mm1, 8(%2,%0) \n\t"
        "add $16, %1 \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    asm volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    long i = len*4-32;
    asm volatile(
        "1: \n\t"
        "movaps 16(%1), %%xmm0 \n\t"
        "movaps (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps (%3,%0), %%xmm0 \n\t"
        "mulps 16(%3,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%2,%0) \n\t"
        "movaps %%xmm1, 16(%2,%0) \n\t"
        "add $32, %1 \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}
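/* Scalar equivalent: src0 multiplied by src1 read back-to-front
 * (cf. ff_vector_fmul_reverse_c): */
#if 0
static void vector_fmul_reverse_scalar(float *dst, const float *src0, const float *src1, int len){
    int i;
    for(i=0; i<len; i++)
        dst[i] = src0[i] * src1[len-i-1];
}
#endif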
static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    long i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        asm volatile(
            "1: \n\t"
            "movq (%2,%0), %%mm0 \n\t"
            "movq 8(%2,%0), %%mm1 \n\t"
            "pfmul (%3,%0), %%mm0 \n\t"
            "pfmul 8(%3,%0), %%mm1 \n\t"
            "pfadd (%4,%0), %%mm0 \n\t"
            "pfadd 8(%4,%0), %%mm1 \n\t"
            "movd %%mm0, (%1) \n\t"
            "movd %%mm1, 16(%1) \n\t"
            "psrlq $32, %%mm0 \n\t"
            "psrlq $32, %%mm1 \n\t"
            "movd %%mm0, 8(%1) \n\t"
            "movd %%mm1, 24(%1) \n\t"
            "sub $32, %1 \n\t"
            "sub $16, %0 \n\t"
            "jge 1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1: \n\t"
            "movq (%2,%0), %%mm0 \n\t"
            "movq 8(%2,%0), %%mm1 \n\t"
            "pfmul (%3,%0), %%mm0 \n\t"
            "pfmul 8(%3,%0), %%mm1 \n\t"
            "pfadd (%4,%0), %%mm0 \n\t"
            "pfadd 8(%4,%0), %%mm1 \n\t"
            "movq %%mm0, (%1,%0) \n\t"
            "movq %%mm1, 8(%1,%0) \n\t"
            "sub $16, %0 \n\t"
            "jge 1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    asm volatile("femms");
}
static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    long i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        asm volatile(
            "1: \n\t"
            "movaps (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps (%3,%0), %%xmm0 \n\t"
            "mulps 16(%3,%0), %%xmm1 \n\t"
            "addps (%4,%0), %%xmm0 \n\t"
            "addps 16(%4,%0), %%xmm1 \n\t"
            "movss %%xmm0, (%1) \n\t"
            "movss %%xmm1, 32(%1) \n\t"
            "movhlps %%xmm0, %%xmm2 \n\t"
            "movhlps %%xmm1, %%xmm3 \n\t"
            "movss %%xmm2, 16(%1) \n\t"
            "movss %%xmm3, 48(%1) \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss %%xmm0, 8(%1) \n\t"
            "movss %%xmm1, 40(%1) \n\t"
            "movhlps %%xmm0, %%xmm2 \n\t"
            "movhlps %%xmm1, %%xmm3 \n\t"
            "movss %%xmm2, 24(%1) \n\t"
            "movss %%xmm3, 56(%1) \n\t"
            "sub $64, %1 \n\t"
            "sub $32, %0 \n\t"
            "jge 1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        asm volatile(
            "1: \n\t"
            "movaps (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps (%3,%0), %%xmm0 \n\t"
            "mulps 16(%3,%0), %%xmm1 \n\t"
            "addps (%4,%0), %%xmm0 \n\t"
            "addps 16(%4,%0), %%xmm1 \n\t"
            "movaps %%xmm0, (%1,%0) \n\t"
            "movaps %%xmm1, 16(%1,%0) \n\t"
            "sub $32, %0 \n\t"
            "jge 1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
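/* Scalar equivalent of the two fast paths above; step is the output stride
 * in floats and src3 a constant bias, which is why only step==1/step==2 with
 * src3==0 are vectorized (cf. ff_vector_fmul_add_add_c): */
#if 0
static void vector_fmul_add_add_scalar(float *dst, const float *src0, const float *src1,
                                       const float *src2, int src3, int len, int step){
    int i;
    for(i=0; i<len; i++)
        dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
}
#endif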
static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
    // not bit-exact: pf2id uses different rounding than C and SSE
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "pf2id %1, %%mm0 \n\t"
            "pf2id %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq %%mm0, %0 \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("femms");
}
static void float_to_int16_sse(int16_t *dst, const float *src, int len){
    int i;
    for(i=0; i<len; i+=4) {
        asm volatile(
            "cvtps2pi %1, %%mm0 \n\t"
            "cvtps2pi %2, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t"
            "movq %%mm0, %0 \n\t"
            :"=m"(dst[i])
            :"m"(src[i]), "m"(src[i+2])
        );
    }
    asm volatile("emms");
}
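/* Scalar sketch of the conversion: convert to int (a C cast truncates, while
 * pf2id/cvtps2pi follow their own rounding rules, hence the bit-exactness
 * note above) and saturate to the int16_t range, which packssdw provides in
 * the SIMD versions. Illustrative only, not the exact dsputil C code: */
#if 0
static void float_to_int16_scalar(int16_t *dst, const float *src, int len){
    int i;
    for(i=0; i<len; i++) {
        int v = src[i];
        dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v;
    }
}
#endif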
extern void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                          int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                         int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
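    /* e.g. setting avctx->dsp_mask = FF_MM_FORCE | MM_MMX forces the MMX
       paths on regardless of what mm_support() reported; without FF_MM_FORCE
       the listed flags are disabled instead. */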
#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#ifdef CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 &&
                     avctx->codec->id!=CODEC_ID_THEORA &&
                     !(avctx->flags & CODEC_FLAG_BITEXACT)){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;
        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        if (ENABLE_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;
        if (mm_flags & MM_SSE2)
            c->h264_idct8_add= ff_h264_idct8_add_sse2;

        if (mm_flags & MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;

                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;

            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;

                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
        }
#define H264_QPEL_FUNCS(x, y, CPU)\
        c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
        c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;

        if((mm_flags & MM_SSE2) && !(mm_flags & MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & MM_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#ifdef HAVE_SSSE3
        if(mm_flags & MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif
#ifdef CONFIG_SNOW_DECODER
        if(mm_flags & MM_SSE2 & 0){ /* the "& 0" deliberately disables the SSE2 path */
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#ifdef HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#ifdef HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif
        if(mm_flags & MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT))
                c->float_to_int16 = float_to_int16_3dnow;
        }
        if(mm_flags & MM_3DNOWEXT)
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
        if(mm_flags & MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->vector_fmul = vector_fmul_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
        }
        if(mm_flags & MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
    }

    if (ENABLE_ENCODERS)
        dsputilenc_init_mmx(c, avctx);
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}