/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
    {0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
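
/* Naming convention for the constants above: ff_pw_N holds packed 16-bit
 * words of value N, ff_pb_N packed bytes of value N and ff_pd_N packed
 * doubles; they are used as memory operands of SIMD instructions, e.g.
 * "pmullw "MANGLE(ff_pw_20)"" further down multiplies four words by 20. */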
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
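
/* The PIC variants above synthesize the constants in a register instead of
 * referencing memory (which would need GOT-relative addressing): pcmpeqd
 * sets all bits, psrlw $15 leaves 0x0001 in each word, then packuswb
 * produces 0x01 in every byte (BONE) while psllw $1 produces 0x0002 in
 * every word (WTWO). */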
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
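
/* The same template is included twice with different rounding behaviour:
 * DEF() controls the generated names, so DEF(put, pixels8_x2) expands to
 * put_no_rnd_pixels8_x2_mmx on the first pass and put_pixels8_x2_mmx on
 * the second (assuming the template instantiates such put/avg variants,
 * as the aliases below suggest), while SET_RND/PAVGB/PAVGBP select the
 * matching truncating or rounding averages. */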
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus we use "r"(p) here
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
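
/* Reference: each packuswb pair above converts eight signed 16-bit
 * coefficients to bytes with unsigned saturation, so the net scalar effect
 * is pixels[x] = av_clip_uint8(block[x]) over the whole 8x8 block. */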
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
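
/* packsswb first saturates the coefficients to [-128,127]; adding the 0x80
 * bytes kept in mm1 (vector128) then maps that range to [0,255], i.e. the
 * stored value is effectively av_clip_uint8(block[x] + 128). */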
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
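
/* Note the load/store asymmetry in the SSE2 versions: movdqu tolerates an
 * unaligned source, while the movdqa stores require each destination row to
 * be 16-byte aligned, which callers are expected to guarantee. */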
static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
        );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
        );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
        );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
        );
    *left = l;
    *left_top = tl;
}
#endif
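
/* The cmov chain implements the HuffYUV median predictor; a scalar sketch
 * of one iteration (mid_pred() being the 3-way median helper used by the
 * generic C version):
 *     t = top[x];
 *     l = dst[x] = mid_pred(l, t, l + t - tl) + diff[x];
 *     tl = t;
 */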
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];

        __asm__ volatile(
            H263_LOOP_FILTER
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m" (*(uint64_t*)(src - 2*stride)),
              "+m" (*(uint64_t*)(src - 1*stride)),
              "+m" (*(uint64_t*)(src + 0*stride)),
              "+m" (*(uint64_t*)(src + 1*stride))
            : "g" (2*strength), "m"(ff_pb_FC)
            );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
        );
}
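
/* transpose4x4 is the standard unpack transpose: punpcklbw interleaves the
 * four input rows into 16-bit pairs, punpcklwd/punpckhwd regroup those into
 * 32-bit transposed rows, and the movd/punpckhdq pairs store each row. */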
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp[4]);
        uint8_t *btemp= (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp , src , 8, stride);
        transpose4x4(btemp+4, src + 4*stride, 8, stride);
        __asm__ volatile(
            H263_LOOP_FILTER // 5 3 4 6
            : "+m" (temp[0]),
              "+m" (temp[1]),
              "+m" (temp[2]),
              "+m" (temp[3])
            : "g" (2*strength), "m"(ff_pb_FC)
            );

        __asm__ volatile(
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0,%2) \n\t"
            "movd %%mm3, (%0,%2,2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0,%3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1,%2) \n\t"
            "movd %%mm6, (%1,%2,2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1,%3) \n\t"
            :: "r" (src),
               "r" (src + 4*stride),
               "r" ((x86_reg) stride ),
               "r" ((x86_reg)(3*stride))
            );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
            );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
            );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
            );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
            );
    }
}

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
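
/* This vectorizes the PNG Paeth predictor four samples at a time: with
 * p = left + top - topleft, the predictor picks whichever of left, top and
 * topleft is closest to p. The three distances |top - topleft| (mm3),
 * |left - topleft| (mm4) and |left + top - 2*topleft| (mm5) are produced
 * by the abs3 argument, i.e. ABS3_MMX2 or ABS3_SSSE3 below. */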
#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
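
/* QPEL_V_LOW evaluates one output row of the MPEG-4 quarter-pel lowpass
 * kernel (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5 in 16-bit lanes, where
 * x1..x4 are the symmetric tap sums, and writes the packed result through
 * OP; the per-instruction comments track which sum each register holds. */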
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
  1468. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1469. uint8_t * const halfHV= ((uint8_t*)half);\
  1470. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1471. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1472. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1473. }\
  1474. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1475. uint64_t half[17*2];\
  1476. uint8_t * const halfH= ((uint8_t*)half);\
  1477. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1478. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1479. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1480. }\
  1481. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1482. uint64_t half[17*2];\
  1483. uint8_t * const halfH= ((uint8_t*)half);\
  1484. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1485. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1486. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1487. }\
  1488. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1489. uint64_t half[17*2];\
  1490. uint8_t * const halfH= ((uint8_t*)half);\
  1491. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1492. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1493. }
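
/* Final-store ops plugged into the qpel templates above: PUT_OP is a plain
 * store, while the AVG variants average the result with the bytes already at
 * the destination (pavgusb on 3DNow!, pavgb on MMX2; both are rounded
 * byte-wise averages). */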
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}
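
/* Positions off the half-pel grid are approximated by blending nearby
 * pel/half-pel samples through the *_l3 helpers defined earlier in this
 * file; the (S0, S1, S2) offset triples below select which neighbours
 * enter the blend for each mc position. */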
#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)

#if 0
static void just_return(void) { return; }
#endif
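
/* Global motion compensation with bilinear interpolation, usable when the
 * affine parameters are constant at full-pel precision across the block.
 * mm6 holds s = 1<<shift broadcast to four words; dx4/dy4 carry the
 * per-pixel subpel fractions (updated incrementally each row), from which
 * the four bilinear weights (s-dx)(s-dy), dx(s-dy), (s-dx)dy and dx*dy are
 * derived and applied to the 2x2 source neighbourhood. Everything else is
 * punted to ff_gmc_c. */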
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq  %0,    %%mm4 \n\t"
                "movq  %1,    %%mm5 \n\t"
                "paddw %2,    %%mm4 \n\t"
                "paddw %3,    %%mm5 \n\t"
                "movq  %%mm4, %0    \n\t"
                "movq  %%mm5, %1    \n\t"
                "psrlw $12,   %%mm4 \n\t"
                "psrlw $12,   %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd      %4,    %%mm5 \n\t"
                "movd      %3,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd      %2,    %%mm5 \n\t"
                "movd      %1,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw     %5,    %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw     %6,    %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}
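
/* Generate simple cache-prefetch helpers: walk the block line by line and
 * issue the given prefetch instruction for each line (prefetcht0 on
 * MMX2-class CPUs, the 3DNow! prefetch otherwise). */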
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

#include "h264dsp_mmx.c"
#include "rv40dsp_mmx.c"

/* CAVS specific */
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif

static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
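
/* Vorbis inverse coupling turns (magnitude, angle) pairs back into the two
 * channel spectra. The scalar fallback is roughly:
 *
 *     if (mag[i] > 0) {
 *         if (ang[i] > 0) { ang[i] = mag[i] - ang[i]; }
 *         else            { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
 *     } else {
 *         if (ang[i] > 0) { ang[i] += mag[i]; }
 *         else            { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
 *     }
 *
 * The SIMD versions below replace the branches with sign masks, as the
 * inline comments note. */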
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq       %0, %%mm0 \n\t"
            "movq       %1, %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld     $31, %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}

static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps  %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps       %0, %%xmm0 \n\t"
            "movaps       %1, %%xmm1 \n\t"
            "xorps    %%xmm2, %%xmm2 \n\t"
            "xorps    %%xmm3, %%xmm3 \n\t"
            "cmpleps  %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps  %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps    %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps    %%xmm2, %%xmm1 \n\t"
            "movaps   %%xmm3, %%xmm4 \n\t"
            "andps    %%xmm1, %%xmm3 \n\t"
            "andnps   %%xmm1, %%xmm4 \n\t"
            "addps    %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps    %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps   %%xmm3, %1     \n\t"
            "movaps   %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}
#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
 stereo("addps         %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
   mono("addps         %%xmm2, %%xmm0 \n")\
        "movaps  %%xmm0,      (%0,%1) \n"\
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps  (%3,%0), %%xmm0 \n"\
 stereo("movaps   %%xmm0, %%xmm1 \n")\
        "mulps    %%xmm6, %%xmm0 \n"\
 stereo("mulps    %%xmm7, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps   (%1),   %%xmm2 \n"\
 stereo("movaps   %%xmm2, %%xmm3 \n")\
        "mulps   (%4,%2), %%xmm2 \n"\
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps    %%xmm2, %%xmm0 \n"\
 stereo("addps    %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps %%xmm0,     (%3,%0) \n"\
 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );
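
/* AC-3 downmix. MIX5 handles the common 5-channel-to-stereo (or -mono) case
 * with hardcoded channel routing; MIX_MISC is a generic matrix multiply over
 * in_ch input channels. Both loops run a negative byte offset up to zero,
 * and the channel planes are 0x400 bytes (256 floats) apart, matching the
 * float (*samples)[256] layout. */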
static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm6 \n"
            "movss    4(%2,%0), %%xmm7 \n"
            "shufps $0, %%xmm6, %%xmm6 \n"
            "shufps $0, %%xmm7, %%xmm7 \n"
            "movaps %%xmm6,   (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}
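
/* Element-wise dst[i] *= src[i]. The loops below run backward from the end
 * of the arrays and stop once the byte offset goes negative, so len is
 * assumed to be a multiple of the unroll width (4 floats for the 3DNow!
 * version, 8 for SSE). */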
static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%1,%0), %%mm0 \n\t"
        "movq   8(%1,%0), %%mm1 \n\t"
        "pfmul   (%2,%0), %%mm0 \n\t"
        "pfmul  8(%2,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b \n\t"
        "femms \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd    8(%1), %%mm0 \n\t"
        "pswapd     (%1), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%2,%0) \n\t"
        "movq   %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    x86_reg i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        __asm__ volatile(
            "1: \n\t"
            "movq   (%2,%0),  %%mm0 \n\t"
            "movq  8(%2,%0),  %%mm1 \n\t"
            "pfmul  (%3,%0),  %%mm0 \n\t"
            "pfmul 8(%3,%0),  %%mm1 \n\t"
            "pfadd  (%4,%0),  %%mm0 \n\t"
            "pfadd 8(%4,%0),  %%mm1 \n\t"
            "movd     %%mm0,   (%1) \n\t"
            "movd     %%mm1, 16(%1) \n\t"
            "psrlq      $32,  %%mm0 \n\t"
            "psrlq      $32,  %%mm1 \n\t"
            "movd     %%mm0,  8(%1) \n\t"
            "movd     %%mm1, 24(%1) \n\t"
            "sub  $32, %1 \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movq  %%mm0,   (%1,%0) \n\t"
            "movq  %%mm1,  8(%1,%0) \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    __asm__ volatile("femms");
}
static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    x86_reg i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movss    %%xmm0,    (%1) \n\t"
            "movss    %%xmm1,  32(%1) \n\t"
            "movhlps  %%xmm0,  %%xmm2 \n\t"
            "movhlps  %%xmm1,  %%xmm3 \n\t"
            "movss    %%xmm2,  16(%1) \n\t"
            "movss    %%xmm3,  48(%1) \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss    %%xmm0,   8(%1) \n\t"
            "movss    %%xmm1,  40(%1) \n\t"
            "movhlps  %%xmm0,  %%xmm2 \n\t"
            "movhlps  %%xmm1,  %%xmm3 \n\t"
            "movss    %%xmm2,  24(%1) \n\t"
            "movss    %%xmm3,  56(%1) \n\t"
            "sub  $64, %1 \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movaps %%xmm0,   (%1,%0) \n\t"
            "movaps %%xmm1, 16(%1,%0) \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
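
/* Windowed overlap-add as used by MDCT-based audio decoders. With dst, src0
 * and win pre-advanced by len, the C fallback computes, for i in [-len,0)
 * and j = -i-1:
 *     dst[i] = src0[i]*win[j] - src1[j]*win[i] + add_bias;
 *     dst[j] = src0[i]*win[i] + src1[j]*win[j] + add_bias;
 * The SIMD versions only handle the add_bias==0 case and defer to
 * ff_vector_fmul_window_c otherwise. */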
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps       (%5,%1), %%xmm1 \n"
            "movaps       (%5,%0), %%xmm0 \n"
            "movaps       (%4,%1), %%xmm5 \n"
            "movaps       (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps        %%xmm0, %%xmm2 \n"
            "movaps        %%xmm1, %%xmm3 \n"
            "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
            "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
            "addps         %%xmm3, %%xmm2 \n"
            "subps         %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps        %%xmm1, (%2,%0) \n"
            "movaps        %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}
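
/* dst[i] = src[i] * mul, converting int32 samples to float. The SSE version
 * converts through MMX registers with cvtpi2ps (two ints at a time), the
 * SSE2 one with cvtdq2ps (four at a time); both handle 8 samples per
 * iteration and use aligned loads/stores. */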
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss      %3,      %%xmm4 \n"
        "shufps $0, %%xmm4,  %%xmm4 \n"
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss      %3,      %%xmm4 \n"
        "shufps $0, %%xmm4,  %%xmm4 \n"
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
        "packssdw   %%xmm1      , %%xmm0    \n\t"
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
#if HAVE_YASM
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top);
void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
#if ARCH_X86_32
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif
void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
#else
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
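
/* Interleave per-channel float buffers into packed int16: conceptually
 *     dst[i*channels + c] = convert(src[c][i]);
 * Mono is a plain conversion, stereo is fused into one asm loop (the "body"
 * argument), 6 channels go through the yasm helpers declared above, and
 * everything else through the generic tmp-buffer fallback. */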
#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED_16(int16_t, tmp[len]);\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}

FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1:                         \n"
    "pf2id     (%2,%0), %%mm0   \n"
    "pf2id    8(%2,%0), %%mm1   \n"
    "pf2id     (%3,%0), %%mm2   \n"
    "pf2id    8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "femms                      \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "emms                       \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
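
/* int16 vector add/subtract (used e.g. by lossless audio codecs). v1 and v2
 * are advanced to their ends and indexed with a negative offset counting up
 * to zero; the movdqa stores require v1 to be 16-byte aligned, and order is
 * assumed to be a multiple of 16 samples (32 bytes per iteration). */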
static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqu   (%1,%2),   %%xmm0   \n\t"
        "movdqu 16(%1,%2),   %%xmm1   \n\t"
        "paddw    (%0,%2),   %%xmm0   \n\t"
        "paddw  16(%0,%2),   %%xmm1   \n\t"
        "movdqa   %%xmm0,    (%0,%2)  \n\t"
        "movdqa   %%xmm1,  16(%0,%2)  \n\t"
        "add      $32,       %2       \n\t"
        "js 1b                        \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqa   (%0,%2),   %%xmm0   \n\t"
        "movdqa 16(%0,%2),   %%xmm2   \n\t"
        "movdqu   (%1,%2),   %%xmm1   \n\t"
        "movdqu 16(%1,%2),   %%xmm3   \n\t"
        "psubw    %%xmm1,    %%xmm0   \n\t"
        "psubw    %%xmm3,    %%xmm2   \n\t"
        "movdqa   %%xmm0,    (%0,%2)  \n\t"
        "movdqa   %%xmm2,  16(%0,%2)  \n\t"
        "add      $32,       %2       \n\t"
        "js 1b                        \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}
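
/* Signed 16-bit dot product: pmaddwd accumulates v1[i]*v2[i] pairs, then the
 * four 32-bit partial sums are reduced horizontally. Note the shift is
 * applied to the partial sums before the last addition, so the result can
 * differ slightly from a scalar version that shifts every product. */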
static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
{
    int res = 0;
    DECLARE_ALIGNED_16(xmm_reg, sh);
    x86_reg o = -(order << 1);

    v1 += order;
    v2 += order;
    sh.a = shift;
    __asm__ volatile(
        "pxor      %%xmm7,  %%xmm7        \n\t"
        "1:                               \n\t"
        "movdqu    (%0,%3), %%xmm0        \n\t"
        "movdqu  16(%0,%3), %%xmm1        \n\t"
        "pmaddwd   (%1,%3), %%xmm0        \n\t"
        "pmaddwd 16(%1,%3), %%xmm1        \n\t"
        "paddd     %%xmm0,  %%xmm7        \n\t"
        "paddd     %%xmm1,  %%xmm7        \n\t"
        "add       $32,     %3            \n\t"
        "js 1b                            \n\t"
        "movhlps   %%xmm7,  %%xmm2        \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "psrad     %4,      %%xmm7        \n\t"
        "pshuflw   $0x4E,   %%xmm7,%%xmm2 \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "movd      %%xmm7,  %2            \n\t"
        : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
        : "m"(sh)
    );
    return res;
}
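
/* Runtime dispatch: query the CPU capabilities once (optionally overridden
 * through avctx->dsp_mask) and fill the DSPContext function pointers with
 * the best available MMX/MMX2/3DNow!/SSE/SSE2/SSSE3 implementations. */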
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
                     idct_algo==FF_IDCT_VP3){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if (mm_flags & FF_MM_SSE)
            c->clear_block = clear_block_sse;

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;
        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (mm_flags & FF_MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;

            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

#if HAVE_YASM
            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&FF_MM_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif

#if CONFIG_GPL && HAVE_YASM
        if( mm_flags&FF_MM_MMXEXT ){
#if ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            if( mm_flags&FF_MM_SSE2 ){
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif
                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
        }
#endif

#if CONFIG_SNOW_DECODER
        if(mm_flags & FF_MM_SSE2 & 0){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & FF_MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
            c->add_int16 = add_int16_sse2;
            c->sub_int16 = sub_int16_sse2;
            c->scalarproduct_int16 = scalarproduct_int16_sse2;
        }
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}