  1. /*
  2. * MMX optimized DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. *
  22. * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
  23. */
  24. #include "libavutil/cpu.h"
  25. #include "libavutil/x86_cpu.h"
  26. #include "libavcodec/dsputil.h"
  27. #include "libavcodec/h264dsp.h"
  28. #include "libavcodec/mpegvideo.h"
  29. #include "libavcodec/simple_idct.h"
  30. #include "libavcodec/ac3dec.h"
  31. #include "dsputil_mmx.h"
  32. #include "idct_xvid.h"
  33. #include "diracdsp_mmx.h"
  34. //#undef NDEBUG
  35. //#include <assert.h>
  36. /* pixel operations */
  37. DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
  38. DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
  39. DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
  40. {0x8000000080000000ULL, 0x8000000080000000ULL};
  41. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1 ) = {0x0001000100010001ULL, 0x0001000100010001ULL};
  42. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2 ) = {0x0002000200020002ULL, 0x0002000200020002ULL};
  43. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3 ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
  44. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4 ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
  45. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5 ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
  46. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8 ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
  47. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9 ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
  48. DECLARE_ALIGNED(8, const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
  49. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
  50. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
  51. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
  52. DECLARE_ALIGNED(8, const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
  53. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
  54. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
  55. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
  56. DECLARE_ALIGNED(8, const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
  57. DECLARE_ALIGNED(8, const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
  58. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
  59. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
  60. DECLARE_ALIGNED(8, const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
  61. DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
  62. DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
  63. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = {0x0200020002000200ULL, 0x0200020002000200ULL};
  64. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019)= {0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL};
  65. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0 ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
  66. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1 ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
  67. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3 ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
  68. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4 ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
  69. DECLARE_ALIGNED(8, const uint64_t, ff_pb_7 ) = 0x0707070707070707ULL;
  70. DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
  71. DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
  72. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
  73. DECLARE_ALIGNED(8, const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
  74. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
  75. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
  76. DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
  77. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};
  78. DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
  79. DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
  80. #define JUMPALIGN() __asm__ volatile (".p2align 3"::)
  81. #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
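// MOVQ_BFE fills a register with 0xfe bytes: pcmpeqd sets every bit (0xff per
// byte) and paddb then doubles each byte, 0xff + 0xff wrapping to 0xfe.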
  82. #define MOVQ_BFE(regd) \
  83. __asm__ volatile ( \
  84. "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
  85. "paddb %%" #regd ", %%" #regd " \n\t" ::)
  86. #ifndef PIC
  87. #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
  88. #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
  89. #else
  90. // for shared libraries it is better to access the constants this way
  91. // pcmpeqd -> -1
  92. #define MOVQ_BONE(regd) \
  93. __asm__ volatile ( \
  94. "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
  95. "psrlw $15, %%" #regd " \n\t" \
  96. "packuswb %%" #regd ", %%" #regd " \n\t" ::)
  97. #define MOVQ_WTWO(regd) \
  98. __asm__ volatile ( \
  99. "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
  100. "psrlw $15, %%" #regd " \n\t" \
  101. "psllw $1, %%" #regd " \n\t"::)
  102. #endif
  103. // using regr as temporary and for the output result
  104. // first argument is unmodified and second is trashed
  105. // regfe is supposed to contain 0xfefefefefefefefe
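// Byte-wise averaging without the MMX2 pavgb instruction:
//   no rounding: (a & b) + (((a ^ b) & 0xfe) >> 1)  ==  (a + b)     >> 1
//   rounding:    (a | b) - (((a ^ b) & 0xfe) >> 1)  ==  (a + b + 1) >> 1
// The 0xfe mask keeps the quadword psrlq shift from leaking bits across byte lanes.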
  106. #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
  107. "movq " #rega ", " #regr " \n\t"\
  108. "pand " #regb ", " #regr " \n\t"\
  109. "pxor " #rega ", " #regb " \n\t"\
  110. "pand " #regfe "," #regb " \n\t"\
  111. "psrlq $1, " #regb " \n\t"\
  112. "paddb " #regb ", " #regr " \n\t"
  113. #define PAVGB_MMX(rega, regb, regr, regfe) \
  114. "movq " #rega ", " #regr " \n\t"\
  115. "por " #regb ", " #regr " \n\t"\
  116. "pxor " #rega ", " #regb " \n\t"\
  117. "pand " #regfe "," #regb " \n\t"\
  118. "psrlq $1, " #regb " \n\t"\
  119. "psubb " #regb ", " #regr " \n\t"
  120. // mm6 is supposed to contain 0xfefefefefefefefe
  121. #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
  122. "movq " #rega ", " #regr " \n\t"\
  123. "movq " #regc ", " #regp " \n\t"\
  124. "pand " #regb ", " #regr " \n\t"\
  125. "pand " #regd ", " #regp " \n\t"\
  126. "pxor " #rega ", " #regb " \n\t"\
  127. "pxor " #regc ", " #regd " \n\t"\
  128. "pand %%mm6, " #regb " \n\t"\
  129. "pand %%mm6, " #regd " \n\t"\
  130. "psrlq $1, " #regb " \n\t"\
  131. "psrlq $1, " #regd " \n\t"\
  132. "paddb " #regb ", " #regr " \n\t"\
  133. "paddb " #regd ", " #regp " \n\t"
  134. #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
  135. "movq " #rega ", " #regr " \n\t"\
  136. "movq " #regc ", " #regp " \n\t"\
  137. "por " #regb ", " #regr " \n\t"\
  138. "por " #regd ", " #regp " \n\t"\
  139. "pxor " #rega ", " #regb " \n\t"\
  140. "pxor " #regc ", " #regd " \n\t"\
  141. "pand %%mm6, " #regb " \n\t"\
  142. "pand %%mm6, " #regd " \n\t"\
  143. "psrlq $1, " #regd " \n\t"\
  144. "psrlq $1, " #regb " \n\t"\
  145. "psubb " #regb ", " #regr " \n\t"\
  146. "psubb " #regd ", " #regp " \n\t"
  147. /***********************************/
  148. /* MMX no rounding */
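// The defines below parameterize dsputil_mmx_rnd_template.c; the template is
// included once with the no-rounding macros and once more below with the
// rounding macros to instantiate both variants of the put/avg pixel functions.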
  149. #define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
  150. #define SET_RND MOVQ_WONE
  151. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
  152. #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
  153. #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
  154. #include "dsputil_mmx_rnd_template.c"
  155. #undef DEF
  156. #undef SET_RND
  157. #undef PAVGBP
  158. #undef PAVGB
  159. /***********************************/
  160. /* MMX rounding */
  161. #define DEF(x, y) x ## _ ## y ##_mmx
  162. #define SET_RND MOVQ_WTWO
  163. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
  164. #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
  165. #include "dsputil_mmx_rnd_template.c"
  166. #undef DEF
  167. #undef SET_RND
  168. #undef PAVGBP
  169. #undef PAVGB
  170. #undef OP_AVG
  171. /***********************************/
  172. /* 3Dnow specific */
  173. #define DEF(x) x ## _3dnow
  174. #define PAVGB "pavgusb"
  175. #define OP_AVG PAVGB
  176. #include "dsputil_mmx_avg_template.c"
  177. #undef DEF
  178. #undef PAVGB
  179. #undef OP_AVG
  180. /***********************************/
  181. /* MMX2 specific */
  182. #define DEF(x) x ## _mmx2
  183. /* Introduced only in MMX2 set */
  184. #define PAVGB "pavgb"
  185. #define OP_AVG PAVGB
  186. #include "dsputil_mmx_avg_template.c"
  187. #undef DEF
  188. #undef PAVGB
  189. #undef OP_AVG
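// A plain copy involves no averaging, so the no-rounding, MMX2 and 3DNow put
// variants can simply alias the plain MMX copy routines.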
  190. #define put_no_rnd_pixels16_mmx put_pixels16_mmx
  191. #define put_no_rnd_pixels8_mmx put_pixels8_mmx
  192. #define put_pixels16_mmx2 put_pixels16_mmx
  193. #define put_pixels8_mmx2 put_pixels8_mmx
  194. #define put_pixels4_mmx2 put_pixels4_mmx
  195. #define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
  196. #define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
  197. #define put_pixels16_3dnow put_pixels16_mmx
  198. #define put_pixels8_3dnow put_pixels8_mmx
  199. #define put_pixels4_3dnow put_pixels4_mmx
  200. #define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
  201. #define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
  202. /***********************************/
  203. /* standard MMX */
  204. void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  205. {
  206. const DCTELEM *p;
  207. uint8_t *pix;
  208. /* read the pixels */
  209. p = block;
  210. pix = pixels;
  211. /* unrolled loop */
  212. __asm__ volatile(
  213. "movq %3, %%mm0 \n\t"
  214. "movq 8%3, %%mm1 \n\t"
  215. "movq 16%3, %%mm2 \n\t"
  216. "movq 24%3, %%mm3 \n\t"
  217. "movq 32%3, %%mm4 \n\t"
  218. "movq 40%3, %%mm5 \n\t"
  219. "movq 48%3, %%mm6 \n\t"
  220. "movq 56%3, %%mm7 \n\t"
  221. "packuswb %%mm1, %%mm0 \n\t"
  222. "packuswb %%mm3, %%mm2 \n\t"
  223. "packuswb %%mm5, %%mm4 \n\t"
  224. "packuswb %%mm7, %%mm6 \n\t"
  225. "movq %%mm0, (%0) \n\t"
  226. "movq %%mm2, (%0, %1) \n\t"
  227. "movq %%mm4, (%0, %1, 2) \n\t"
  228. "movq %%mm6, (%0, %2) \n\t"
  229. ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
  230. :"memory");
  231. pix += line_size*4;
  232. p += 32;
  233. // if this block were an exact copy of the one above, the compiler
  234. // would generate some very strange code,
  235. // thus the "r" constraint is used for p here
  236. __asm__ volatile(
  237. "movq (%3), %%mm0 \n\t"
  238. "movq 8(%3), %%mm1 \n\t"
  239. "movq 16(%3), %%mm2 \n\t"
  240. "movq 24(%3), %%mm3 \n\t"
  241. "movq 32(%3), %%mm4 \n\t"
  242. "movq 40(%3), %%mm5 \n\t"
  243. "movq 48(%3), %%mm6 \n\t"
  244. "movq 56(%3), %%mm7 \n\t"
  245. "packuswb %%mm1, %%mm0 \n\t"
  246. "packuswb %%mm3, %%mm2 \n\t"
  247. "packuswb %%mm5, %%mm4 \n\t"
  248. "packuswb %%mm7, %%mm6 \n\t"
  249. "movq %%mm0, (%0) \n\t"
  250. "movq %%mm2, (%0, %1) \n\t"
  251. "movq %%mm4, (%0, %1, 2) \n\t"
  252. "movq %%mm6, (%0, %2) \n\t"
  253. ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
  254. :"memory");
  255. }
  256. #define put_signed_pixels_clamped_mmx_half(off) \
  257. "movq "#off"(%2), %%mm1 \n\t"\
  258. "movq 16+"#off"(%2), %%mm2 \n\t"\
  259. "movq 32+"#off"(%2), %%mm3 \n\t"\
  260. "movq 48+"#off"(%2), %%mm4 \n\t"\
  261. "packsswb 8+"#off"(%2), %%mm1 \n\t"\
  262. "packsswb 24+"#off"(%2), %%mm2 \n\t"\
  263. "packsswb 40+"#off"(%2), %%mm3 \n\t"\
  264. "packsswb 56+"#off"(%2), %%mm4 \n\t"\
  265. "paddb %%mm0, %%mm1 \n\t"\
  266. "paddb %%mm0, %%mm2 \n\t"\
  267. "paddb %%mm0, %%mm3 \n\t"\
  268. "paddb %%mm0, %%mm4 \n\t"\
  269. "movq %%mm1, (%0) \n\t"\
  270. "movq %%mm2, (%0, %3) \n\t"\
  271. "movq %%mm3, (%0, %3, 2) \n\t"\
  272. "movq %%mm4, (%0, %1) \n\t"
  273. void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  274. {
  275. x86_reg line_skip = line_size;
  276. x86_reg line_skip3;
  277. __asm__ volatile (
  278. "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
  279. "lea (%3, %3, 2), %1 \n\t"
  280. put_signed_pixels_clamped_mmx_half(0)
  281. "lea (%0, %3, 4), %0 \n\t"
  282. put_signed_pixels_clamped_mmx_half(64)
  283. :"+&r" (pixels), "=&r" (line_skip3)
  284. :"r" (block), "r"(line_skip)
  285. :"memory");
  286. }
  287. void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  288. {
  289. const DCTELEM *p;
  290. uint8_t *pix;
  291. int i;
  292. /* read the pixels */
  293. p = block;
  294. pix = pixels;
  295. MOVQ_ZERO(mm7);
  296. i = 4;
  297. do {
  298. __asm__ volatile(
  299. "movq (%2), %%mm0 \n\t"
  300. "movq 8(%2), %%mm1 \n\t"
  301. "movq 16(%2), %%mm2 \n\t"
  302. "movq 24(%2), %%mm3 \n\t"
  303. "movq %0, %%mm4 \n\t"
  304. "movq %1, %%mm6 \n\t"
  305. "movq %%mm4, %%mm5 \n\t"
  306. "punpcklbw %%mm7, %%mm4 \n\t"
  307. "punpckhbw %%mm7, %%mm5 \n\t"
  308. "paddsw %%mm4, %%mm0 \n\t"
  309. "paddsw %%mm5, %%mm1 \n\t"
  310. "movq %%mm6, %%mm5 \n\t"
  311. "punpcklbw %%mm7, %%mm6 \n\t"
  312. "punpckhbw %%mm7, %%mm5 \n\t"
  313. "paddsw %%mm6, %%mm2 \n\t"
  314. "paddsw %%mm5, %%mm3 \n\t"
  315. "packuswb %%mm1, %%mm0 \n\t"
  316. "packuswb %%mm3, %%mm2 \n\t"
  317. "movq %%mm0, %0 \n\t"
  318. "movq %%mm2, %1 \n\t"
  319. :"+m"(*pix), "+m"(*(pix+line_size))
  320. :"r"(p)
  321. :"memory");
  322. pix += line_size*2;
  323. p += 16;
  324. } while (--i);
  325. }
  326. static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  327. {
  328. __asm__ volatile(
  329. "lea (%3, %3), %%"REG_a" \n\t"
  330. ".p2align 3 \n\t"
  331. "1: \n\t"
  332. "movd (%1), %%mm0 \n\t"
  333. "movd (%1, %3), %%mm1 \n\t"
  334. "movd %%mm0, (%2) \n\t"
  335. "movd %%mm1, (%2, %3) \n\t"
  336. "add %%"REG_a", %1 \n\t"
  337. "add %%"REG_a", %2 \n\t"
  338. "movd (%1), %%mm0 \n\t"
  339. "movd (%1, %3), %%mm1 \n\t"
  340. "movd %%mm0, (%2) \n\t"
  341. "movd %%mm1, (%2, %3) \n\t"
  342. "add %%"REG_a", %1 \n\t"
  343. "add %%"REG_a", %2 \n\t"
  344. "subl $4, %0 \n\t"
  345. "jnz 1b \n\t"
  346. : "+g"(h), "+r" (pixels), "+r" (block)
  347. : "r"((x86_reg)line_size)
  348. : "%"REG_a, "memory"
  349. );
  350. }
  351. static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  352. {
  353. __asm__ volatile(
  354. "lea (%3, %3), %%"REG_a" \n\t"
  355. ".p2align 3 \n\t"
  356. "1: \n\t"
  357. "movq (%1), %%mm0 \n\t"
  358. "movq (%1, %3), %%mm1 \n\t"
  359. "movq %%mm0, (%2) \n\t"
  360. "movq %%mm1, (%2, %3) \n\t"
  361. "add %%"REG_a", %1 \n\t"
  362. "add %%"REG_a", %2 \n\t"
  363. "movq (%1), %%mm0 \n\t"
  364. "movq (%1, %3), %%mm1 \n\t"
  365. "movq %%mm0, (%2) \n\t"
  366. "movq %%mm1, (%2, %3) \n\t"
  367. "add %%"REG_a", %1 \n\t"
  368. "add %%"REG_a", %2 \n\t"
  369. "subl $4, %0 \n\t"
  370. "jnz 1b \n\t"
  371. : "+g"(h), "+r" (pixels), "+r" (block)
  372. : "r"((x86_reg)line_size)
  373. : "%"REG_a, "memory"
  374. );
  375. }
  376. static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  377. {
  378. __asm__ volatile(
  379. "lea (%3, %3), %%"REG_a" \n\t"
  380. ".p2align 3 \n\t"
  381. "1: \n\t"
  382. "movq (%1), %%mm0 \n\t"
  383. "movq 8(%1), %%mm4 \n\t"
  384. "movq (%1, %3), %%mm1 \n\t"
  385. "movq 8(%1, %3), %%mm5 \n\t"
  386. "movq %%mm0, (%2) \n\t"
  387. "movq %%mm4, 8(%2) \n\t"
  388. "movq %%mm1, (%2, %3) \n\t"
  389. "movq %%mm5, 8(%2, %3) \n\t"
  390. "add %%"REG_a", %1 \n\t"
  391. "add %%"REG_a", %2 \n\t"
  392. "movq (%1), %%mm0 \n\t"
  393. "movq 8(%1), %%mm4 \n\t"
  394. "movq (%1, %3), %%mm1 \n\t"
  395. "movq 8(%1, %3), %%mm5 \n\t"
  396. "movq %%mm0, (%2) \n\t"
  397. "movq %%mm4, 8(%2) \n\t"
  398. "movq %%mm1, (%2, %3) \n\t"
  399. "movq %%mm5, 8(%2, %3) \n\t"
  400. "add %%"REG_a", %1 \n\t"
  401. "add %%"REG_a", %2 \n\t"
  402. "subl $4, %0 \n\t"
  403. "jnz 1b \n\t"
  404. : "+g"(h), "+r" (pixels), "+r" (block)
  405. : "r"((x86_reg)line_size)
  406. : "%"REG_a, "memory"
  407. );
  408. }
  409. static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  410. {
  411. __asm__ volatile(
  412. "1: \n\t"
  413. "movdqu (%1), %%xmm0 \n\t"
  414. "movdqu (%1,%3), %%xmm1 \n\t"
  415. "movdqu (%1,%3,2), %%xmm2 \n\t"
  416. "movdqu (%1,%4), %%xmm3 \n\t"
  417. "lea (%1,%3,4), %1 \n\t"
  418. "movdqa %%xmm0, (%2) \n\t"
  419. "movdqa %%xmm1, (%2,%3) \n\t"
  420. "movdqa %%xmm2, (%2,%3,2) \n\t"
  421. "movdqa %%xmm3, (%2,%4) \n\t"
  422. "subl $4, %0 \n\t"
  423. "lea (%2,%3,4), %2 \n\t"
  424. "jnz 1b \n\t"
  425. : "+g"(h), "+r" (pixels), "+r" (block)
  426. : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
  427. : "memory"
  428. );
  429. }
  430. static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  431. {
  432. __asm__ volatile(
  433. "1: \n\t"
  434. "movdqu (%1), %%xmm0 \n\t"
  435. "movdqu (%1,%3), %%xmm1 \n\t"
  436. "movdqu (%1,%3,2), %%xmm2 \n\t"
  437. "movdqu (%1,%4), %%xmm3 \n\t"
  438. "lea (%1,%3,4), %1 \n\t"
  439. "pavgb (%2), %%xmm0 \n\t"
  440. "pavgb (%2,%3), %%xmm1 \n\t"
  441. "pavgb (%2,%3,2), %%xmm2 \n\t"
  442. "pavgb (%2,%4), %%xmm3 \n\t"
  443. "movdqa %%xmm0, (%2) \n\t"
  444. "movdqa %%xmm1, (%2,%3) \n\t"
  445. "movdqa %%xmm2, (%2,%3,2) \n\t"
  446. "movdqa %%xmm3, (%2,%4) \n\t"
  447. "subl $4, %0 \n\t"
  448. "lea (%2,%3,4), %2 \n\t"
  449. "jnz 1b \n\t"
  450. : "+g"(h), "+r" (pixels), "+r" (block)
  451. : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
  452. : "memory"
  453. );
  454. }
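// Zero n consecutive 8x8 blocks of DCTELEMs (128 bytes each), writing 32 bytes
// per iteration while counting an index up from -128*n to 0.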
  455. #define CLEAR_BLOCKS(name,n) \
  456. static void name(DCTELEM *blocks)\
  457. {\
  458. __asm__ volatile(\
  459. "pxor %%mm7, %%mm7 \n\t"\
  460. "mov %1, %%"REG_a" \n\t"\
  461. "1: \n\t"\
  462. "movq %%mm7, (%0, %%"REG_a") \n\t"\
  463. "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
  464. "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
  465. "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
  466. "add $32, %%"REG_a" \n\t"\
  467. " js 1b \n\t"\
  468. : : "r" (((uint8_t *)blocks)+128*n),\
  469. "i" (-128*n)\
  470. : "%"REG_a\
  471. );\
  472. }
  473. CLEAR_BLOCKS(clear_blocks_mmx, 6)
  474. CLEAR_BLOCKS(clear_block_mmx, 1)
  475. static void clear_block_sse(DCTELEM *block)
  476. {
  477. __asm__ volatile(
  478. "xorps %%xmm0, %%xmm0 \n"
  479. "movaps %%xmm0, (%0) \n"
  480. "movaps %%xmm0, 16(%0) \n"
  481. "movaps %%xmm0, 32(%0) \n"
  482. "movaps %%xmm0, 48(%0) \n"
  483. "movaps %%xmm0, 64(%0) \n"
  484. "movaps %%xmm0, 80(%0) \n"
  485. "movaps %%xmm0, 96(%0) \n"
  486. "movaps %%xmm0, 112(%0) \n"
  487. :: "r"(block)
  488. : "memory"
  489. );
  490. }
  491. static void clear_blocks_sse(DCTELEM *blocks)
  492. {
  493. __asm__ volatile(
  494. "xorps %%xmm0, %%xmm0 \n"
  495. "mov %1, %%"REG_a" \n"
  496. "1: \n"
  497. "movaps %%xmm0, (%0, %%"REG_a") \n"
  498. "movaps %%xmm0, 16(%0, %%"REG_a") \n"
  499. "movaps %%xmm0, 32(%0, %%"REG_a") \n"
  500. "movaps %%xmm0, 48(%0, %%"REG_a") \n"
  501. "movaps %%xmm0, 64(%0, %%"REG_a") \n"
  502. "movaps %%xmm0, 80(%0, %%"REG_a") \n"
  503. "movaps %%xmm0, 96(%0, %%"REG_a") \n"
  504. "movaps %%xmm0, 112(%0, %%"REG_a") \n"
  505. "add $128, %%"REG_a" \n"
  506. " js 1b \n"
  507. : : "r" (((uint8_t *)blocks)+128*6),
  508. "i" (-128*6)
  509. : "%"REG_a
  510. );
  511. }
  512. static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
  513. x86_reg i=0;
  514. __asm__ volatile(
  515. "jmp 2f \n\t"
  516. "1: \n\t"
  517. "movq (%1, %0), %%mm0 \n\t"
  518. "movq (%2, %0), %%mm1 \n\t"
  519. "paddb %%mm0, %%mm1 \n\t"
  520. "movq %%mm1, (%2, %0) \n\t"
  521. "movq 8(%1, %0), %%mm0 \n\t"
  522. "movq 8(%2, %0), %%mm1 \n\t"
  523. "paddb %%mm0, %%mm1 \n\t"
  524. "movq %%mm1, 8(%2, %0) \n\t"
  525. "add $16, %0 \n\t"
  526. "2: \n\t"
  527. "cmp %3, %0 \n\t"
  528. " js 1b \n\t"
  529. : "+r" (i)
  530. : "r"(src), "r"(dst), "r"((x86_reg)w-15)
  531. );
  532. for(; i<w; i++)
  533. dst[i+0] += src[i+0];
  534. }
  535. #if HAVE_7REGS
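// HuffYUV median prediction: for each byte the predictor is
// median(left, top, left + top - topleft) and the diff is added on top;
// the comparisons are done branchlessly with cmov.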
  536. static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
  537. x86_reg w2 = -w;
  538. x86_reg x;
  539. int l = *left & 0xff;
  540. int tl = *left_top & 0xff;
  541. int t;
  542. __asm__ volatile(
  543. "mov %7, %3 \n"
  544. "1: \n"
  545. "movzbl (%3,%4), %2 \n"
  546. "mov %2, %k3 \n"
  547. "sub %b1, %b3 \n"
  548. "add %b0, %b3 \n"
  549. "mov %2, %1 \n"
  550. "cmp %0, %2 \n"
  551. "cmovg %0, %2 \n"
  552. "cmovg %1, %0 \n"
  553. "cmp %k3, %0 \n"
  554. "cmovg %k3, %0 \n"
  555. "mov %7, %3 \n"
  556. "cmp %2, %0 \n"
  557. "cmovl %2, %0 \n"
  558. "add (%6,%4), %b0 \n"
  559. "mov %b0, (%5,%4) \n"
  560. "inc %4 \n"
  561. "jl 1b \n"
  562. :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
  563. :"r"(dst+w), "r"(diff+w), "rm"(top+w)
  564. );
  565. *left = l;
  566. *left_top = tl;
  567. }
  568. #endif
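// H.263 deblocking across one block edge: operands %0..%3 hold the four
// 8-pixel lines at offsets -2, -1, 0, +1 from the edge, %4 is 2*strength and
// %5 is ff_pb_FC; the filtered lines are left in mm5, mm3, mm4 and mm6.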
  569. #define H263_LOOP_FILTER \
  570. "pxor %%mm7, %%mm7 \n\t"\
  571. "movq %0, %%mm0 \n\t"\
  572. "movq %0, %%mm1 \n\t"\
  573. "movq %3, %%mm2 \n\t"\
  574. "movq %3, %%mm3 \n\t"\
  575. "punpcklbw %%mm7, %%mm0 \n\t"\
  576. "punpckhbw %%mm7, %%mm1 \n\t"\
  577. "punpcklbw %%mm7, %%mm2 \n\t"\
  578. "punpckhbw %%mm7, %%mm3 \n\t"\
  579. "psubw %%mm2, %%mm0 \n\t"\
  580. "psubw %%mm3, %%mm1 \n\t"\
  581. "movq %1, %%mm2 \n\t"\
  582. "movq %1, %%mm3 \n\t"\
  583. "movq %2, %%mm4 \n\t"\
  584. "movq %2, %%mm5 \n\t"\
  585. "punpcklbw %%mm7, %%mm2 \n\t"\
  586. "punpckhbw %%mm7, %%mm3 \n\t"\
  587. "punpcklbw %%mm7, %%mm4 \n\t"\
  588. "punpckhbw %%mm7, %%mm5 \n\t"\
  589. "psubw %%mm2, %%mm4 \n\t"\
  590. "psubw %%mm3, %%mm5 \n\t"\
  591. "psllw $2, %%mm4 \n\t"\
  592. "psllw $2, %%mm5 \n\t"\
  593. "paddw %%mm0, %%mm4 \n\t"\
  594. "paddw %%mm1, %%mm5 \n\t"\
  595. "pxor %%mm6, %%mm6 \n\t"\
  596. "pcmpgtw %%mm4, %%mm6 \n\t"\
  597. "pcmpgtw %%mm5, %%mm7 \n\t"\
  598. "pxor %%mm6, %%mm4 \n\t"\
  599. "pxor %%mm7, %%mm5 \n\t"\
  600. "psubw %%mm6, %%mm4 \n\t"\
  601. "psubw %%mm7, %%mm5 \n\t"\
  602. "psrlw $3, %%mm4 \n\t"\
  603. "psrlw $3, %%mm5 \n\t"\
  604. "packuswb %%mm5, %%mm4 \n\t"\
  605. "packsswb %%mm7, %%mm6 \n\t"\
  606. "pxor %%mm7, %%mm7 \n\t"\
  607. "movd %4, %%mm2 \n\t"\
  608. "punpcklbw %%mm2, %%mm2 \n\t"\
  609. "punpcklbw %%mm2, %%mm2 \n\t"\
  610. "punpcklbw %%mm2, %%mm2 \n\t"\
  611. "psubusb %%mm4, %%mm2 \n\t"\
  612. "movq %%mm2, %%mm3 \n\t"\
  613. "psubusb %%mm4, %%mm3 \n\t"\
  614. "psubb %%mm3, %%mm2 \n\t"\
  615. "movq %1, %%mm3 \n\t"\
  616. "movq %2, %%mm4 \n\t"\
  617. "pxor %%mm6, %%mm3 \n\t"\
  618. "pxor %%mm6, %%mm4 \n\t"\
  619. "paddusb %%mm2, %%mm3 \n\t"\
  620. "psubusb %%mm2, %%mm4 \n\t"\
  621. "pxor %%mm6, %%mm3 \n\t"\
  622. "pxor %%mm6, %%mm4 \n\t"\
  623. "paddusb %%mm2, %%mm2 \n\t"\
  624. "packsswb %%mm1, %%mm0 \n\t"\
  625. "pcmpgtb %%mm0, %%mm7 \n\t"\
  626. "pxor %%mm7, %%mm0 \n\t"\
  627. "psubb %%mm7, %%mm0 \n\t"\
  628. "movq %%mm0, %%mm1 \n\t"\
  629. "psubusb %%mm2, %%mm0 \n\t"\
  630. "psubb %%mm0, %%mm1 \n\t"\
  631. "pand %5, %%mm1 \n\t"\
  632. "psrlw $2, %%mm1 \n\t"\
  633. "pxor %%mm7, %%mm1 \n\t"\
  634. "psubb %%mm7, %%mm1 \n\t"\
  635. "movq %0, %%mm5 \n\t"\
  636. "movq %3, %%mm6 \n\t"\
  637. "psubb %%mm1, %%mm5 \n\t"\
  638. "paddb %%mm1, %%mm6 \n\t"
  639. static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
  640. if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  641. const int strength= ff_h263_loop_filter_strength[qscale];
  642. __asm__ volatile(
  643. H263_LOOP_FILTER
  644. "movq %%mm3, %1 \n\t"
  645. "movq %%mm4, %2 \n\t"
  646. "movq %%mm5, %0 \n\t"
  647. "movq %%mm6, %3 \n\t"
  648. : "+m" (*(uint64_t*)(src - 2*stride)),
  649. "+m" (*(uint64_t*)(src - 1*stride)),
  650. "+m" (*(uint64_t*)(src + 0*stride)),
  651. "+m" (*(uint64_t*)(src + 1*stride))
  652. : "g" (2*strength), "m"(ff_pb_FC)
  653. );
  654. }
  655. }
  656. static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
  657. if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  658. const int strength= ff_h263_loop_filter_strength[qscale];
  659. DECLARE_ALIGNED(8, uint64_t, temp)[4];
  660. uint8_t *btemp= (uint8_t*)temp;
  661. src -= 2;
  662. transpose4x4(btemp , src , 8, stride);
  663. transpose4x4(btemp+4, src + 4*stride, 8, stride);
  664. __asm__ volatile(
  665. H263_LOOP_FILTER // 5 3 4 6
  666. : "+m" (temp[0]),
  667. "+m" (temp[1]),
  668. "+m" (temp[2]),
  669. "+m" (temp[3])
  670. : "g" (2*strength), "m"(ff_pb_FC)
  671. );
  672. __asm__ volatile(
  673. "movq %%mm5, %%mm1 \n\t"
  674. "movq %%mm4, %%mm0 \n\t"
  675. "punpcklbw %%mm3, %%mm5 \n\t"
  676. "punpcklbw %%mm6, %%mm4 \n\t"
  677. "punpckhbw %%mm3, %%mm1 \n\t"
  678. "punpckhbw %%mm6, %%mm0 \n\t"
  679. "movq %%mm5, %%mm3 \n\t"
  680. "movq %%mm1, %%mm6 \n\t"
  681. "punpcklwd %%mm4, %%mm5 \n\t"
  682. "punpcklwd %%mm0, %%mm1 \n\t"
  683. "punpckhwd %%mm4, %%mm3 \n\t"
  684. "punpckhwd %%mm0, %%mm6 \n\t"
  685. "movd %%mm5, (%0) \n\t"
  686. "punpckhdq %%mm5, %%mm5 \n\t"
  687. "movd %%mm5, (%0,%2) \n\t"
  688. "movd %%mm3, (%0,%2,2) \n\t"
  689. "punpckhdq %%mm3, %%mm3 \n\t"
  690. "movd %%mm3, (%0,%3) \n\t"
  691. "movd %%mm1, (%1) \n\t"
  692. "punpckhdq %%mm1, %%mm1 \n\t"
  693. "movd %%mm1, (%1,%2) \n\t"
  694. "movd %%mm6, (%1,%2,2) \n\t"
  695. "punpckhdq %%mm6, %%mm6 \n\t"
  696. "movd %%mm6, (%1,%3) \n\t"
  697. :: "r" (src),
  698. "r" (src + 4*stride),
  699. "r" ((x86_reg) stride ),
  700. "r" ((x86_reg)(3*stride))
  701. );
  702. }
  703. }
  704. /* draw the edges of width 'w' of an image of size width x height;
  705. this MMX version can only handle w==8 or w==16 */
  706. static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
  707. {
  708. uint8_t *ptr, *last_line;
  709. int i;
  710. last_line = buf + (height - 1) * wrap;
  711. /* left and right */
  712. ptr = buf;
  713. if(w==8)
  714. {
  715. __asm__ volatile(
  716. "1: \n\t"
  717. "movd (%0), %%mm0 \n\t"
  718. "punpcklbw %%mm0, %%mm0 \n\t"
  719. "punpcklwd %%mm0, %%mm0 \n\t"
  720. "punpckldq %%mm0, %%mm0 \n\t"
  721. "movq %%mm0, -8(%0) \n\t"
  722. "movq -8(%0, %2), %%mm1 \n\t"
  723. "punpckhbw %%mm1, %%mm1 \n\t"
  724. "punpckhwd %%mm1, %%mm1 \n\t"
  725. "punpckhdq %%mm1, %%mm1 \n\t"
  726. "movq %%mm1, (%0, %2) \n\t"
  727. "add %1, %0 \n\t"
  728. "cmp %3, %0 \n\t"
  729. " jb 1b \n\t"
  730. : "+r" (ptr)
  731. : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
  732. );
  733. }
  734. else
  735. {
  736. __asm__ volatile(
  737. "1: \n\t"
  738. "movd (%0), %%mm0 \n\t"
  739. "punpcklbw %%mm0, %%mm0 \n\t"
  740. "punpcklwd %%mm0, %%mm0 \n\t"
  741. "punpckldq %%mm0, %%mm0 \n\t"
  742. "movq %%mm0, -8(%0) \n\t"
  743. "movq %%mm0, -16(%0) \n\t"
  744. "movq -8(%0, %2), %%mm1 \n\t"
  745. "punpckhbw %%mm1, %%mm1 \n\t"
  746. "punpckhwd %%mm1, %%mm1 \n\t"
  747. "punpckhdq %%mm1, %%mm1 \n\t"
  748. "movq %%mm1, (%0, %2) \n\t"
  749. "movq %%mm1, 8(%0, %2) \n\t"
  750. "add %1, %0 \n\t"
  751. "cmp %3, %0 \n\t"
  752. " jb 1b \n\t"
  753. : "+r" (ptr)
  754. : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
  755. );
  756. }
  757. /* top and bottom (and hopefully also the corners) */
  758. if (sides&EDGE_TOP) {
  759. for(i = 0; i < h; i += 4) {
  760. ptr= buf - (i + 1) * wrap - w;
  761. __asm__ volatile(
  762. "1: \n\t"
  763. "movq (%1, %0), %%mm0 \n\t"
  764. "movq %%mm0, (%0) \n\t"
  765. "movq %%mm0, (%0, %2) \n\t"
  766. "movq %%mm0, (%0, %2, 2) \n\t"
  767. "movq %%mm0, (%0, %3) \n\t"
  768. "add $8, %0 \n\t"
  769. "cmp %4, %0 \n\t"
  770. " jb 1b \n\t"
  771. : "+r" (ptr)
  772. : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
  773. );
  774. }
  775. }
  776. if (sides&EDGE_BOTTOM) {
  777. for(i = 0; i < w; i += 4) {
  778. ptr= last_line + (i + 1) * wrap - w;
  779. __asm__ volatile(
  780. "1: \n\t"
  781. "movq (%1, %0), %%mm0 \n\t"
  782. "movq %%mm0, (%0) \n\t"
  783. "movq %%mm0, (%0, %2) \n\t"
  784. "movq %%mm0, (%0, %2, 2) \n\t"
  785. "movq %%mm0, (%0, %3) \n\t"
  786. "add $8, %0 \n\t"
  787. "cmp %4, %0 \n\t"
  788. " jb 1b \n\t"
  789. : "+r" (ptr)
  790. : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
  791. );
  792. }
  793. }
  794. }
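// MPEG-4 quarter-pel half-sample filter:
//   out = (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5
// i.e. the 8-tap kernel (-1, 3, -6, 20, 20, -6, 3, -1)/32; QPEL_V_LOW computes
// one output row of the vertical pass.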
  795. #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
  796. "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
  797. "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
  798. "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
  799. "movq "#in7", " #m3 " \n\t" /* d */\
  800. "movq "#in0", %%mm5 \n\t" /* D */\
  801. "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
  802. "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
  803. "movq "#in1", %%mm5 \n\t" /* C */\
  804. "movq "#in2", %%mm6 \n\t" /* B */\
  805. "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
  806. "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
  807. "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
  808. "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
  809. "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
  810. "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
  811. "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
  812. "psraw $5, %%mm5 \n\t"\
  813. "packuswb %%mm5, %%mm5 \n\t"\
  814. OP(%%mm5, out, %%mm7, d)
  815. #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
  816. static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  817. uint64_t temp;\
  818. \
  819. __asm__ volatile(\
  820. "pxor %%mm7, %%mm7 \n\t"\
  821. "1: \n\t"\
  822. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  823. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  824. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  825. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  826. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  827. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  828. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  829. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  830. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  831. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  832. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  833. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  834. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  835. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  836. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  837. "paddw %%mm3, %%mm5 \n\t" /* b */\
  838. "paddw %%mm2, %%mm6 \n\t" /* c */\
  839. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  840. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  841. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  842. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  843. "paddw %%mm4, %%mm0 \n\t" /* a */\
  844. "paddw %%mm1, %%mm5 \n\t" /* d */\
  845. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  846. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  847. "paddw %6, %%mm6 \n\t"\
  848. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  849. "psraw $5, %%mm0 \n\t"\
  850. "movq %%mm0, %5 \n\t"\
  851. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  852. \
  853. "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
  854. "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
  855. "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
  856. "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
  857. "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
  858. "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
  859. "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
  860. "paddw %%mm0, %%mm2 \n\t" /* b */\
  861. "paddw %%mm5, %%mm3 \n\t" /* c */\
  862. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  863. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  864. "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
  865. "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
  866. "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
  867. "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
  868. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  869. "paddw %%mm2, %%mm1 \n\t" /* a */\
  870. "paddw %%mm6, %%mm4 \n\t" /* d */\
  871. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  872. "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
  873. "paddw %6, %%mm1 \n\t"\
  874. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
  875. "psraw $5, %%mm3 \n\t"\
  876. "movq %5, %%mm1 \n\t"\
  877. "packuswb %%mm3, %%mm1 \n\t"\
  878. OP_MMX2(%%mm1, (%1),%%mm4, q)\
  879. /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
  880. \
  881. "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
  882. "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
  883. "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
  884. "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
  885. "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
  886. "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
  887. "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
  888. "paddw %%mm1, %%mm5 \n\t" /* b */\
  889. "paddw %%mm4, %%mm0 \n\t" /* c */\
  890. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  891. "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
  892. "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
  893. "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
  894. "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
  895. "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
  896. "paddw %%mm3, %%mm2 \n\t" /* d */\
  897. "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
  898. "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
  899. "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
  900. "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
  901. "paddw %%mm2, %%mm6 \n\t" /* a */\
  902. "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
  903. "paddw %6, %%mm0 \n\t"\
  904. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  905. "psraw $5, %%mm0 \n\t"\
  906. /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
  907. \
  908. "paddw %%mm5, %%mm3 \n\t" /* a */\
  909. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
  910. "paddw %%mm4, %%mm6 \n\t" /* b */\
  911. "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
  912. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
  913. "paddw %%mm1, %%mm4 \n\t" /* c */\
  914. "paddw %%mm2, %%mm5 \n\t" /* d */\
  915. "paddw %%mm6, %%mm6 \n\t" /* 2b */\
  916. "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
  917. "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
  918. "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
  919. "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
  920. "paddw %6, %%mm4 \n\t"\
  921. "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
  922. "psraw $5, %%mm4 \n\t"\
  923. "packuswb %%mm4, %%mm0 \n\t"\
  924. OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
  925. \
  926. "add %3, %0 \n\t"\
  927. "add %4, %1 \n\t"\
  928. "decl %2 \n\t"\
  929. " jnz 1b \n\t"\
  930. : "+a"(src), "+c"(dst), "+D"(h)\
  931. : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  932. : "memory"\
  933. );\
  934. }\
  935. \
  936. static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  937. int i;\
  938. int16_t temp[16];\
  939. /* quick HACK, XXX FIXME MUST be optimized */\
  940. for(i=0; i<h; i++)\
  941. {\
  942. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  943. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  944. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  945. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  946. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  947. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
  948. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
  949. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
  950. temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
  951. temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
  952. temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
  953. temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
  954. temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
  955. temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
  956. temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
  957. temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
  958. __asm__ volatile(\
  959. "movq (%0), %%mm0 \n\t"\
  960. "movq 8(%0), %%mm1 \n\t"\
  961. "paddw %2, %%mm0 \n\t"\
  962. "paddw %2, %%mm1 \n\t"\
  963. "psraw $5, %%mm0 \n\t"\
  964. "psraw $5, %%mm1 \n\t"\
  965. "packuswb %%mm1, %%mm0 \n\t"\
  966. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  967. "movq 16(%0), %%mm0 \n\t"\
  968. "movq 24(%0), %%mm1 \n\t"\
  969. "paddw %2, %%mm0 \n\t"\
  970. "paddw %2, %%mm1 \n\t"\
  971. "psraw $5, %%mm0 \n\t"\
  972. "psraw $5, %%mm1 \n\t"\
  973. "packuswb %%mm1, %%mm0 \n\t"\
  974. OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
  975. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  976. : "memory"\
  977. );\
  978. dst+=dstStride;\
  979. src+=srcStride;\
  980. }\
  981. }\
  982. \
  983. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  984. __asm__ volatile(\
  985. "pxor %%mm7, %%mm7 \n\t"\
  986. "1: \n\t"\
  987. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  988. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  989. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  990. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  991. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  992. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  993. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  994. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  995. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  996. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  997. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  998. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  999. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1000. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1001. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1002. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1003. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1004. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1005. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1006. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1007. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1008. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1009. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1010. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1011. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1012. "paddw %5, %%mm6 \n\t"\
  1013. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1014. "psraw $5, %%mm0 \n\t"\
  1015. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1016. \
  1017. "movd 5(%0), %%mm5 \n\t" /* FGHI */\
  1018. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
  1019. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
  1020. "paddw %%mm5, %%mm1 \n\t" /* a */\
  1021. "paddw %%mm6, %%mm2 \n\t" /* b */\
  1022. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
  1023. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
  1024. "paddw %%mm6, %%mm3 \n\t" /* c */\
  1025. "paddw %%mm5, %%mm4 \n\t" /* d */\
  1026. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1027. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1028. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1029. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1030. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
  1031. "paddw %5, %%mm1 \n\t"\
  1032. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
  1033. "psraw $5, %%mm3 \n\t"\
  1034. "packuswb %%mm3, %%mm0 \n\t"\
  1035. OP_MMX2(%%mm0, (%1), %%mm4, q)\
  1036. \
  1037. "add %3, %0 \n\t"\
  1038. "add %4, %1 \n\t"\
  1039. "decl %2 \n\t"\
  1040. " jnz 1b \n\t"\
  1041. : "+a"(src), "+c"(dst), "+d"(h)\
  1042. : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
  1043. : "memory"\
  1044. );\
  1045. }\
  1046. \
  1047. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1048. int i;\
  1049. int16_t temp[8];\
  1050. /* quick HACK, XXX FIXME MUST be optimized */\
  1051. for(i=0; i<h; i++)\
  1052. {\
  1053. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1054. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1055. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1056. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1057. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1058. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
  1059. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
  1060. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
  1061. __asm__ volatile(\
  1062. "movq (%0), %%mm0 \n\t"\
  1063. "movq 8(%0), %%mm1 \n\t"\
  1064. "paddw %2, %%mm0 \n\t"\
  1065. "paddw %2, %%mm1 \n\t"\
  1066. "psraw $5, %%mm0 \n\t"\
  1067. "psraw $5, %%mm1 \n\t"\
  1068. "packuswb %%mm1, %%mm0 \n\t"\
  1069. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1070. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1071. :"memory"\
  1072. );\
  1073. dst+=dstStride;\
  1074. src+=srcStride;\
  1075. }\
  1076. }
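// QPEL_OP instantiates the qpelN_mcXY motion-compensation functions: X and Y
// are the quarter-pel positions (0..3) in x and y, so mc00 is a plain copy,
// mc20/mc02 are the horizontal/vertical half-pel cases, mc22 the 2-D half-pel
// case, and the remaining positions combine the appropriate half-pel intermediates.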
  1077. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
  1078. \
  1079. static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1080. uint64_t temp[17*4];\
  1081. uint64_t *temp_ptr= temp;\
  1082. int count= 17;\
  1083. \
  1084. /*FIXME unroll */\
  1085. __asm__ volatile(\
  1086. "pxor %%mm7, %%mm7 \n\t"\
  1087. "1: \n\t"\
  1088. "movq (%0), %%mm0 \n\t"\
  1089. "movq (%0), %%mm1 \n\t"\
  1090. "movq 8(%0), %%mm2 \n\t"\
  1091. "movq 8(%0), %%mm3 \n\t"\
  1092. "punpcklbw %%mm7, %%mm0 \n\t"\
  1093. "punpckhbw %%mm7, %%mm1 \n\t"\
  1094. "punpcklbw %%mm7, %%mm2 \n\t"\
  1095. "punpckhbw %%mm7, %%mm3 \n\t"\
  1096. "movq %%mm0, (%1) \n\t"\
  1097. "movq %%mm1, 17*8(%1) \n\t"\
  1098. "movq %%mm2, 2*17*8(%1) \n\t"\
  1099. "movq %%mm3, 3*17*8(%1) \n\t"\
  1100. "add $8, %1 \n\t"\
  1101. "add %3, %0 \n\t"\
  1102. "decl %2 \n\t"\
  1103. " jnz 1b \n\t"\
  1104. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1105. : "r" ((x86_reg)srcStride)\
  1106. : "memory"\
  1107. );\
  1108. \
  1109. temp_ptr= temp;\
  1110. count=4;\
  1111. \
  1112. /*FIXME reorder for speed */\
  1113. __asm__ volatile(\
  1114. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1115. "1: \n\t"\
  1116. "movq (%0), %%mm0 \n\t"\
  1117. "movq 8(%0), %%mm1 \n\t"\
  1118. "movq 16(%0), %%mm2 \n\t"\
  1119. "movq 24(%0), %%mm3 \n\t"\
  1120. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1121. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1122. "add %4, %1 \n\t"\
  1123. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1124. \
  1125. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1126. "add %4, %1 \n\t"\
  1127. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1128. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
  1129. "add %4, %1 \n\t"\
  1130. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
  1131. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
  1132. "add %4, %1 \n\t"\
  1133. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
  1134. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
  1135. "add %4, %1 \n\t"\
  1136. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
  1137. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
  1138. "add %4, %1 \n\t"\
  1139. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
  1140. \
  1141. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
  1142. "add %4, %1 \n\t" \
  1143. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
  1144. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
  1145. \
  1146. "add $136, %0 \n\t"\
  1147. "add %6, %1 \n\t"\
  1148. "decl %2 \n\t"\
  1149. " jnz 1b \n\t"\
  1150. \
  1151. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1152. : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
  1153. :"memory"\
  1154. );\
  1155. }\
  1156. \
  1157. static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1158. uint64_t temp[9*2];\
  1159. uint64_t *temp_ptr= temp;\
  1160. int count= 9;\
  1161. \
  1162. /*FIXME unroll */\
  1163. __asm__ volatile(\
  1164. "pxor %%mm7, %%mm7 \n\t"\
  1165. "1: \n\t"\
  1166. "movq (%0), %%mm0 \n\t"\
  1167. "movq (%0), %%mm1 \n\t"\
  1168. "punpcklbw %%mm7, %%mm0 \n\t"\
  1169. "punpckhbw %%mm7, %%mm1 \n\t"\
  1170. "movq %%mm0, (%1) \n\t"\
  1171. "movq %%mm1, 9*8(%1) \n\t"\
  1172. "add $8, %1 \n\t"\
  1173. "add %3, %0 \n\t"\
  1174. "decl %2 \n\t"\
  1175. " jnz 1b \n\t"\
  1176. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1177. : "r" ((x86_reg)srcStride)\
  1178. : "memory"\
  1179. );\
  1180. \
  1181. temp_ptr= temp;\
  1182. count=2;\
  1183. \
  1184. /*FIXME reorder for speed */\
  1185. __asm__ volatile(\
  1186. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1187. "1: \n\t"\
  1188. "movq (%0), %%mm0 \n\t"\
  1189. "movq 8(%0), %%mm1 \n\t"\
  1190. "movq 16(%0), %%mm2 \n\t"\
  1191. "movq 24(%0), %%mm3 \n\t"\
  1192. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1193. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1194. "add %4, %1 \n\t"\
  1195. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1196. \
  1197. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1198. "add %4, %1 \n\t"\
  1199. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1200. \
  1201. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
  1202. "add %4, %1 \n\t"\
  1203. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
  1204. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
  1205. \
  1206. "add $72, %0 \n\t"\
  1207. "add %6, %1 \n\t"\
  1208. "decl %2 \n\t"\
  1209. " jnz 1b \n\t"\
  1210. \
  1211. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1212. : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
  1213. : "memory"\
  1214. );\
  1215. }\
  1216. \
  1217. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1218. OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
  1219. }\
  1220. \
  1221. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1222. uint64_t temp[8];\
  1223. uint8_t * const half= (uint8_t*)temp;\
  1224. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1225. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1226. }\
  1227. \
  1228. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1229. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
  1230. }\
  1231. \
  1232. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1233. uint64_t temp[8];\
  1234. uint8_t * const half= (uint8_t*)temp;\
  1235. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1236. OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
  1237. }\
  1238. \
  1239. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1240. uint64_t temp[8];\
  1241. uint8_t * const half= (uint8_t*)temp;\
  1242. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1243. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1244. }\
  1245. \
  1246. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1247. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1248. }\
  1249. \
  1250. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1251. uint64_t temp[8];\
  1252. uint8_t * const half= (uint8_t*)temp;\
  1253. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1254. OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
  1255. }\
  1256. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1257. uint64_t half[8 + 9];\
  1258. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1259. uint8_t * const halfHV= ((uint8_t*)half);\
  1260. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1261. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1262. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1263. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1264. }\
  1265. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1266. uint64_t half[8 + 9];\
  1267. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1268. uint8_t * const halfHV= ((uint8_t*)half);\
  1269. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1270. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1271. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1272. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1273. }\
  1274. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1275. uint64_t half[8 + 9];\
  1276. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1277. uint8_t * const halfHV= ((uint8_t*)half);\
  1278. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1279. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1280. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1281. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1282. }\
  1283. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1284. uint64_t half[8 + 9];\
  1285. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1286. uint8_t * const halfHV= ((uint8_t*)half);\
  1287. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1288. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1289. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1290. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1291. }\
  1292. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1293. uint64_t half[8 + 9];\
  1294. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1295. uint8_t * const halfHV= ((uint8_t*)half);\
  1296. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1297. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1298. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1299. }\
  1300. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1301. uint64_t half[8 + 9];\
  1302. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1303. uint8_t * const halfHV= ((uint8_t*)half);\
  1304. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1305. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1306. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1307. }\
  1308. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1309. uint64_t half[8 + 9];\
  1310. uint8_t * const halfH= ((uint8_t*)half);\
  1311. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1312. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1313. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1314. }\
  1315. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1316. uint64_t half[8 + 9];\
  1317. uint8_t * const halfH= ((uint8_t*)half);\
  1318. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1319. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1320. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1321. }\
  1322. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1323. uint64_t half[9];\
  1324. uint8_t * const halfH= ((uint8_t*)half);\
  1325. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1326. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1327. }\
  1328. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1329. OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
  1330. }\
  1331. \
  1332. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1333. uint64_t temp[32];\
  1334. uint8_t * const half= (uint8_t*)temp;\
  1335. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1336. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1337. }\
  1338. \
  1339. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1340. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
  1341. }\
  1342. \
  1343. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1344. uint64_t temp[32];\
  1345. uint8_t * const half= (uint8_t*)temp;\
  1346. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1347. OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
  1348. }\
  1349. \
  1350. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1351. uint64_t temp[32];\
  1352. uint8_t * const half= (uint8_t*)temp;\
  1353. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  1354. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1355. }\
  1356. \
  1357. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1358. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1359. }\
  1360. \
  1361. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1362. uint64_t temp[32];\
  1363. uint8_t * const half= (uint8_t*)temp;\
  1364. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  1365. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
  1366. }\
  1367. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1368. uint64_t half[16*2 + 17*2];\
  1369. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1370. uint8_t * const halfHV= ((uint8_t*)half);\
  1371. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1372. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1373. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1374. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1375. }\
  1376. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1377. uint64_t half[16*2 + 17*2];\
  1378. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1379. uint8_t * const halfHV= ((uint8_t*)half);\
  1380. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1381. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1382. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1383. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1384. }\
  1385. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1386. uint64_t half[16*2 + 17*2];\
  1387. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1388. uint8_t * const halfHV= ((uint8_t*)half);\
  1389. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1390. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1391. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1392. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1393. }\
  1394. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1395. uint64_t half[16*2 + 17*2];\
  1396. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1397. uint8_t * const halfHV= ((uint8_t*)half);\
  1398. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1399. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1400. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1401. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1402. }\
  1403. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1404. uint64_t half[16*2 + 17*2];\
  1405. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1406. uint8_t * const halfHV= ((uint8_t*)half);\
  1407. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1408. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1409. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  1410. }\
  1411. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1412. uint64_t half[16*2 + 17*2];\
  1413. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  1414. uint8_t * const halfHV= ((uint8_t*)half);\
  1415. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1416. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  1417. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  1418. }\
  1419. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1420. uint64_t half[17*2];\
  1421. uint8_t * const halfH= ((uint8_t*)half);\
  1422. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1423. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  1424. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1425. }\
  1426. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1427. uint64_t half[17*2];\
  1428. uint8_t * const halfH= ((uint8_t*)half);\
  1429. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1430. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  1431. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1432. }\
  1433. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1434. uint64_t half[17*2];\
  1435. uint8_t * const halfH= ((uint8_t*)half);\
  1436. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  1437. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  1438. }
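/* The mcXY suffixes above encode the quarter-pel position: X is the
 * horizontal and Y the vertical offset in quarter-pel units (mc00 = integer
 * sample, mc20 = horizontal half-pel, mc22 = centre, ...).  halfH holds a
 * horizontally filtered half-pel plane, halfHV one filtered both ways, and
 * the pixels*_l2 helpers blend two such planes.  A plain-C sketch of that
 * blend for the 8-pixel-wide case, assuming put-style rounding and a packed
 * (width-strided) second plane; illustrative only, not compiled:
 */
#if 0
static void put_pixels8_l2_sketch(uint8_t *dst, const uint8_t *src1,
                                  const uint8_t *src2, int dst_stride,
                                  int src1_stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src1[x] + src2[x] + 1) >> 1; /* rounding average */
        dst  += dst_stride;
        src1 += src1_stride;
        src2 += 8; /* second plane assumed packed at the block width */
    }
}
#endif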
  1439. #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
  1440. #define AVG_3DNOW_OP(a,b,temp, size) \
  1441. "mov" #size " " #b ", " #temp " \n\t"\
  1442. "pavgusb " #temp ", " #a " \n\t"\
  1443. "mov" #size " " #a ", " #b " \n\t"
  1444. #define AVG_MMX2_OP(a,b,temp, size) \
  1445. "mov" #size " " #b ", " #temp " \n\t"\
  1446. "pavgb " #temp ", " #a " \n\t"\
  1447. "mov" #size " " #a ", " #b " \n\t"
  1448. QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
  1449. QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
  1450. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  1451. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
  1452. QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
  1453. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  1454. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
  1455. QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
  1456. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
  1457. /***********************************/
  1458. /* bilinear qpel: not compliant with any spec, only used with -lavdopts fast */
  1459. #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
  1460. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1461. OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
  1462. }
  1463. #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
  1464. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1465. OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
  1466. }
  1467. #define QPEL_2TAP(OPNAME, SIZE, MMX)\
  1468. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
  1469. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
  1470. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
  1471. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
  1472. OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
  1473. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
  1474. OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
  1475. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
  1476. OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
  1477. static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1478. OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
  1479. }\
  1480. static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1481. OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
  1482. }\
  1483. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
  1484. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
  1485. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
  1486. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
  1487. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
  1488. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
  1489. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
  1490. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
  1491. QPEL_2TAP(put_, 16, mmx2)
  1492. QPEL_2TAP(avg_, 16, mmx2)
  1493. QPEL_2TAP(put_, 8, mmx2)
  1494. QPEL_2TAP(avg_, 8, mmx2)
  1495. QPEL_2TAP(put_, 16, 3dnow)
  1496. QPEL_2TAP(avg_, 16, 3dnow)
  1497. QPEL_2TAP(put_, 8, 3dnow)
  1498. QPEL_2TAP(avg_, 8, 3dnow)
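/* The 2-tap "qpel" above replaces the spec filters with plain half-pel
 * averaging of neighbouring samples, which is why it is only selected via
 * -lavdopts fast.  A scalar sketch of the _x2_ building block used for the
 * mc20 case (rounding average of horizontal neighbours); illustrative only:
 */
#if 0
static void put_pixels8_x2_sketch(uint8_t *dst, const uint8_t *src,
                                  int stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src[x] + src[x + 1] + 1) >> 1; /* horizontal half-pel */
        dst += stride;
        src += stride;
    }
}
#endif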
  1499. #if HAVE_YASM
  1500. typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
  1501. x86_reg linesize, x86_reg start_y,
  1502. x86_reg end_y, x86_reg block_h,
  1503. x86_reg start_x, x86_reg end_x,
  1504. x86_reg block_w);
  1505. extern emu_edge_core_func ff_emu_edge_core_mmx;
  1506. extern emu_edge_core_func ff_emu_edge_core_sse;
  1507. static av_always_inline
  1508. void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
  1509. int block_w, int block_h,
  1510. int src_x, int src_y, int w, int h,
  1511. emu_edge_core_func *core_fn)
  1512. {
  1513. int start_y, start_x, end_y, end_x, src_y_add=0;
  1514. if(src_y>= h){
  1515. src_y_add = h-1-src_y;
  1516. src_y=h-1;
  1517. }else if(src_y<=-block_h){
  1518. src_y_add = 1-block_h-src_y;
  1519. src_y=1-block_h;
  1520. }
  1521. if(src_x>= w){
  1522. src+= (w-1-src_x);
  1523. src_x=w-1;
  1524. }else if(src_x<=-block_w){
  1525. src+= (1-block_w-src_x);
  1526. src_x=1-block_w;
  1527. }
  1528. start_y= FFMAX(0, -src_y);
  1529. start_x= FFMAX(0, -src_x);
  1530. end_y= FFMIN(block_h, h-src_y);
  1531. end_x= FFMIN(block_w, w-src_x);
  1532. assert(start_x < end_x && block_w > 0);
  1533. assert(start_y < end_y && block_h > 0);
  1534. // fill in the to-be-copied part plus everything above and below it
  1535. src += (src_y_add+start_y)*linesize + start_x;
  1536. buf += start_x;
  1537. core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
  1538. }
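/* Net effect of the wrapper plus asm core, sketched in plain C under the
 * assumption that out-of-picture pixels are replicated from the nearest
 * edge (the usual emulated_edge_mc semantics); illustrative only:
 */
#if 0
static void emulated_edge_mc_sketch(uint8_t *buf, const uint8_t *src, int linesize,
                                    int block_w, int block_h,
                                    int src_x, int src_y, int w, int h)
{
    int x, y;
    for (y = 0; y < block_h; y++) {
        int sy = av_clip(src_y + y, 0, h - 1);
        for (x = 0; x < block_w; x++) {
            int sx = av_clip(src_x + x, 0, w - 1);
            /* src points at picture position (src_x, src_y) */
            buf[y * linesize + x] = src[(sy - src_y) * linesize + (sx - src_x)];
        }
    }
}
#endif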
  1539. #if ARCH_X86_32
  1540. static av_noinline
  1541. void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
  1542. int block_w, int block_h,
  1543. int src_x, int src_y, int w, int h)
  1544. {
  1545. emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
  1546. w, h, &ff_emu_edge_core_mmx);
  1547. }
  1548. #endif
  1549. static av_noinline
  1550. void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
  1551. int block_w, int block_h,
  1552. int src_x, int src_y, int w, int h)
  1553. {
  1554. emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
  1555. w, h, &ff_emu_edge_core_sse);
  1556. }
  1557. #endif /* HAVE_YASM */
  1558. typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
  1559. int linesize, int block_w, int block_h,
  1560. int src_x, int src_y, int w, int h);
  1561. static av_always_inline
  1562. void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  1563. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
  1564. emulated_edge_mc_func *emu_edge_fn)
  1565. {
  1566. const int w = 8;
  1567. const int ix = ox>>(16+shift);
  1568. const int iy = oy>>(16+shift);
  1569. const int oxs = ox>>4;
  1570. const int oys = oy>>4;
  1571. const int dxxs = dxx>>4;
  1572. const int dxys = dxy>>4;
  1573. const int dyxs = dyx>>4;
  1574. const int dyys = dyy>>4;
  1575. const uint16_t r4[4] = {r,r,r,r};
  1576. const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
  1577. const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
  1578. const uint64_t shift2 = 2*shift;
  1579. uint8_t edge_buf[(h+1)*stride];
  1580. int x, y;
  1581. const int dxw = (dxx-(1<<(16+shift)))*(w-1);
  1582. const int dyh = (dyy-(1<<(16+shift)))*(h-1);
  1583. const int dxh = dxy*(h-1);
  1584. const int dyw = dyx*(w-1);
  1585. if( // non-constant fullpel offset (3% of blocks)
  1586. ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
  1587. (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
  1588. // uses more than 16 bits of subpel mv (only at huge resolutions)
  1589. || (dxx|dxy|dyx|dyy)&15 )
  1590. {
  1591. // FIXME: could still use MMX for some of the rows
  1592. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
  1593. return;
  1594. }
  1595. src += ix + iy*stride;
  1596. if( (unsigned)ix >= width-w ||
  1597. (unsigned)iy >= height-h )
  1598. {
  1599. emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
  1600. src = edge_buf;
  1601. }
  1602. __asm__ volatile(
  1603. "movd %0, %%mm6 \n\t"
  1604. "pxor %%mm7, %%mm7 \n\t"
  1605. "punpcklwd %%mm6, %%mm6 \n\t"
  1606. "punpcklwd %%mm6, %%mm6 \n\t"
  1607. :: "r"(1<<shift)
  1608. );
  1609. for(x=0; x<w; x+=4){
  1610. uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
  1611. oxs - dxys + dxxs*(x+1),
  1612. oxs - dxys + dxxs*(x+2),
  1613. oxs - dxys + dxxs*(x+3) };
  1614. uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
  1615. oys - dyys + dyxs*(x+1),
  1616. oys - dyys + dyxs*(x+2),
  1617. oys - dyys + dyxs*(x+3) };
  1618. for(y=0; y<h; y++){
  1619. __asm__ volatile(
  1620. "movq %0, %%mm4 \n\t"
  1621. "movq %1, %%mm5 \n\t"
  1622. "paddw %2, %%mm4 \n\t"
  1623. "paddw %3, %%mm5 \n\t"
  1624. "movq %%mm4, %0 \n\t"
  1625. "movq %%mm5, %1 \n\t"
  1626. "psrlw $12, %%mm4 \n\t"
  1627. "psrlw $12, %%mm5 \n\t"
  1628. : "+m"(*dx4), "+m"(*dy4)
  1629. : "m"(*dxy4), "m"(*dyy4)
  1630. );
  1631. __asm__ volatile(
  1632. "movq %%mm6, %%mm2 \n\t"
  1633. "movq %%mm6, %%mm1 \n\t"
  1634. "psubw %%mm4, %%mm2 \n\t"
  1635. "psubw %%mm5, %%mm1 \n\t"
  1636. "movq %%mm2, %%mm0 \n\t"
  1637. "movq %%mm4, %%mm3 \n\t"
  1638. "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
  1639. "pmullw %%mm5, %%mm3 \n\t" // dx*dy
  1640. "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
  1641. "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
  1642. "movd %4, %%mm5 \n\t"
  1643. "movd %3, %%mm4 \n\t"
  1644. "punpcklbw %%mm7, %%mm5 \n\t"
  1645. "punpcklbw %%mm7, %%mm4 \n\t"
  1646. "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
  1647. "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
  1648. "movd %2, %%mm5 \n\t"
  1649. "movd %1, %%mm4 \n\t"
  1650. "punpcklbw %%mm7, %%mm5 \n\t"
  1651. "punpcklbw %%mm7, %%mm4 \n\t"
  1652. "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
  1653. "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
  1654. "paddw %5, %%mm1 \n\t"
  1655. "paddw %%mm3, %%mm2 \n\t"
  1656. "paddw %%mm1, %%mm0 \n\t"
  1657. "paddw %%mm2, %%mm0 \n\t"
  1658. "psrlw %6, %%mm0 \n\t"
  1659. "packuswb %%mm0, %%mm0 \n\t"
  1660. "movd %%mm0, %0 \n\t"
  1661. : "=m"(dst[x+y*stride])
  1662. : "m"(src[0]), "m"(src[1]),
  1663. "m"(src[stride]), "m"(src[stride+1]),
  1664. "m"(*r4), "m"(shift2)
  1665. );
  1666. src += stride;
  1667. }
  1668. src += 4-h*stride;
  1669. }
  1670. }
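/* Per output pixel the inner loop performs a bilinear blend of the four
 * neighbouring source samples, with s = 1<<shift and (dx, dy) the sub-pel
 * fraction in 1/s units:
 *
 *   dst = ( src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
 *         + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r ) >> (2*shift)
 *
 * i.e. essentially the same blend the ff_gmc_c fallback computes for blocks
 * that take the slow path above.
 */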
  1671. #if HAVE_YASM
  1672. #if ARCH_X86_32
  1673. static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  1674. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
  1675. {
  1676. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1677. width, height, &emulated_edge_mc_mmx);
  1678. }
  1679. #endif
  1680. static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  1681. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
  1682. {
  1683. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1684. width, height, &emulated_edge_mc_sse);
  1685. }
  1686. #else
  1687. static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  1688. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
  1689. {
  1690. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1691. width, height, &ff_emulated_edge_mc_8);
  1692. }
  1693. #endif
  1694. #define PREFETCH(name, op) \
  1695. static void name(void *mem, int stride, int h){\
  1696. const uint8_t *p= mem;\
  1697. do{\
  1698. __asm__ volatile(#op" %0" :: "m"(*p));\
  1699. p+= stride;\
  1700. }while(--h);\
  1701. }
  1702. PREFETCH(prefetch_mmx2, prefetcht0)
  1703. PREFETCH(prefetch_3dnow, prefetch)
  1704. #undef PREFETCH
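/* The PREFETCH macro issues one prefetch per row so the subsequent motion
 * compensation reads are already in cache.  A roughly equivalent portable
 * sketch using the GCC builtin (illustrative; the inline asm is kept so the
 * exact instruction stays under our control):
 */
#if 0
static void prefetch_sketch(void *mem, int stride, int h)
{
    const uint8_t *p = mem;
    do {
        __builtin_prefetch(p, 0, 3); /* read access, high temporal locality */
        p += stride;
    } while (--h);
}
#endif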
  1705. #include "h264_qpel_mmx.c"
  1706. void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
  1707. int stride, int h, int x, int y);
  1708. void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
  1709. int stride, int h, int x, int y);
  1710. void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
  1711. int stride, int h, int x, int y);
  1712. void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
  1713. int stride, int h, int x, int y);
  1714. void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
  1715. int stride, int h, int x, int y);
  1716. void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
  1717. int stride, int h, int x, int y);
  1718. void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
  1719. int stride, int h, int x, int y);
  1720. void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
  1721. int stride, int h, int x, int y);
  1722. void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
  1723. int stride, int h, int x, int y);
  1724. void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1725. int stride, int h, int x, int y);
  1726. void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
  1727. int stride, int h, int x, int y);
  1728. void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1729. int stride, int h, int x, int y);
  1730. #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
  1731. void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
  1732. (uint8_t *dst, uint8_t *src,\
  1733. int stride, int h, int x, int y);
  1734. CHROMA_MC(put, 2, 10, mmxext)
  1735. CHROMA_MC(avg, 2, 10, mmxext)
  1736. CHROMA_MC(put, 4, 10, mmxext)
  1737. CHROMA_MC(avg, 4, 10, mmxext)
  1738. CHROMA_MC(put, 8, 10, sse2)
  1739. CHROMA_MC(avg, 8, 10, sse2)
  1740. CHROMA_MC(put, 8, 10, avx)
  1741. CHROMA_MC(avg, 8, 10, avx)
  1742. /* CAVS specific */
  1743. void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1744. put_pixels8_mmx(dst, src, stride, 8);
  1745. }
  1746. void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1747. avg_pixels8_mmx(dst, src, stride, 8);
  1748. }
  1749. void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1750. put_pixels16_mmx(dst, src, stride, 16);
  1751. }
  1752. void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  1753. avg_pixels16_mmx(dst, src, stride, 16);
  1754. }
  1755. /* VC1 specific */
  1756. void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
  1757. put_pixels8_mmx(dst, src, stride, 8);
  1758. }
  1759. void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
  1760. avg_pixels8_mmx2(dst, src, stride, 8);
  1761. }
  1762. /* only used in VP3/5/6 */
  1763. static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
  1764. {
  1765. // START_TIMER
  1766. MOVQ_BFE(mm6);
  1767. __asm__ volatile(
  1768. "1: \n\t"
  1769. "movq (%1), %%mm0 \n\t"
  1770. "movq (%2), %%mm1 \n\t"
  1771. "movq (%1,%4), %%mm2 \n\t"
  1772. "movq (%2,%4), %%mm3 \n\t"
  1773. PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
  1774. "movq %%mm4, (%3) \n\t"
  1775. "movq %%mm5, (%3,%4) \n\t"
  1776. "movq (%1,%4,2), %%mm0 \n\t"
  1777. "movq (%2,%4,2), %%mm1 \n\t"
  1778. "movq (%1,%5), %%mm2 \n\t"
  1779. "movq (%2,%5), %%mm3 \n\t"
  1780. "lea (%1,%4,4), %1 \n\t"
  1781. "lea (%2,%4,4), %2 \n\t"
  1782. PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
  1783. "movq %%mm4, (%3,%4,2) \n\t"
  1784. "movq %%mm5, (%3,%5) \n\t"
  1785. "lea (%3,%4,4), %3 \n\t"
  1786. "subl $4, %0 \n\t"
  1787. "jnz 1b \n\t"
  1788. :"+r"(h), "+r"(a), "+r"(b), "+r"(dst)
  1789. :"r"((x86_reg)stride), "r"((x86_reg)3L*stride)
  1790. :"memory");
  1791. // STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
  1792. }
  1793. static void put_vp_no_rnd_pixels16_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
  1794. {
  1795. put_vp_no_rnd_pixels8_l2_mmx(dst, a, b, stride, h);
  1796. put_vp_no_rnd_pixels8_l2_mmx(dst+8, a+8, b+8, stride, h);
  1797. }
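/* The NO_RND averaging used for VP3/5/6 truncates instead of rounding,
 * i.e. per byte dst = (a + b) >> 1 rather than (a + b + 1) >> 1.  Scalar
 * sketch of the assumed semantics; illustrative only:
 */
#if 0
static void put_vp_no_rnd_pixels8_l2_sketch(uint8_t *dst, const uint8_t *a,
                                            const uint8_t *b, int stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (a[x] + b[x]) >> 1; /* truncating average */
        dst += stride;
        a   += stride;
        b   += stride;
    }
}
#endif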
  1798. #if CONFIG_DIRAC_DECODER
  1799. #define DIRAC_PIXOP(OPNAME, EXT)\
  1800. void ff_ ## OPNAME ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1801. {\
  1802. OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
  1803. }\
  1804. void ff_ ## OPNAME ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1805. {\
  1806. OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
  1807. }\
  1808. void ff_ ## OPNAME ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  1809. {\
  1810. OPNAME ## _pixels16_ ## EXT(dst , src[0] , stride, h);\
  1811. OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
  1812. }
  1813. DIRAC_PIXOP(put, mmx)
  1814. DIRAC_PIXOP(avg, mmx)
  1815. DIRAC_PIXOP(avg, mmx2)
  1816. void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  1817. {
  1818. put_pixels16_sse2(dst, src[0], stride, h);
  1819. }
  1820. void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  1821. {
  1822. avg_pixels16_sse2(dst, src[0], stride, h);
  1823. }
  1824. void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  1825. {
  1826. put_pixels16_sse2(dst , src[0] , stride, h);
  1827. put_pixels16_sse2(dst+16, src[0]+16, stride, h);
  1828. }
  1829. void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  1830. {
  1831. avg_pixels16_sse2(dst , src[0] , stride, h);
  1832. avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
  1833. }
  1834. #endif
  1835. /* XXX: these functions should be removed as soon as all IDCTs have been
  1836. converted */
  1837. #if CONFIG_GPL
  1838. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  1839. {
  1840. ff_mmx_idct (block);
  1841. ff_put_pixels_clamped_mmx(block, dest, line_size);
  1842. }
  1843. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  1844. {
  1845. ff_mmx_idct (block);
  1846. ff_add_pixels_clamped_mmx(block, dest, line_size);
  1847. }
  1848. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  1849. {
  1850. ff_mmxext_idct (block);
  1851. ff_put_pixels_clamped_mmx(block, dest, line_size);
  1852. }
  1853. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  1854. {
  1855. ff_mmxext_idct (block);
  1856. ff_add_pixels_clamped_mmx(block, dest, line_size);
  1857. }
  1858. #endif
  1859. static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
  1860. {
  1861. ff_idct_xvid_mmx (block);
  1862. ff_put_pixels_clamped_mmx(block, dest, line_size);
  1863. }
  1864. static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
  1865. {
  1866. ff_idct_xvid_mmx (block);
  1867. ff_add_pixels_clamped_mmx(block, dest, line_size);
  1868. }
  1869. static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
  1870. {
  1871. ff_idct_xvid_mmx2 (block);
  1872. ff_put_pixels_clamped_mmx(block, dest, line_size);
  1873. }
  1874. static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
  1875. {
  1876. ff_idct_xvid_mmx2 (block);
  1877. ff_add_pixels_clamped_mmx(block, dest, line_size);
  1878. }
  1879. static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
  1880. {
  1881. int i;
  1882. __asm__ volatile("pxor %%mm7, %%mm7":);
  1883. for(i=0; i<blocksize; i+=2) {
  1884. __asm__ volatile(
  1885. "movq %0, %%mm0 \n\t"
  1886. "movq %1, %%mm1 \n\t"
  1887. "movq %%mm0, %%mm2 \n\t"
  1888. "movq %%mm1, %%mm3 \n\t"
  1889. "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
  1890. "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
  1891. "pslld $31, %%mm2 \n\t" // keep only the sign bit
  1892. "pxor %%mm2, %%mm1 \n\t"
  1893. "movq %%mm3, %%mm4 \n\t"
  1894. "pand %%mm1, %%mm3 \n\t"
  1895. "pandn %%mm1, %%mm4 \n\t"
  1896. "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  1897. "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  1898. "movq %%mm3, %1 \n\t"
  1899. "movq %%mm0, %0 \n\t"
  1900. :"+m"(mag[i]), "+m"(ang[i])
  1901. ::"memory"
  1902. );
  1903. }
  1904. __asm__ volatile("femms");
  1905. }
  1906. static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
  1907. {
  1908. int i;
  1909. __asm__ volatile(
  1910. "movaps %0, %%xmm5 \n\t"
  1911. ::"m"(ff_pdw_80000000[0])
  1912. );
  1913. for(i=0; i<blocksize; i+=4) {
  1914. __asm__ volatile(
  1915. "movaps %0, %%xmm0 \n\t"
  1916. "movaps %1, %%xmm1 \n\t"
  1917. "xorps %%xmm2, %%xmm2 \n\t"
  1918. "xorps %%xmm3, %%xmm3 \n\t"
  1919. "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
  1920. "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
  1921. "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
  1922. "xorps %%xmm2, %%xmm1 \n\t"
  1923. "movaps %%xmm3, %%xmm4 \n\t"
  1924. "andps %%xmm1, %%xmm3 \n\t"
  1925. "andnps %%xmm1, %%xmm4 \n\t"
  1926. "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  1927. "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  1928. "movaps %%xmm3, %1 \n\t"
  1929. "movaps %%xmm0, %0 \n\t"
  1930. :"+m"(mag[i]), "+m"(ang[i])
  1931. ::"memory"
  1932. );
  1933. }
  1934. }
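/* Both SIMD versions above implement the usual Vorbis magnitude/angle
 * decoupling; in scalar form the transform is essentially the following
 * (shown for reference, not compiled):
 */
#if 0
static void vorbis_inverse_coupling_sketch(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0f) {
            if (ang[i] > 0.0f) {
                ang[i]  = mag[i] - ang[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] += temp;
            }
        } else {
            if (ang[i] > 0.0f) {
                ang[i] += mag[i];
            } else {
                float temp = ang[i];
                ang[i]  = mag[i];
                mag[i] -= temp;
            }
        }
    }
}
#endif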
  1935. #define IF1(x) x
  1936. #define IF0(x)
  1937. #define MIX5(mono,stereo)\
  1938. __asm__ volatile(\
  1939. "movss 0(%2), %%xmm5 \n"\
  1940. "movss 8(%2), %%xmm6 \n"\
  1941. "movss 24(%2), %%xmm7 \n"\
  1942. "shufps $0, %%xmm5, %%xmm5 \n"\
  1943. "shufps $0, %%xmm6, %%xmm6 \n"\
  1944. "shufps $0, %%xmm7, %%xmm7 \n"\
  1945. "1: \n"\
  1946. "movaps (%0,%1), %%xmm0 \n"\
  1947. "movaps 0x400(%0,%1), %%xmm1 \n"\
  1948. "movaps 0x800(%0,%1), %%xmm2 \n"\
  1949. "movaps 0xc00(%0,%1), %%xmm3 \n"\
  1950. "movaps 0x1000(%0,%1), %%xmm4 \n"\
  1951. "mulps %%xmm5, %%xmm0 \n"\
  1952. "mulps %%xmm6, %%xmm1 \n"\
  1953. "mulps %%xmm5, %%xmm2 \n"\
  1954. "mulps %%xmm7, %%xmm3 \n"\
  1955. "mulps %%xmm7, %%xmm4 \n"\
  1956. stereo("addps %%xmm1, %%xmm0 \n")\
  1957. "addps %%xmm1, %%xmm2 \n"\
  1958. "addps %%xmm3, %%xmm0 \n"\
  1959. "addps %%xmm4, %%xmm2 \n"\
  1960. mono("addps %%xmm2, %%xmm0 \n")\
  1961. "movaps %%xmm0, (%0,%1) \n"\
  1962. stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
  1963. "add $16, %0 \n"\
  1964. "jl 1b \n"\
  1965. :"+&r"(i)\
  1966. :"r"(samples[0]+len), "r"(matrix)\
  1967. :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
  1968. "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
  1969. "memory"\
  1970. );
  1971. #define MIX_MISC(stereo)\
  1972. __asm__ volatile(\
  1973. "1: \n"\
  1974. "movaps (%3,%0), %%xmm0 \n"\
  1975. stereo("movaps %%xmm0, %%xmm1 \n")\
  1976. "mulps %%xmm4, %%xmm0 \n"\
  1977. stereo("mulps %%xmm5, %%xmm1 \n")\
  1978. "lea 1024(%3,%0), %1 \n"\
  1979. "mov %5, %2 \n"\
  1980. "2: \n"\
  1981. "movaps (%1), %%xmm2 \n"\
  1982. stereo("movaps %%xmm2, %%xmm3 \n")\
  1983. "mulps (%4,%2), %%xmm2 \n"\
  1984. stereo("mulps 16(%4,%2), %%xmm3 \n")\
  1985. "addps %%xmm2, %%xmm0 \n"\
  1986. stereo("addps %%xmm3, %%xmm1 \n")\
  1987. "add $1024, %1 \n"\
  1988. "add $32, %2 \n"\
  1989. "jl 2b \n"\
  1990. "movaps %%xmm0, (%3,%0) \n"\
  1991. stereo("movaps %%xmm1, 1024(%3,%0) \n")\
  1992. "add $16, %0 \n"\
  1993. "jl 1b \n"\
  1994. :"+&r"(i), "=&r"(j), "=&r"(k)\
  1995. :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
  1996. :"memory"\
  1997. );
  1998. static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
  1999. {
  2000. int (*matrix_cmp)[2] = (int(*)[2])matrix;
  2001. intptr_t i,j,k;
  2002. i = -len*sizeof(float);
  2003. if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
  2004. MIX5(IF0,IF1);
  2005. } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
  2006. MIX5(IF1,IF0);
  2007. } else {
  2008. DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
  2009. j = 2*in_ch*sizeof(float);
  2010. __asm__ volatile(
  2011. "1: \n"
  2012. "sub $8, %0 \n"
  2013. "movss (%2,%0), %%xmm4 \n"
  2014. "movss 4(%2,%0), %%xmm5 \n"
  2015. "shufps $0, %%xmm4, %%xmm4 \n"
  2016. "shufps $0, %%xmm5, %%xmm5 \n"
  2017. "movaps %%xmm4, (%1,%0,4) \n"
  2018. "movaps %%xmm5, 16(%1,%0,4) \n"
  2019. "jg 1b \n"
  2020. :"+&r"(j)
  2021. :"r"(matrix_simd), "r"(matrix)
  2022. :"memory"
  2023. );
  2024. if(out_ch == 2) {
  2025. MIX_MISC(IF1);
  2026. } else {
  2027. MIX_MISC(IF0);
  2028. }
  2029. }
  2030. }
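/* MIX5 handles the common 5-channel-to-stereo/mono layouts with hardcoded
 * coefficients in registers; MIX_MISC handles everything else.  A plain-C
 * sketch of what the generic path computes (assumed semantics: each output
 * channel is a weighted sum of all input channels, written back in place
 * over the first out_ch buffers); illustrative only:
 */
#if 0
static void ac3_downmix_sketch(float (*samples)[256], float (*matrix)[2],
                               int out_ch, int in_ch, int len)
{
    int i, j, k;
    for (i = 0; i < len; i++) {
        float v[2] = { 0.0f, 0.0f };
        for (j = 0; j < in_ch; j++)
            for (k = 0; k < out_ch; k++)
                v[k] += samples[j][i] * matrix[j][k];
        for (k = 0; k < out_ch; k++)
            samples[k][i] = v[k];
    }
}
#endif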
  2031. static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
  2032. x86_reg i = (len-4)*4;
  2033. __asm__ volatile(
  2034. "1: \n\t"
  2035. "movq (%2,%0), %%mm0 \n\t"
  2036. "movq 8(%2,%0), %%mm1 \n\t"
  2037. "pfmul (%3,%0), %%mm0 \n\t"
  2038. "pfmul 8(%3,%0), %%mm1 \n\t"
  2039. "movq %%mm0, (%1,%0) \n\t"
  2040. "movq %%mm1, 8(%1,%0) \n\t"
  2041. "sub $16, %0 \n\t"
  2042. "jge 1b \n\t"
  2043. "femms \n\t"
  2044. :"+r"(i)
  2045. :"r"(dst), "r"(src0), "r"(src1)
  2046. :"memory"
  2047. );
  2048. }
  2049. static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
  2050. x86_reg i = (len-8)*4;
  2051. __asm__ volatile(
  2052. "1: \n\t"
  2053. "movaps (%2,%0), %%xmm0 \n\t"
  2054. "movaps 16(%2,%0), %%xmm1 \n\t"
  2055. "mulps (%3,%0), %%xmm0 \n\t"
  2056. "mulps 16(%3,%0), %%xmm1 \n\t"
  2057. "movaps %%xmm0, (%1,%0) \n\t"
  2058. "movaps %%xmm1, 16(%1,%0) \n\t"
  2059. "sub $32, %0 \n\t"
  2060. "jge 1b \n\t"
  2061. :"+r"(i)
  2062. :"r"(dst), "r"(src0), "r"(src1)
  2063. :"memory"
  2064. );
  2065. }
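/* Both vector_fmul versions compute an element-wise product; the SSE loop
 * consumes 8 floats per iteration, the 3DNow! loop 4, both counting down.
 * Scalar equivalent for reference:
 */
#if 0
static void vector_fmul_sketch(float *dst, const float *src0,
                               const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}
#endif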
  2066. static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
  2067. x86_reg i = len*4-16;
  2068. __asm__ volatile(
  2069. "1: \n\t"
  2070. "pswapd 8(%1), %%mm0 \n\t"
  2071. "pswapd (%1), %%mm1 \n\t"
  2072. "pfmul (%3,%0), %%mm0 \n\t"
  2073. "pfmul 8(%3,%0), %%mm1 \n\t"
  2074. "movq %%mm0, (%2,%0) \n\t"
  2075. "movq %%mm1, 8(%2,%0) \n\t"
  2076. "add $16, %1 \n\t"
  2077. "sub $16, %0 \n\t"
  2078. "jge 1b \n\t"
  2079. :"+r"(i), "+r"(src1)
  2080. :"r"(dst), "r"(src0)
  2081. );
  2082. __asm__ volatile("femms");
  2083. }
  2084. static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
  2085. x86_reg i = len*4-32;
  2086. __asm__ volatile(
  2087. "1: \n\t"
  2088. "movaps 16(%1), %%xmm0 \n\t"
  2089. "movaps (%1), %%xmm1 \n\t"
  2090. "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
  2091. "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
  2092. "mulps (%3,%0), %%xmm0 \n\t"
  2093. "mulps 16(%3,%0), %%xmm1 \n\t"
  2094. "movaps %%xmm0, (%2,%0) \n\t"
  2095. "movaps %%xmm1, 16(%2,%0) \n\t"
  2096. "add $32, %1 \n\t"
  2097. "sub $32, %0 \n\t"
  2098. "jge 1b \n\t"
  2099. :"+r"(i), "+r"(src1)
  2100. :"r"(dst), "r"(src0)
  2101. );
  2102. }
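/* vector_fmul_reverse multiplies src0 by src1 read backwards (the pswapd /
 * shufps $0x1b above perform the reversal).  Scalar equivalent:
 */
#if 0
static void vector_fmul_reverse_sketch(float *dst, const float *src0,
                                       const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];
}
#endif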
  2103. static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
  2104. const float *src2, int len){
  2105. x86_reg i = (len-4)*4;
  2106. __asm__ volatile(
  2107. "1: \n\t"
  2108. "movq (%2,%0), %%mm0 \n\t"
  2109. "movq 8(%2,%0), %%mm1 \n\t"
  2110. "pfmul (%3,%0), %%mm0 \n\t"
  2111. "pfmul 8(%3,%0), %%mm1 \n\t"
  2112. "pfadd (%4,%0), %%mm0 \n\t"
  2113. "pfadd 8(%4,%0), %%mm1 \n\t"
  2114. "movq %%mm0, (%1,%0) \n\t"
  2115. "movq %%mm1, 8(%1,%0) \n\t"
  2116. "sub $16, %0 \n\t"
  2117. "jge 1b \n\t"
  2118. :"+r"(i)
  2119. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  2120. :"memory"
  2121. );
  2122. __asm__ volatile("femms");
  2123. }
  2124. static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
  2125. const float *src2, int len){
  2126. x86_reg i = (len-8)*4;
  2127. __asm__ volatile(
  2128. "1: \n\t"
  2129. "movaps (%2,%0), %%xmm0 \n\t"
  2130. "movaps 16(%2,%0), %%xmm1 \n\t"
  2131. "mulps (%3,%0), %%xmm0 \n\t"
  2132. "mulps 16(%3,%0), %%xmm1 \n\t"
  2133. "addps (%4,%0), %%xmm0 \n\t"
  2134. "addps 16(%4,%0), %%xmm1 \n\t"
  2135. "movaps %%xmm0, (%1,%0) \n\t"
  2136. "movaps %%xmm1, 16(%1,%0) \n\t"
  2137. "sub $32, %0 \n\t"
  2138. "jge 1b \n\t"
  2139. :"+r"(i)
  2140. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  2141. :"memory"
  2142. );
  2143. }
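/* vector_fmul_add is a multiply-accumulate over three arrays.  Scalar
 * equivalent:
 */
#if 0
static void vector_fmul_add_sketch(float *dst, const float *src0,
                                   const float *src1, const float *src2,
                                   int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}
#endif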
  2144. #if HAVE_6REGS
  2145. static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
  2146. const float *win, int len){
  2147. x86_reg i = -len*4;
  2148. x86_reg j = len*4-8;
  2149. __asm__ volatile(
  2150. "1: \n"
  2151. "pswapd (%5,%1), %%mm1 \n"
  2152. "movq (%5,%0), %%mm0 \n"
  2153. "pswapd (%4,%1), %%mm5 \n"
  2154. "movq (%3,%0), %%mm4 \n"
  2155. "movq %%mm0, %%mm2 \n"
  2156. "movq %%mm1, %%mm3 \n"
  2157. "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
  2158. "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
  2159. "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
  2160. "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
  2161. "pfadd %%mm3, %%mm2 \n"
  2162. "pfsub %%mm0, %%mm1 \n"
  2163. "pswapd %%mm2, %%mm2 \n"
  2164. "movq %%mm1, (%2,%0) \n"
  2165. "movq %%mm2, (%2,%1) \n"
  2166. "sub $8, %1 \n"
  2167. "add $8, %0 \n"
  2168. "jl 1b \n"
  2169. "femms \n"
  2170. :"+r"(i), "+r"(j)
  2171. :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
  2172. );
  2173. }
  2174. static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
  2175. const float *win, int len){
  2176. x86_reg i = -len*4;
  2177. x86_reg j = len*4-16;
  2178. __asm__ volatile(
  2179. "1: \n"
  2180. "movaps (%5,%1), %%xmm1 \n"
  2181. "movaps (%5,%0), %%xmm0 \n"
  2182. "movaps (%4,%1), %%xmm5 \n"
  2183. "movaps (%3,%0), %%xmm4 \n"
  2184. "shufps $0x1b, %%xmm1, %%xmm1 \n"
  2185. "shufps $0x1b, %%xmm5, %%xmm5 \n"
  2186. "movaps %%xmm0, %%xmm2 \n"
  2187. "movaps %%xmm1, %%xmm3 \n"
  2188. "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
  2189. "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
  2190. "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
  2191. "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
  2192. "addps %%xmm3, %%xmm2 \n"
  2193. "subps %%xmm0, %%xmm1 \n"
  2194. "shufps $0x1b, %%xmm2, %%xmm2 \n"
  2195. "movaps %%xmm1, (%2,%0) \n"
  2196. "movaps %%xmm2, (%2,%1) \n"
  2197. "sub $16, %1 \n"
  2198. "add $16, %0 \n"
  2199. "jl 1b \n"
  2200. :"+r"(i), "+r"(j)
  2201. :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
  2202. );
  2203. }
  2204. #endif /* HAVE_6REGS */
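/* vector_fmul_window performs the symmetric MDCT overlap-add windowing:
 * the loop walks i forward and j backward at the same time, matching the
 * per-register comments above.  Scalar equivalent, shown for reference:
 */
#if 0
static void vector_fmul_window_sketch(float *dst, const float *src0,
                                      const float *src1, const float *win,
                                      int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}
#endif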
  2205. static void vector_clipf_sse(float *dst, const float *src, float min, float max,
  2206. int len)
  2207. {
  2208. x86_reg i = (len-16)*4;
  2209. __asm__ volatile(
  2210. "movss %3, %%xmm4 \n"
  2211. "movss %4, %%xmm5 \n"
  2212. "shufps $0, %%xmm4, %%xmm4 \n"
  2213. "shufps $0, %%xmm5, %%xmm5 \n"
  2214. "1: \n\t"
  2215. "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
  2216. "movaps 16(%2,%0), %%xmm1 \n\t"
  2217. "movaps 32(%2,%0), %%xmm2 \n\t"
  2218. "movaps 48(%2,%0), %%xmm3 \n\t"
  2219. "maxps %%xmm4, %%xmm0 \n\t"
  2220. "maxps %%xmm4, %%xmm1 \n\t"
  2221. "maxps %%xmm4, %%xmm2 \n\t"
  2222. "maxps %%xmm4, %%xmm3 \n\t"
  2223. "minps %%xmm5, %%xmm0 \n\t"
  2224. "minps %%xmm5, %%xmm1 \n\t"
  2225. "minps %%xmm5, %%xmm2 \n\t"
  2226. "minps %%xmm5, %%xmm3 \n\t"
  2227. "movaps %%xmm0, (%1,%0) \n\t"
  2228. "movaps %%xmm1, 16(%1,%0) \n\t"
  2229. "movaps %%xmm2, 32(%1,%0) \n\t"
  2230. "movaps %%xmm3, 48(%1,%0) \n\t"
  2231. "sub $64, %0 \n\t"
  2232. "jge 1b \n\t"
  2233. :"+&r"(i)
  2234. :"r"(dst), "r"(src), "m"(min), "m"(max)
  2235. :"memory"
  2236. );
  2237. }
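/* vector_clipf clamps every element to [min, max]; the SSE loop processes
 * 16 floats per iteration, so len is assumed to be a multiple of 16.
 * Scalar equivalent:
 */
#if 0
static void vector_clipf_sketch(float *dst, const float *src,
                                float min, float max, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = FFMIN(FFMAX(src[i], min), max);
}
#endif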
  2238. void ff_vp3_idct_mmx(int16_t *input_data);
  2239. void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
  2240. void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
  2241. void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
  2242. void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
  2243. void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
  2244. void ff_vp3_idct_sse2(int16_t *input_data);
  2245. void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
  2246. void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
  2247. int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
  2248. int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
  2249. int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
  2250. int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
  2251. int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
  2252. void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input,
  2253. const int16_t *window, unsigned int len);
  2254. void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
  2255. const int16_t *window, unsigned int len);
  2256. void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input,
  2257. const int16_t *window, unsigned int len);
  2258. void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input,
  2259. const int16_t *window, unsigned int len);
  2260. void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input,
  2261. const int16_t *window, unsigned int len);
  2262. void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
  2263. const int16_t *window, unsigned int len);
  2264. void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
  2265. int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
  2266. int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
  2267. float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
  2268. void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src, int32_t min,
  2269. int32_t max, unsigned int len);
  2270. void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src, int32_t min,
  2271. int32_t max, unsigned int len);
  2272. void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, int32_t min,
  2273. int32_t max, unsigned int len);
  2274. void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src, int32_t min,
  2275. int32_t max, unsigned int len);
  2276. extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
  2277. const float *src1, int len);
  2278. extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
  2279. const float *src1, int len);
  2280. void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
  2281. {
  2282. int mm_flags = av_get_cpu_flags();
  2283. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  2284. const int bit_depth = avctx->bits_per_raw_sample;
  2285. if (avctx->dsp_mask) {
  2286. if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
  2287. mm_flags |= (avctx->dsp_mask & 0xffff);
  2288. else
  2289. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  2290. }
  2291. #if 0
  2292. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  2293. if (mm_flags & AV_CPU_FLAG_MMX)
  2294. av_log(avctx, AV_LOG_INFO, " mmx");
  2295. if (mm_flags & AV_CPU_FLAG_MMX2)
  2296. av_log(avctx, AV_LOG_INFO, " mmx2");
  2297. if (mm_flags & AV_CPU_FLAG_3DNOW)
  2298. av_log(avctx, AV_LOG_INFO, " 3dnow");
  2299. if (mm_flags & AV_CPU_FLAG_SSE)
  2300. av_log(avctx, AV_LOG_INFO, " sse");
  2301. if (mm_flags & AV_CPU_FLAG_SSE2)
  2302. av_log(avctx, AV_LOG_INFO, " sse2");
  2303. av_log(avctx, AV_LOG_INFO, "\n");
  2304. #endif
  2305. if (mm_flags & AV_CPU_FLAG_MMX) {
  2306. const int idct_algo= avctx->idct_algo;
  2307. if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
  2308. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  2309. c->idct_put= ff_simple_idct_put_mmx;
  2310. c->idct_add= ff_simple_idct_add_mmx;
  2311. c->idct = ff_simple_idct_mmx;
  2312. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  2313. #if CONFIG_GPL
  2314. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  2315. if(mm_flags & AV_CPU_FLAG_MMX2){
  2316. c->idct_put= ff_libmpeg2mmx2_idct_put;
  2317. c->idct_add= ff_libmpeg2mmx2_idct_add;
  2318. c->idct = ff_mmxext_idct;
  2319. }else{
  2320. c->idct_put= ff_libmpeg2mmx_idct_put;
  2321. c->idct_add= ff_libmpeg2mmx_idct_add;
  2322. c->idct = ff_mmx_idct;
  2323. }
  2324. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  2325. #endif
  2326. }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
  2327. idct_algo==FF_IDCT_VP3 && HAVE_YASM){
  2328. if(mm_flags & AV_CPU_FLAG_SSE2){
  2329. c->idct_put= ff_vp3_idct_put_sse2;
  2330. c->idct_add= ff_vp3_idct_add_sse2;
  2331. c->idct = ff_vp3_idct_sse2;
  2332. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2333. }else{
  2334. c->idct_put= ff_vp3_idct_put_mmx;
  2335. c->idct_add= ff_vp3_idct_add_mmx;
  2336. c->idct = ff_vp3_idct_mmx;
  2337. c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
  2338. }
  2339. }else if(idct_algo==FF_IDCT_CAVS){
  2340. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2341. }else if(idct_algo==FF_IDCT_XVIDMMX){
  2342. if(mm_flags & AV_CPU_FLAG_SSE2){
  2343. c->idct_put= ff_idct_xvid_sse2_put;
  2344. c->idct_add= ff_idct_xvid_sse2_add;
  2345. c->idct = ff_idct_xvid_sse2;
  2346. c->idct_permutation_type= FF_SSE2_IDCT_PERM;
  2347. }else if(mm_flags & AV_CPU_FLAG_MMX2){
  2348. c->idct_put= ff_idct_xvid_mmx2_put;
  2349. c->idct_add= ff_idct_xvid_mmx2_add;
  2350. c->idct = ff_idct_xvid_mmx2;
  2351. }else{
  2352. c->idct_put= ff_idct_xvid_mmx_put;
  2353. c->idct_add= ff_idct_xvid_mmx_add;
  2354. c->idct = ff_idct_xvid_mmx;
  2355. }
  2356. }
  2357. }
  2358. c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
  2359. c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
  2360. c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
  2361. if (!high_bit_depth) {
  2362. c->clear_block = clear_block_mmx;
  2363. c->clear_blocks = clear_blocks_mmx;
  2364. if ((mm_flags & AV_CPU_FLAG_SSE) &&
  2365. !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
  2366. /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
  2367. c->clear_block = clear_block_sse;
  2368. c->clear_blocks = clear_blocks_sse;
  2369. }
  2370. }
  2371. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  2372. c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  2373. c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  2374. c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  2375. c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
  2376. if (!high_bit_depth) {
  2377. SET_HPEL_FUNCS(put, 0, 16, mmx);
  2378. SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
  2379. SET_HPEL_FUNCS(avg, 0, 16, mmx);
  2380. SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
  2381. SET_HPEL_FUNCS(put, 1, 8, mmx);
  2382. SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
  2383. SET_HPEL_FUNCS(avg, 1, 8, mmx);
  2384. SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
  2385. }
  2386. #if ARCH_X86_32 || !HAVE_YASM
  2387. c->gmc= gmc_mmx;
  2388. #endif
  2389. #if ARCH_X86_32 && HAVE_YASM
  2390. if (!high_bit_depth)
  2391. c->emulated_edge_mc = emulated_edge_mc_mmx;
  2392. #endif
  2393. c->add_bytes= add_bytes_mmx;
  2394. if (!high_bit_depth)
  2395. c->draw_edges = draw_edges_mmx;
  2396. c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
  2397. c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
  2398. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  2399. c->h263_v_loop_filter= h263_v_loop_filter_mmx;
  2400. c->h263_h_loop_filter= h263_h_loop_filter_mmx;
  2401. }
  2402. #if HAVE_YASM
  2403. if (!high_bit_depth) {
  2404. c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
  2405. c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
  2406. }
  2407. c->vector_clip_int32 = ff_vector_clip_int32_mmx;
  2408. #endif
  2409. if (mm_flags & AV_CPU_FLAG_MMX2) {
  2410. c->prefetch = prefetch_mmx2;
  2411. if (!high_bit_depth) {
  2412. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
  2413. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
  2414. c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
  2415. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
  2416. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
  2417. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
  2418. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
  2419. c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
  2420. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
  2421. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
  2422. }
  2423. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2424. if (!high_bit_depth) {
  2425. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
  2426. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
  2427. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
  2428. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
  2429. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
  2430. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
  2431. }
  2432. if (CONFIG_VP3_DECODER && HAVE_YASM) {
  2433. c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
  2434. c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
  2435. }
  2436. }
  2437. if (CONFIG_VP3_DECODER && HAVE_YASM) {
  2438. c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
  2439. }
  2440. if (CONFIG_VP3_DECODER
  2441. && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
  2442. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
  2443. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
  2444. }
  2445. #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
  2446. c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
  2447. c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
  2448. c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
  2449. c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
  2450. c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
  2451. c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
  2452. c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
  2453. c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
  2454. c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
  2455. c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
  2456. c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
  2457. c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
  2458. c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
  2459. c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
  2460. c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
  2461. c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU
  2462. SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
  2463. SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
  2464. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
  2465. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
  2466. SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
  2467. SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
  2468. if (!high_bit_depth) {
  2469. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
  2470. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
  2471. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
  2472. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
  2473. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
  2474. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
  2475. }
  2476. else if (bit_depth == 10) {
  2477. #if HAVE_YASM
  2478. #if !ARCH_X86_64
  2479. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
  2480. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
  2481. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
  2482. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
  2483. #endif
  2484. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
  2485. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
  2486. #endif
  2487. }
  2488. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
  2489. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
  2490. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
  2491. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
  2492. #if HAVE_YASM
  2493. if (!high_bit_depth) {
  2494. c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
  2495. c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
  2496. c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
  2497. c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
  2498. }
  2499. if (bit_depth == 10) {
  2500. c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_10_mmxext;
  2501. c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_10_mmxext;
  2502. c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_10_mmxext;
  2503. c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_10_mmxext;
  2504. }
  2505. c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
  2506. #endif
  2507. #if HAVE_7REGS
  2508. if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
  2509. c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
  2510. #endif
  2511. } else if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
  2512. c->prefetch = prefetch_3dnow;
  2513. if (!high_bit_depth) {
  2514. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  2515. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  2516. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  2517. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  2518. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  2519. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  2520. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  2521. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  2522. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  2523. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  2524. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2525. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  2526. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  2527. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  2528. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  2529. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  2530. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  2531. }
  2532. }
  2533. if (CONFIG_VP3_DECODER
  2534. && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
  2535. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
  2536. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
  2537. }
  2538. SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
  2539. SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
  2540. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
  2541. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
  2542. SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
  2543. SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
  2544. if (!high_bit_depth) {
  2545. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
  2546. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
  2547. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
  2548. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
  2549. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
  2550. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
  2551. }
  2552. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
  2553. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
  2554. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
  2555. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
  2556. #if HAVE_YASM
  2557. if (!high_bit_depth) {
  2558. c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
  2559. c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
  2560. }
  2561. #endif
  2562. }
  2563. #define H264_QPEL_FUNCS(x, y, CPU)\
  2564. c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
  2565. c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
  2566. c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
  2567. c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
  2568. if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
  2569. // these functions are slower than MMX on AMD, but faster on Intel
  2570. if (!high_bit_depth) {
  2571. c->put_pixels_tab[0][0] = put_pixels16_sse2;
  2572. c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
  2573. c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
  2574. H264_QPEL_FUNCS(0, 0, sse2);
  2575. }
  2576. }
  2577. if(mm_flags & AV_CPU_FLAG_SSE2){
  2578. if (!high_bit_depth) {
  2579. H264_QPEL_FUNCS(0, 1, sse2);
  2580. H264_QPEL_FUNCS(0, 2, sse2);
  2581. H264_QPEL_FUNCS(0, 3, sse2);
  2582. H264_QPEL_FUNCS(1, 1, sse2);
  2583. H264_QPEL_FUNCS(1, 2, sse2);
  2584. H264_QPEL_FUNCS(1, 3, sse2);
  2585. H264_QPEL_FUNCS(2, 1, sse2);
  2586. H264_QPEL_FUNCS(2, 2, sse2);
  2587. H264_QPEL_FUNCS(2, 3, sse2);
  2588. H264_QPEL_FUNCS(3, 1, sse2);
  2589. H264_QPEL_FUNCS(3, 2, sse2);
  2590. H264_QPEL_FUNCS(3, 3, sse2);
  2591. }
  2592. #if HAVE_YASM
  2593. #define H264_QPEL_FUNCS_10(x, y, CPU)\
  2594. c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU;\
  2595. c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU;\
  2596. c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU;\
  2597. c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU;
  2598. if (bit_depth == 10) {
  2599. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
  2600. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
  2601. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
  2602. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
  2603. H264_QPEL_FUNCS_10(1, 0, sse2_cache64)
  2604. H264_QPEL_FUNCS_10(2, 0, sse2_cache64)
  2605. H264_QPEL_FUNCS_10(3, 0, sse2_cache64)
  2606. c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_sse2;
  2607. c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_sse2;
  2608. }
  2609. #endif
  2610. }
  2611. #if HAVE_SSSE3
  2612. if(mm_flags & AV_CPU_FLAG_SSSE3){
  2613. if (!high_bit_depth) {
  2614. H264_QPEL_FUNCS(1, 0, ssse3);
  2615. H264_QPEL_FUNCS(1, 1, ssse3);
  2616. H264_QPEL_FUNCS(1, 2, ssse3);
  2617. H264_QPEL_FUNCS(1, 3, ssse3);
  2618. H264_QPEL_FUNCS(2, 0, ssse3);
  2619. H264_QPEL_FUNCS(2, 1, ssse3);
  2620. H264_QPEL_FUNCS(2, 2, ssse3);
  2621. H264_QPEL_FUNCS(2, 3, ssse3);
  2622. H264_QPEL_FUNCS(3, 0, ssse3);
  2623. H264_QPEL_FUNCS(3, 1, ssse3);
  2624. H264_QPEL_FUNCS(3, 2, ssse3);
  2625. H264_QPEL_FUNCS(3, 3, ssse3);
  2626. }
  2627. #if HAVE_YASM
  2628. else if (bit_depth == 10) {
  2629. H264_QPEL_FUNCS_10(1, 0, ssse3_cache64)
  2630. H264_QPEL_FUNCS_10(2, 0, ssse3_cache64)
  2631. H264_QPEL_FUNCS_10(3, 0, ssse3_cache64)
  2632. }
  2633. if (!high_bit_depth) {
  2634. c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
  2635. c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
  2636. c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
  2637. c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
  2638. }
  2639. c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
  2640. if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
  2641. c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
  2642. #endif
  2643. }
  2644. #endif
  2645. if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
  2646. c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
  2647. c->vector_fmul = vector_fmul_3dnow;
  2648. }
  2649. if (HAVE_AMD3DNOWEXT && (mm_flags & AV_CPU_FLAG_3DNOWEXT)) {
  2650. c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
  2651. #if HAVE_6REGS
  2652. c->vector_fmul_window = vector_fmul_window_3dnow2;
  2653. #endif
  2654. }
  2655. if(mm_flags & AV_CPU_FLAG_MMX2){
  2656. #if HAVE_YASM
  2657. c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
  2658. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
  2659. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  2660. c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
  2661. } else {
  2662. c->apply_window_int16 = ff_apply_window_int16_mmxext;
  2663. }
  2664. #endif
  2665. }
  2666. if(mm_flags & AV_CPU_FLAG_SSE){
  2667. c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
  2668. c->ac3_downmix = ac3_downmix_sse;
  2669. c->vector_fmul = vector_fmul_sse;
  2670. c->vector_fmul_reverse = vector_fmul_reverse_sse;
  2671. c->vector_fmul_add = vector_fmul_add_sse;
  2672. #if HAVE_6REGS
  2673. c->vector_fmul_window = vector_fmul_window_sse;
  2674. #endif
  2675. c->vector_clipf = vector_clipf_sse;
  2676. #if HAVE_YASM
  2677. c->scalarproduct_float = ff_scalarproduct_float_sse;
  2678. c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
  2679. #endif
  2680. }
  2681. if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
  2682. c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
  2683. if(mm_flags & AV_CPU_FLAG_SSE2){
  2684. #if HAVE_YASM
  2685. c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
  2686. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
  2687. if (mm_flags & AV_CPU_FLAG_ATOM) {
  2688. c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
  2689. } else {
  2690. c->vector_clip_int32 = ff_vector_clip_int32_sse2;
  2691. }
  2692. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  2693. c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
  2694. } else {
  2695. if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  2696. c->apply_window_int16 = ff_apply_window_int16_sse2;
  2697. }
  2698. }
  2699. if (!high_bit_depth)
  2700. c->emulated_edge_mc = emulated_edge_mc_sse;
  2701. c->gmc= gmc_sse;
  2702. #endif
  2703. }
  2704. if (mm_flags & AV_CPU_FLAG_SSSE3) {
  2705. #if HAVE_YASM
  2706. if (mm_flags & AV_CPU_FLAG_ATOM) {
  2707. c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
  2708. } else {
  2709. c->apply_window_int16 = ff_apply_window_int16_ssse3;
  2710. }
  2711. if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // cachesplit
  2712. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
  2713. }
  2714. #endif
  2715. }
  2716. if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
  2717. #if HAVE_YASM
  2718. c->vector_clip_int32 = ff_vector_clip_int32_sse4;
  2719. #endif
  2720. }
  2721. #if HAVE_AVX && HAVE_YASM
  2722. if (mm_flags & AV_CPU_FLAG_AVX) {
  2723. if (bit_depth == 10) {
  2724. // AVX implies !cache64.
  2725. // TODO: port cache(32|64) detection from x264.
  2726. H264_QPEL_FUNCS_10(1, 0, sse2)
  2727. H264_QPEL_FUNCS_10(2, 0, sse2)
  2728. H264_QPEL_FUNCS_10(3, 0, sse2)
  2729. c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_10_avx;
  2730. c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_10_avx;
  2731. }
  2732. c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
  2733. }
  2734. #endif
  2735. }
  2736. if (CONFIG_ENCODERS)
  2737. dsputilenc_init_mmx(c, avctx);
  2738. }