  1. /*
  2. * MMX optimized DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. #include "libavutil/attributes.h"
  25. #include "libavutil/cpu.h"
  26. #include "libavutil/x86/asm.h"
  27. #include "libavcodec/dsputil.h"
  28. #include "libavcodec/h264dsp.h"
  29. #include "libavcodec/mpegvideo.h"
  30. #include "libavcodec/simple_idct.h"
  31. #include "dsputil_mmx.h"
  32. #include "idct_xvid.h"
  33. //#undef NDEBUG
  34. //#include <assert.h>
  35. /* pixel operations */
  36. DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
  37. DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
  38. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL };
  39. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL };
  40. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
  41. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL };
  42. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
  43. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
  44. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
  45. DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
  46. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
  47. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
  48. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
  49. DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) = 0x0014001400140014ULL;
  50. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27) = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
  51. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28) = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
  52. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
  53. DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
  54. DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
  55. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63) = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
  56. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
  57. DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
  58. DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
  59. DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
  60. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL };
  61. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
  62. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL };
  63. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL };
  64. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL };
  65. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4) = { 0x0404040404040404ULL, 0x0404040404040404ULL };
  66. DECLARE_ALIGNED(8, const uint64_t, ff_pb_7) = 0x0707070707070707ULL;
  67. DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F) = 0x1F1F1F1F1F1F1F1FULL;
  68. DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F) = 0x3F3F3F3F3F3F3F3FULL;
  69. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL };
  70. DECLARE_ALIGNED(8, const uint64_t, ff_pb_81) = 0x8181818181818181ULL;
  71. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1) = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
  72. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8) = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
  73. DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
  74. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };
  75. DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
  76. DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
  77. #if HAVE_YASM
  78. void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
  79. ptrdiff_t line_size, int h);
  80. void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
  81. ptrdiff_t line_size, int h);
  82. void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
  83. int dstStride, int src1Stride, int h);
  84. void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
  85. uint8_t *src2, int dstStride,
  86. int src1Stride, int h);
  87. void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
  88. int dstStride, int src1Stride, int h);
  89. void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
  90. ptrdiff_t line_size, int h);
  91. void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
  92. ptrdiff_t line_size, int h);
  93. void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
  94. int dstStride, int src1Stride, int h);
  95. void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
  96. int dstStride, int src1Stride, int h);
  97. void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
  98. int dstStride, int src1Stride, int h);
  99. void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
  100. ptrdiff_t line_size, int h);
  101. void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
  102. ptrdiff_t line_size, int h);
  103. void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
  104. const uint8_t *pixels,
  105. ptrdiff_t line_size, int h);
  106. void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
  107. const uint8_t *pixels,
  108. ptrdiff_t line_size, int h);
  109. void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
  110. ptrdiff_t line_size, int h);
  111. void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
  112. ptrdiff_t line_size, int h);
  113. void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
  114. ptrdiff_t line_size, int h);
  115. void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
  116. ptrdiff_t line_size, int h);
  117. void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
  118. const uint8_t *pixels,
  119. ptrdiff_t line_size, int h);
  120. void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
  121. const uint8_t *pixels,
  122. ptrdiff_t line_size, int h);
  123. void ff_avg_pixels8_mmxext(uint8_t *block, const uint8_t *pixels,
  124. ptrdiff_t line_size, int h);
  125. void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
  126. ptrdiff_t line_size, int h);
  127. void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
  128. ptrdiff_t line_size, int h);
  129. void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
  130. ptrdiff_t line_size, int h);
  131. void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
  132. ptrdiff_t line_size, int h);
  133. void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
  134. ptrdiff_t line_size, int h);
  135. void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
  136. ptrdiff_t line_size, int h);
  137. void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
  138. ptrdiff_t line_size, int h);
  139. void ff_put_pixels8_mmxext(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h);
  140. static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
  141. ptrdiff_t line_size, int h)
  142. {
  143. ff_put_pixels8_mmxext(block, pixels, line_size, h);
  144. ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
  145. }
  146. void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  147. int dstStride, int srcStride, int h);
  148. void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  149. int dstStride, int srcStride, int h);
  150. void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  151. int dstStride, int srcStride,
  152. int h);
  153. void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  154. int dstStride, int srcStride, int h);
  155. void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  156. int dstStride, int srcStride, int h);
  157. void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  158. int dstStride, int srcStride,
  159. int h);
  160. void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  161. int dstStride, int srcStride);
  162. void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  163. int dstStride, int srcStride);
  164. void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  165. int dstStride, int srcStride);
  166. void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  167. int dstStride, int srcStride);
  168. void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  169. int dstStride, int srcStride);
  170. void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
  171. int dstStride, int srcStride);
  172. #define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
  173. #define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext
  174. #endif /* HAVE_YASM */
  175. #if HAVE_INLINE_ASM
  176. #define JUMPALIGN() __asm__ volatile (".p2align 3"::)
  177. #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
  178. #define MOVQ_BFE(regd) \
  179. __asm__ volatile ( \
  180. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  181. "paddb %%"#regd", %%"#regd" \n\t" ::)
  182. #ifndef PIC
  183. #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
  184. #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
  185. #else
  186. // For shared libraries (PIC) it is better to build these constants in registers
  187. // rather than load them from memory; pcmpeqd seeds the register with -1 (all bits set).
  188. #define MOVQ_BONE(regd) \
  189. __asm__ volatile ( \
  190. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  191. "psrlw $15, %%"#regd" \n\t" \
  192. "packuswb %%"#regd", %%"#regd" \n\t" ::)
  193. #define MOVQ_WTWO(regd) \
  194. __asm__ volatile ( \
  195. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  196. "psrlw $15, %%"#regd" \n\t" \
  197. "psllw $1, %%"#regd" \n\t"::)
  198. #endif
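/* In both branches MOVQ_BONE leaves 0x0101010101010101 in the register and
 * MOVQ_WTWO leaves 0x0002000200020002: pcmpeqd produces all-ones, psrlw $15
 * turns that into 0x0001 per word, packuswb packs it to 0x01 per byte, and
 * psllw $1 doubles the words instead. */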
  199. // regr is used as a temporary and holds the output result;
  200. // the first argument is unmodified and the second is trashed;
  201. // regfe is supposed to contain 0xfefefefefefefefe
  202. #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
  203. "movq "#rega", "#regr" \n\t" \
  204. "pand "#regb", "#regr" \n\t" \
  205. "pxor "#rega", "#regb" \n\t" \
  206. "pand "#regfe", "#regb" \n\t" \
  207. "psrlq $1, "#regb" \n\t" \
  208. "paddb "#regb", "#regr" \n\t"
  209. #define PAVGB_MMX(rega, regb, regr, regfe) \
  210. "movq "#rega", "#regr" \n\t" \
  211. "por "#regb", "#regr" \n\t" \
  212. "pxor "#rega", "#regb" \n\t" \
  213. "pand "#regfe", "#regb" \n\t" \
  214. "psrlq $1, "#regb" \n\t" \
  215. "psubb "#regb", "#regr" \n\t"
  216. // mm6 is supposed to contain 0xfefefefefefefefe
  217. #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
  218. "movq "#rega", "#regr" \n\t" \
  219. "movq "#regc", "#regp" \n\t" \
  220. "pand "#regb", "#regr" \n\t" \
  221. "pand "#regd", "#regp" \n\t" \
  222. "pxor "#rega", "#regb" \n\t" \
  223. "pxor "#regc", "#regd" \n\t" \
  224. "pand %%mm6, "#regb" \n\t" \
  225. "pand %%mm6, "#regd" \n\t" \
  226. "psrlq $1, "#regb" \n\t" \
  227. "psrlq $1, "#regd" \n\t" \
  228. "paddb "#regb", "#regr" \n\t" \
  229. "paddb "#regd", "#regp" \n\t"
  230. #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
  231. "movq "#rega", "#regr" \n\t" \
  232. "movq "#regc", "#regp" \n\t" \
  233. "por "#regb", "#regr" \n\t" \
  234. "por "#regd", "#regp" \n\t" \
  235. "pxor "#rega", "#regb" \n\t" \
  236. "pxor "#regc", "#regd" \n\t" \
  237. "pand %%mm6, "#regb" \n\t" \
  238. "pand %%mm6, "#regd" \n\t" \
  239. "psrlq $1, "#regd" \n\t" \
  240. "psrlq $1, "#regb" \n\t" \
  241. "psubb "#regb", "#regr" \n\t" \
  242. "psubb "#regd", "#regp" \n\t"
  243. /***********************************/
  244. /* MMX no rounding */
  245. #define NO_RND 1
  246. #define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
  247. #define SET_RND MOVQ_WONE
  248. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
  249. #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
  250. #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
  251. #include "dsputil_rnd_template.c"
  252. #undef DEF
  253. #undef SET_RND
  254. #undef PAVGBP
  255. #undef PAVGB
  256. #undef NO_RND
  257. /***********************************/
  258. /* MMX rounding */
  259. #define DEF(x, y) x ## _ ## y ## _mmx
  260. #define SET_RND MOVQ_WTWO
  261. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
  262. #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
  263. #include "dsputil_rnd_template.c"
  264. #undef DEF
  265. #undef SET_RND
  266. #undef PAVGBP
  267. #undef PAVGB
  268. #undef OP_AVG
  269. #endif /* HAVE_INLINE_ASM */
  270. #if HAVE_YASM
  271. #define ff_put_pixels8_mmx ff_put_pixels8_mmxext
  272. /***********************************/
  273. /* 3Dnow specific */
  274. #define DEF(x) x ## _3dnow
  275. #include "dsputil_avg_template.c"
  276. #undef DEF
  277. /***********************************/
  278. /* MMXEXT specific */
  279. #define DEF(x) x ## _mmxext
  280. #include "dsputil_avg_template.c"
  281. #undef DEF
  282. #endif /* HAVE_YASM */
  283. #if HAVE_INLINE_ASM
  284. #define put_no_rnd_pixels16_mmx put_pixels16_mmx
  285. #define put_no_rnd_pixels8_mmx put_pixels8_mmx
  286. #define put_pixels16_mmxext put_pixels16_mmx
  287. #define put_pixels8_mmxext put_pixels8_mmx
  288. #define put_pixels4_mmxext put_pixels4_mmx
  289. #define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
  290. #define put_no_rnd_pixels8_mmxext put_no_rnd_pixels8_mmx
  291. /***********************************/
  292. /* standard MMX */
  293. void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
  294. int line_size)
  295. {
  296. const int16_t *p;
  297. uint8_t *pix;
  298. /* read the pixels */
  299. p = block;
  300. pix = pixels;
  301. /* unrolled loop */
  302. __asm__ volatile (
  303. "movq (%3), %%mm0 \n\t"
  304. "movq 8(%3), %%mm1 \n\t"
  305. "movq 16(%3), %%mm2 \n\t"
  306. "movq 24(%3), %%mm3 \n\t"
  307. "movq 32(%3), %%mm4 \n\t"
  308. "movq 40(%3), %%mm5 \n\t"
  309. "movq 48(%3), %%mm6 \n\t"
  310. "movq 56(%3), %%mm7 \n\t"
  311. "packuswb %%mm1, %%mm0 \n\t"
  312. "packuswb %%mm3, %%mm2 \n\t"
  313. "packuswb %%mm5, %%mm4 \n\t"
  314. "packuswb %%mm7, %%mm6 \n\t"
  315. "movq %%mm0, (%0) \n\t"
  316. "movq %%mm2, (%0, %1) \n\t"
  317. "movq %%mm4, (%0, %1, 2) \n\t"
  318. "movq %%mm6, (%0, %2) \n\t"
  319. :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
  320. "r"(p)
  321. : "memory");
  322. pix += line_size * 4;
  323. p += 32;
  324. // If this were an exact copy of the code above, the compiler would
  325. // generate some very strange code; hence the block is repeated with the
  326. // operands passed through "r" constraints.
  327. __asm__ volatile (
  328. "movq (%3), %%mm0 \n\t"
  329. "movq 8(%3), %%mm1 \n\t"
  330. "movq 16(%3), %%mm2 \n\t"
  331. "movq 24(%3), %%mm3 \n\t"
  332. "movq 32(%3), %%mm4 \n\t"
  333. "movq 40(%3), %%mm5 \n\t"
  334. "movq 48(%3), %%mm6 \n\t"
  335. "movq 56(%3), %%mm7 \n\t"
  336. "packuswb %%mm1, %%mm0 \n\t"
  337. "packuswb %%mm3, %%mm2 \n\t"
  338. "packuswb %%mm5, %%mm4 \n\t"
  339. "packuswb %%mm7, %%mm6 \n\t"
  340. "movq %%mm0, (%0) \n\t"
  341. "movq %%mm2, (%0, %1) \n\t"
  342. "movq %%mm4, (%0, %1, 2) \n\t"
  343. "movq %%mm6, (%0, %2) \n\t"
  344. :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
  345. : "memory");
  346. }
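#if 0
/* Rough scalar equivalent of ff_put_pixels_clamped_mmx (a sketch, kept out
 * of the build): packuswb performs the unsigned-saturating narrowing for
 * eight coefficients at a time. */
static void put_pixels_clamped_sketch(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            pixels[y * line_size + x] = av_clip_uint8(block[y * 8 + x]);
}
#endif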
  347. #define put_signed_pixels_clamped_mmx_half(off) \
  348. "movq "#off"(%2), %%mm1 \n\t" \
  349. "movq 16 + "#off"(%2), %%mm2 \n\t" \
  350. "movq 32 + "#off"(%2), %%mm3 \n\t" \
  351. "movq 48 + "#off"(%2), %%mm4 \n\t" \
  352. "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
  353. "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
  354. "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
  355. "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
  356. "paddb %%mm0, %%mm1 \n\t" \
  357. "paddb %%mm0, %%mm2 \n\t" \
  358. "paddb %%mm0, %%mm3 \n\t" \
  359. "paddb %%mm0, %%mm4 \n\t" \
  360. "movq %%mm1, (%0) \n\t" \
  361. "movq %%mm2, (%0, %3) \n\t" \
  362. "movq %%mm3, (%0, %3, 2) \n\t" \
  363. "movq %%mm4, (%0, %1) \n\t"
  364. void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
  365. int line_size)
  366. {
  367. x86_reg line_skip = line_size;
  368. x86_reg line_skip3;
  369. __asm__ volatile (
  370. "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
  371. "lea (%3, %3, 2), %1 \n\t"
  372. put_signed_pixels_clamped_mmx_half(0)
  373. "lea (%0, %3, 4), %0 \n\t"
  374. put_signed_pixels_clamped_mmx_half(64)
  375. : "+&r"(pixels), "=&r"(line_skip3)
  376. : "r"(block), "r"(line_skip)
  377. : "memory");
  378. }
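#if 0
/* Rough scalar equivalent of ff_put_signed_pixels_clamped_mmx (a sketch, not
 * built): packsswb clamps each coefficient to [-128, 127] and the paddb of
 * ff_pb_80 then biases it into the unsigned range, i.e. av_clip_uint8(v + 128). */
static void put_signed_pixels_clamped_sketch(const int16_t *block,
                                             uint8_t *pixels, int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            pixels[y * line_size + x] = av_clip_uint8(block[y * 8 + x] + 128);
}
#endif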
  379. void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
  380. int line_size)
  381. {
  382. const int16_t *p;
  383. uint8_t *pix;
  384. int i;
  385. /* read the pixels */
  386. p = block;
  387. pix = pixels;
  388. MOVQ_ZERO(mm7);
  389. i = 4;
  390. do {
  391. __asm__ volatile (
  392. "movq (%2), %%mm0 \n\t"
  393. "movq 8(%2), %%mm1 \n\t"
  394. "movq 16(%2), %%mm2 \n\t"
  395. "movq 24(%2), %%mm3 \n\t"
  396. "movq %0, %%mm4 \n\t"
  397. "movq %1, %%mm6 \n\t"
  398. "movq %%mm4, %%mm5 \n\t"
  399. "punpcklbw %%mm7, %%mm4 \n\t"
  400. "punpckhbw %%mm7, %%mm5 \n\t"
  401. "paddsw %%mm4, %%mm0 \n\t"
  402. "paddsw %%mm5, %%mm1 \n\t"
  403. "movq %%mm6, %%mm5 \n\t"
  404. "punpcklbw %%mm7, %%mm6 \n\t"
  405. "punpckhbw %%mm7, %%mm5 \n\t"
  406. "paddsw %%mm6, %%mm2 \n\t"
  407. "paddsw %%mm5, %%mm3 \n\t"
  408. "packuswb %%mm1, %%mm0 \n\t"
  409. "packuswb %%mm3, %%mm2 \n\t"
  410. "movq %%mm0, %0 \n\t"
  411. "movq %%mm2, %1 \n\t"
  412. : "+m"(*pix), "+m"(*(pix + line_size))
  413. : "r"(p)
  414. : "memory");
  415. pix += line_size * 2;
  416. p += 16;
  417. } while (--i);
  418. }
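#if 0
/* Rough scalar equivalent of ff_add_pixels_clamped_mmx (a sketch, not built):
 * the asm widens the pixels to 16 bits, adds with signed saturation (paddsw)
 * and narrows back with unsigned saturation (packuswb). */
static void add_pixels_clamped_sketch(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            pixels[y * line_size + x] =
                av_clip_uint8(pixels[y * line_size + x] + block[y * 8 + x]);
}
#endif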
  419. static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
  420. int line_size, int h)
  421. {
  422. __asm__ volatile (
  423. "lea (%3, %3), %%"REG_a" \n\t"
  424. ".p2align 3 \n\t"
  425. "1: \n\t"
  426. "movq (%1 ), %%mm0 \n\t"
  427. "movq (%1, %3), %%mm1 \n\t"
  428. "movq %%mm0, (%2) \n\t"
  429. "movq %%mm1, (%2, %3) \n\t"
  430. "add %%"REG_a", %1 \n\t"
  431. "add %%"REG_a", %2 \n\t"
  432. "movq (%1 ), %%mm0 \n\t"
  433. "movq (%1, %3), %%mm1 \n\t"
  434. "movq %%mm0, (%2) \n\t"
  435. "movq %%mm1, (%2, %3) \n\t"
  436. "add %%"REG_a", %1 \n\t"
  437. "add %%"REG_a", %2 \n\t"
  438. "subl $4, %0 \n\t"
  439. "jnz 1b \n\t"
  440. : "+g"(h), "+r"(pixels), "+r"(block)
  441. : "r"((x86_reg)line_size)
  442. : "%"REG_a, "memory"
  443. );
  444. }
  445. static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
  446. int line_size, int h)
  447. {
  448. __asm__ volatile (
  449. "lea (%3, %3), %%"REG_a" \n\t"
  450. ".p2align 3 \n\t"
  451. "1: \n\t"
  452. "movq (%1 ), %%mm0 \n\t"
  453. "movq 8(%1 ), %%mm4 \n\t"
  454. "movq (%1, %3), %%mm1 \n\t"
  455. "movq 8(%1, %3), %%mm5 \n\t"
  456. "movq %%mm0, (%2) \n\t"
  457. "movq %%mm4, 8(%2) \n\t"
  458. "movq %%mm1, (%2, %3) \n\t"
  459. "movq %%mm5, 8(%2, %3) \n\t"
  460. "add %%"REG_a", %1 \n\t"
  461. "add %%"REG_a", %2 \n\t"
  462. "movq (%1 ), %%mm0 \n\t"
  463. "movq 8(%1 ), %%mm4 \n\t"
  464. "movq (%1, %3), %%mm1 \n\t"
  465. "movq 8(%1, %3), %%mm5 \n\t"
  466. "movq %%mm0, (%2) \n\t"
  467. "movq %%mm4, 8(%2) \n\t"
  468. "movq %%mm1, (%2, %3) \n\t"
  469. "movq %%mm5, 8(%2, %3) \n\t"
  470. "add %%"REG_a", %1 \n\t"
  471. "add %%"REG_a", %2 \n\t"
  472. "subl $4, %0 \n\t"
  473. "jnz 1b \n\t"
  474. : "+g"(h), "+r"(pixels), "+r"(block)
  475. : "r"((x86_reg)line_size)
  476. : "%"REG_a, "memory"
  477. );
  478. }
  479. #define CLEAR_BLOCKS(name, n) \
  480. static void name(int16_t *blocks) \
  481. { \
  482. __asm__ volatile ( \
  483. "pxor %%mm7, %%mm7 \n\t" \
  484. "mov %1, %%"REG_a" \n\t" \
  485. "1: \n\t" \
  486. "movq %%mm7, (%0, %%"REG_a") \n\t" \
  487. "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
  488. "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
  489. "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
  490. "add $32, %%"REG_a" \n\t" \
  491. "js 1b \n\t" \
  492. :: "r"(((uint8_t *)blocks) + 128 * n), \
  493. "i"(-128 * n) \
  494. : "%"REG_a \
  495. ); \
  496. }
  497. CLEAR_BLOCKS(clear_blocks_mmx, 6)
  498. CLEAR_BLOCKS(clear_block_mmx, 1)
  499. static void clear_block_sse(int16_t *block)
  500. {
  501. __asm__ volatile (
  502. "xorps %%xmm0, %%xmm0 \n"
  503. "movaps %%xmm0, (%0) \n"
  504. "movaps %%xmm0, 16(%0) \n"
  505. "movaps %%xmm0, 32(%0) \n"
  506. "movaps %%xmm0, 48(%0) \n"
  507. "movaps %%xmm0, 64(%0) \n"
  508. "movaps %%xmm0, 80(%0) \n"
  509. "movaps %%xmm0, 96(%0) \n"
  510. "movaps %%xmm0, 112(%0) \n"
  511. :: "r"(block)
  512. : "memory"
  513. );
  514. }
  515. static void clear_blocks_sse(int16_t *blocks)
  516. {
  517. __asm__ volatile (
  518. "xorps %%xmm0, %%xmm0 \n"
  519. "mov %1, %%"REG_a" \n"
  520. "1: \n"
  521. "movaps %%xmm0, (%0, %%"REG_a") \n"
  522. "movaps %%xmm0, 16(%0, %%"REG_a") \n"
  523. "movaps %%xmm0, 32(%0, %%"REG_a") \n"
  524. "movaps %%xmm0, 48(%0, %%"REG_a") \n"
  525. "movaps %%xmm0, 64(%0, %%"REG_a") \n"
  526. "movaps %%xmm0, 80(%0, %%"REG_a") \n"
  527. "movaps %%xmm0, 96(%0, %%"REG_a") \n"
  528. "movaps %%xmm0, 112(%0, %%"REG_a") \n"
  529. "add $128, %%"REG_a" \n"
  530. "js 1b \n"
  531. :: "r"(((uint8_t *)blocks) + 128 * 6),
  532. "i"(-128 * 6)
  533. : "%"REG_a
  534. );
  535. }
  536. static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
  537. {
  538. x86_reg i = 0;
  539. __asm__ volatile (
  540. "jmp 2f \n\t"
  541. "1: \n\t"
  542. "movq (%1, %0), %%mm0 \n\t"
  543. "movq (%2, %0), %%mm1 \n\t"
  544. "paddb %%mm0, %%mm1 \n\t"
  545. "movq %%mm1, (%2, %0) \n\t"
  546. "movq 8(%1, %0), %%mm0 \n\t"
  547. "movq 8(%2, %0), %%mm1 \n\t"
  548. "paddb %%mm0, %%mm1 \n\t"
  549. "movq %%mm1, 8(%2, %0) \n\t"
  550. "add $16, %0 \n\t"
  551. "2: \n\t"
  552. "cmp %3, %0 \n\t"
  553. "js 1b \n\t"
  554. : "+r"(i)
  555. : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
  556. );
  557. for ( ; i < w; i++)
  558. dst[i + 0] += src[i + 0];
  559. }
  560. #if HAVE_7REGS
  561. static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
  562. const uint8_t *diff, int w,
  563. int *left, int *left_top)
  564. {
  565. x86_reg w2 = -w;
  566. x86_reg x;
  567. int l = *left & 0xff;
  568. int tl = *left_top & 0xff;
  569. int t;
  570. __asm__ volatile (
  571. "mov %7, %3 \n"
  572. "1: \n"
  573. "movzbl (%3, %4), %2 \n"
  574. "mov %2, %k3 \n"
  575. "sub %b1, %b3 \n"
  576. "add %b0, %b3 \n"
  577. "mov %2, %1 \n"
  578. "cmp %0, %2 \n"
  579. "cmovg %0, %2 \n"
  580. "cmovg %1, %0 \n"
  581. "cmp %k3, %0 \n"
  582. "cmovg %k3, %0 \n"
  583. "mov %7, %3 \n"
  584. "cmp %2, %0 \n"
  585. "cmovl %2, %0 \n"
  586. "add (%6, %4), %b0 \n"
  587. "mov %b0, (%5, %4) \n"
  588. "inc %4 \n"
  589. "jl 1b \n"
  590. : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
  591. : "r"(dst + w), "r"(diff + w), "rm"(top + w)
  592. );
  593. *left = l;
  594. *left_top = tl;
  595. }
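#if 0
/* Rough scalar reference for add_hfyu_median_prediction_cmov above (a sketch,
 * kept out of the build): every output byte is the median predictor of
 * left, top and (left + top - topleft), plus the residual. */
static void add_hfyu_median_prediction_sketch(uint8_t *dst, const uint8_t *top,
                                              const uint8_t *diff, int w,
                                              int *left, int *left_top)
{
    int i, l = *left & 0xff, tl = *left_top & 0xff;
    for (i = 0; i < w; i++) {
        int t    = top[i];
        int pred = l + t - tl;
        pred     = FFMAX(FFMIN(pred, FFMAX(l, t)), FFMIN(l, t)); /* median of the three */
        l        = (pred + diff[i]) & 0xff;
        dst[i]   = l;
        tl       = t;
    }
    *left     = l;
    *left_top = tl;
}
#endif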
  596. #endif
  597. #endif /* HAVE_INLINE_ASM */
  598. void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
  599. void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);
  600. #if HAVE_INLINE_ASM
  601. /* Draw the edges of width 'w' of an image of size width x height.
  602. * This MMX version can only handle w == 8 || w == 16. */
  603. static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
  604. int w, int h, int sides)
  605. {
  606. uint8_t *ptr, *last_line;
  607. int i;
  608. last_line = buf + (height - 1) * wrap;
  609. /* left and right */
  610. ptr = buf;
  611. if (w == 8) {
  612. __asm__ volatile (
  613. "1: \n\t"
  614. "movd (%0), %%mm0 \n\t"
  615. "punpcklbw %%mm0, %%mm0 \n\t"
  616. "punpcklwd %%mm0, %%mm0 \n\t"
  617. "punpckldq %%mm0, %%mm0 \n\t"
  618. "movq %%mm0, -8(%0) \n\t"
  619. "movq -8(%0, %2), %%mm1 \n\t"
  620. "punpckhbw %%mm1, %%mm1 \n\t"
  621. "punpckhwd %%mm1, %%mm1 \n\t"
  622. "punpckhdq %%mm1, %%mm1 \n\t"
  623. "movq %%mm1, (%0, %2) \n\t"
  624. "add %1, %0 \n\t"
  625. "cmp %3, %0 \n\t"
  626. "jb 1b \n\t"
  627. : "+r"(ptr)
  628. : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
  629. );
  630. } else {
  631. __asm__ volatile (
  632. "1: \n\t"
  633. "movd (%0), %%mm0 \n\t"
  634. "punpcklbw %%mm0, %%mm0 \n\t"
  635. "punpcklwd %%mm0, %%mm0 \n\t"
  636. "punpckldq %%mm0, %%mm0 \n\t"
  637. "movq %%mm0, -8(%0) \n\t"
  638. "movq %%mm0, -16(%0) \n\t"
  639. "movq -8(%0, %2), %%mm1 \n\t"
  640. "punpckhbw %%mm1, %%mm1 \n\t"
  641. "punpckhwd %%mm1, %%mm1 \n\t"
  642. "punpckhdq %%mm1, %%mm1 \n\t"
  643. "movq %%mm1, (%0, %2) \n\t"
  644. "movq %%mm1, 8(%0, %2) \n\t"
  645. "add %1, %0 \n\t"
  646. "cmp %3, %0 \n\t"
  647. "jb 1b \n\t"
  648. : "+r"(ptr)
  649. : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
  650. );
  651. }
  652. /* top and bottom (and hopefully also the corners) */
  653. if (sides & EDGE_TOP) {
  654. for (i = 0; i < h; i += 4) {
  655. ptr = buf - (i + 1) * wrap - w;
  656. __asm__ volatile (
  657. "1: \n\t"
  658. "movq (%1, %0), %%mm0 \n\t"
  659. "movq %%mm0, (%0) \n\t"
  660. "movq %%mm0, (%0, %2) \n\t"
  661. "movq %%mm0, (%0, %2, 2) \n\t"
  662. "movq %%mm0, (%0, %3) \n\t"
  663. "add $8, %0 \n\t"
  664. "cmp %4, %0 \n\t"
  665. "jb 1b \n\t"
  666. : "+r"(ptr)
  667. : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
  668. "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
  669. );
  670. }
  671. }
  672. if (sides & EDGE_BOTTOM) {
  673. for (i = 0; i < h; i += 4) {
  674. ptr = last_line + (i + 1) * wrap - w;
  675. __asm__ volatile (
  676. "1: \n\t"
  677. "movq (%1, %0), %%mm0 \n\t"
  678. "movq %%mm0, (%0) \n\t"
  679. "movq %%mm0, (%0, %2) \n\t"
  680. "movq %%mm0, (%0, %2, 2) \n\t"
  681. "movq %%mm0, (%0, %3) \n\t"
  682. "add $8, %0 \n\t"
  683. "cmp %4, %0 \n\t"
  684. "jb 1b \n\t"
  685. : "+r"(ptr)
  686. : "r"((x86_reg)last_line - (x86_reg)ptr - w),
  687. "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
  688. "r"(ptr + width + 2 * w)
  689. );
  690. }
  691. }
  692. }
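/* In scalar terms (a rough sketch of what the asm above does): each row's
 * first pixel is replicated into the w bytes to its left and its last pixel
 * into the w bytes to its right; then, if EDGE_TOP / EDGE_BOTTOM is set, the
 * widened first / last row is copied h times above / below the image. */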
  693. #endif /* HAVE_INLINE_ASM */
  694. #if HAVE_YASM
  695. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
  696. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  697. int stride) \
  698. { \
  699. ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
  700. } \
  701. \
  702. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  703. int stride) \
  704. { \
  705. uint64_t temp[8]; \
  706. uint8_t * const half = (uint8_t*)temp; \
  707. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  708. stride, 8); \
  709. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \
  710. stride, stride, 8); \
  711. } \
  712. \
  713. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  714. int stride) \
  715. { \
  716. ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
  717. stride, 8); \
  718. } \
  719. \
  720. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  721. int stride) \
  722. { \
  723. uint64_t temp[8]; \
  724. uint8_t * const half = (uint8_t*)temp; \
  725. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  726. stride, 8); \
  727. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \
  728. stride, 8); \
  729. } \
  730. \
  731. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  732. int stride) \
  733. { \
  734. uint64_t temp[8]; \
  735. uint8_t * const half = (uint8_t*)temp; \
  736. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \
  737. 8, stride); \
  738. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \
  739. stride, stride, 8); \
  740. } \
  741. \
  742. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  743. int stride) \
  744. { \
  745. ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, \
  746. stride, stride); \
  747. } \
  748. \
  749. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  750. int stride) \
  751. { \
  752. uint64_t temp[8]; \
  753. uint8_t * const half = (uint8_t*)temp; \
  754. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \
  755. 8, stride); \
  756. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
  757. stride, 8); \
  758. } \
  759. \
  760. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  761. int stride) \
  762. { \
  763. uint64_t half[8 + 9]; \
  764. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  765. uint8_t * const halfHV = ((uint8_t*)half); \
  766. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  767. stride, 9); \
  768. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \
  769. stride, 9); \
  770. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  771. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \
  772. stride, 8, 8); \
  773. } \
  774. \
  775. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  776. int stride) \
  777. { \
  778. uint64_t half[8 + 9]; \
  779. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  780. uint8_t * const halfHV = ((uint8_t*)half); \
  781. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  782. stride, 9); \
  783. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  784. stride, 9); \
  785. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  786. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \
  787. stride, 8, 8); \
  788. } \
  789. \
  790. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  791. int stride) \
  792. { \
  793. uint64_t half[8 + 9]; \
  794. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  795. uint8_t * const halfHV = ((uint8_t*)half); \
  796. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  797. stride, 9); \
  798. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \
  799. stride, 9); \
  800. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  801. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \
  802. stride, 8, 8); \
  803. } \
  804. \
  805. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  806. int stride) \
  807. { \
  808. uint64_t half[8 + 9]; \
  809. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  810. uint8_t * const halfHV = ((uint8_t*)half); \
  811. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  812. stride, 9); \
  813. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  814. stride, 9); \
  815. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  816. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \
  817. stride, 8, 8); \
  818. } \
  819. \
  820. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  821. int stride) \
  822. { \
  823. uint64_t half[8 + 9]; \
  824. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  825. uint8_t * const halfHV = ((uint8_t*)half); \
  826. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  827. stride, 9); \
  828. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  829. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \
  830. stride, 8, 8); \
  831. } \
  832. \
  833. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  834. int stride) \
  835. { \
  836. uint64_t half[8 + 9]; \
  837. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  838. uint8_t * const halfHV = ((uint8_t*)half); \
  839. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  840. stride, 9); \
  841. ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  842. ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \
  843. stride, 8, 8); \
  844. } \
  845. \
  846. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  847. int stride) \
  848. { \
  849. uint64_t half[8 + 9]; \
  850. uint8_t * const halfH = ((uint8_t*)half); \
  851. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  852. stride, 9); \
  853. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, \
  854. 8, stride, 9); \
  855. ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \
  856. stride, 8); \
  857. } \
  858. \
  859. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  860. int stride) \
  861. { \
  862. uint64_t half[8 + 9]; \
  863. uint8_t * const halfH = ((uint8_t*)half); \
  864. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  865. stride, 9); \
  866. ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  867. stride, 9); \
  868. ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \
  869. stride, 8); \
  870. } \
  871. \
  872. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  873. int stride) \
  874. { \
  875. uint64_t half[9]; \
  876. uint8_t * const halfH = ((uint8_t*)half); \
  877. ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  878. stride, 9); \
  879. ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \
  880. stride, 8); \
  881. } \
  882. \
  883. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  884. int stride) \
  885. { \
  886. ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
  887. } \
  888. \
  889. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  890. int stride) \
  891. { \
  892. uint64_t temp[32]; \
  893. uint8_t * const half = (uint8_t*)temp; \
  894. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  895. stride, 16); \
  896. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \
  897. stride, 16); \
  898. } \
  899. \
  900. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  901. int stride) \
  902. { \
  903. ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
  904. stride, stride, 16);\
  905. } \
  906. \
  907. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  908. int stride) \
  909. { \
  910. uint64_t temp[32]; \
  911. uint8_t * const half = (uint8_t*)temp; \
  912. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  913. stride, 16); \
  914. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \
  915. stride, stride, 16); \
  916. } \
  917. \
  918. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  919. int stride) \
  920. { \
  921. uint64_t temp[32]; \
  922. uint8_t * const half = (uint8_t*)temp; \
  923. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  924. stride); \
  925. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \
  926. stride, 16); \
  927. } \
  928. \
  929. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  930. int stride) \
  931. { \
  932. ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, \
  933. stride, stride); \
  934. } \
  935. \
  936. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  937. int stride) \
  938. { \
  939. uint64_t temp[32]; \
  940. uint8_t * const half = (uint8_t*)temp; \
  941. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  942. stride); \
  943. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \
  944. stride, stride, 16); \
  945. } \
  946. \
  947. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  948. int stride) \
  949. { \
  950. uint64_t half[16 * 2 + 17 * 2]; \
  951. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  952. uint8_t * const halfHV = ((uint8_t*)half); \
  953. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  954. stride, 17); \
  955. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  956. stride, 17); \
  957. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  958. 16, 16); \
  959. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \
  960. stride, 16, 16); \
  961. } \
  962. \
  963. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  964. int stride) \
  965. { \
  966. uint64_t half[16 * 2 + 17 * 2]; \
  967. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  968. uint8_t * const halfHV = ((uint8_t*)half); \
  969. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  970. stride, 17); \
  971. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  972. stride, 17); \
  973. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  974. 16, 16); \
  975. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \
  976. stride, 16, 16); \
  977. } \
  978. \
  979. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  980. int stride) \
  981. { \
  982. uint64_t half[16 * 2 + 17 * 2]; \
  983. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  984. uint8_t * const halfHV = ((uint8_t*)half); \
  985. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  986. stride, 17); \
  987. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  988. stride, 17); \
  989. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  990. 16, 16); \
  991. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \
  992. stride, 16, 16); \
  993. } \
  994. \
  995. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  996. int stride) \
  997. { \
  998. uint64_t half[16 * 2 + 17 * 2]; \
  999. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1000. uint8_t * const halfHV = ((uint8_t*)half); \
  1001. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1002. stride, 17); \
  1003. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1004. stride, 17); \
  1005. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1006. 16, 16); \
  1007. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \
  1008. stride, 16, 16); \
  1009. } \
  1010. \
  1011. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  1012. int stride) \
  1013. { \
  1014. uint64_t half[16 * 2 + 17 * 2]; \
  1015. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1016. uint8_t * const halfHV = ((uint8_t*)half); \
  1017. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1018. stride, 17); \
  1019. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1020. 16, 16); \
  1021. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \
  1022. stride, 16, 16); \
  1023. } \
  1024. \
  1025. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  1026. int stride) \
  1027. { \
  1028. uint64_t half[16 * 2 + 17 * 2]; \
  1029. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1030. uint8_t * const halfHV = ((uint8_t*)half); \
  1031. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1032. stride, 17); \
  1033. ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1034. 16, 16); \
  1035. ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \
  1036. stride, 16, 16); \
  1037. } \
  1038. \
  1039. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  1040. int stride) \
  1041. { \
  1042. uint64_t half[17 * 2]; \
  1043. uint8_t * const halfH = ((uint8_t*)half); \
  1044. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1045. stride, 17); \
  1046. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1047. stride, 17); \
  1048. ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \
  1049. stride, 16); \
  1050. } \
  1051. \
  1052. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  1053. int stride) \
  1054. { \
  1055. uint64_t half[17 * 2]; \
  1056. uint8_t * const halfH = ((uint8_t*)half); \
  1057. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1058. stride, 17); \
  1059. ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1060. stride, 17); \
  1061. ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \
  1062. stride, 16); \
  1063. } \
  1064. \
  1065. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  1066. int stride) \
  1067. { \
  1068. uint64_t half[17 * 2]; \
  1069. uint8_t * const halfH = ((uint8_t*)half); \
  1070. ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1071. stride, 17); \
  1072. ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \
  1073. stride, 16); \
  1074. }
  1075. #define PUT_OP(a, b, temp, size) \
  1076. "mov"#size" "#a", "#b" \n\t"
  1077. #define AVG_MMXEXT_OP(a, b, temp, size) \
  1078. "mov"#size" "#b", "#temp" \n\t" \
  1079. "pavgb "#temp", "#a" \n\t" \
  1080. "mov"#size" "#a", "#b" \n\t"
  1081. QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmxext)
  1082. QPEL_OP(avg_, ff_pw_16, _, AVG_MMXEXT_OP, mmxext)
  1083. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmxext)
  1084. #endif /* HAVE_YASM */
  1085. #if HAVE_INLINE_ASM
  1086. void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1087. {
  1088. put_pixels8_xy2_mmx(dst, src, stride, 8);
  1089. }
  1090. void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1091. {
  1092. put_pixels16_xy2_mmx(dst, src, stride, 16);
  1093. }
  1094. void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1095. {
  1096. avg_pixels8_xy2_mmx(dst, src, stride, 8);
  1097. }
  1098. void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1099. {
  1100. avg_pixels16_xy2_mmx(dst, src, stride, 16);
  1101. }
  1102. static void gmc_mmx(uint8_t *dst, uint8_t *src,
  1103. int stride, int h, int ox, int oy,
  1104. int dxx, int dxy, int dyx, int dyy,
  1105. int shift, int r, int width, int height)
  1106. {
  1107. const int w = 8;
  1108. const int ix = ox >> (16 + shift);
  1109. const int iy = oy >> (16 + shift);
  1110. const int oxs = ox >> 4;
  1111. const int oys = oy >> 4;
  1112. const int dxxs = dxx >> 4;
  1113. const int dxys = dxy >> 4;
  1114. const int dyxs = dyx >> 4;
  1115. const int dyys = dyy >> 4;
  1116. const uint16_t r4[4] = { r, r, r, r };
  1117. const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
  1118. const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
  1119. const uint64_t shift2 = 2 * shift;
  1120. int x, y;
  1121. const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
  1122. const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
  1123. const int dxh = dxy * (h - 1);
  1124. const int dyw = dyx * (w - 1);
  1125. if ( // non-constant fullpel offset (3% of blocks)
  1126. ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
  1127. (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
  1128. // uses more than 16 bits of subpel mv (only at huge resolution)
  1129. || (dxx | dxy | dyx | dyy) & 15 ||
  1130. (unsigned)ix >= width - w ||
  1131. (unsigned)iy >= height - h) {
  1132. // FIXME could still use mmx for some of the rows
  1133. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
  1134. shift, r, width, height);
  1135. return;
  1136. }
  1137. src += ix + iy * stride;
  1138. __asm__ volatile (
  1139. "movd %0, %%mm6 \n\t"
  1140. "pxor %%mm7, %%mm7 \n\t"
  1141. "punpcklwd %%mm6, %%mm6 \n\t"
  1142. "punpcklwd %%mm6, %%mm6 \n\t"
  1143. :: "r"(1<<shift)
  1144. );
  1145. for (x = 0; x < w; x += 4) {
  1146. uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
  1147. oxs - dxys + dxxs * (x + 1),
  1148. oxs - dxys + dxxs * (x + 2),
  1149. oxs - dxys + dxxs * (x + 3) };
  1150. uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
  1151. oys - dyys + dyxs * (x + 1),
  1152. oys - dyys + dyxs * (x + 2),
  1153. oys - dyys + dyxs * (x + 3) };
  1154. for (y = 0; y < h; y++) {
  1155. __asm__ volatile (
  1156. "movq %0, %%mm4 \n\t"
  1157. "movq %1, %%mm5 \n\t"
  1158. "paddw %2, %%mm4 \n\t"
  1159. "paddw %3, %%mm5 \n\t"
  1160. "movq %%mm4, %0 \n\t"
  1161. "movq %%mm5, %1 \n\t"
  1162. "psrlw $12, %%mm4 \n\t"
  1163. "psrlw $12, %%mm5 \n\t"
  1164. : "+m"(*dx4), "+m"(*dy4)
  1165. : "m"(*dxy4), "m"(*dyy4)
  1166. );
  1167. __asm__ volatile (
  1168. "movq %%mm6, %%mm2 \n\t"
  1169. "movq %%mm6, %%mm1 \n\t"
  1170. "psubw %%mm4, %%mm2 \n\t"
  1171. "psubw %%mm5, %%mm1 \n\t"
  1172. "movq %%mm2, %%mm0 \n\t"
  1173. "movq %%mm4, %%mm3 \n\t"
  1174. "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
  1175. "pmullw %%mm5, %%mm3 \n\t" // dx * dy
  1176. "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
  1177. "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
  1178. "movd %4, %%mm5 \n\t"
  1179. "movd %3, %%mm4 \n\t"
  1180. "punpcklbw %%mm7, %%mm5 \n\t"
  1181. "punpcklbw %%mm7, %%mm4 \n\t"
  1182. "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
  1183. "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
  1184. "movd %2, %%mm5 \n\t"
  1185. "movd %1, %%mm4 \n\t"
  1186. "punpcklbw %%mm7, %%mm5 \n\t"
  1187. "punpcklbw %%mm7, %%mm4 \n\t"
  1188. "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
  1189. "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
  1190. "paddw %5, %%mm1 \n\t"
  1191. "paddw %%mm3, %%mm2 \n\t"
  1192. "paddw %%mm1, %%mm0 \n\t"
  1193. "paddw %%mm2, %%mm0 \n\t"
  1194. "psrlw %6, %%mm0 \n\t"
  1195. "packuswb %%mm0, %%mm0 \n\t"
  1196. "movd %%mm0, %0 \n\t"
  1197. : "=m"(dst[x + y * stride])
  1198. : "m"(src[0]), "m"(src[1]),
  1199. "m"(src[stride]), "m"(src[stride + 1]),
  1200. "m"(*r4), "m"(shift2)
  1201. );
  1202. src += stride;
  1203. }
  1204. src += 4 - h * stride;
  1205. }
  1206. }
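/* Scalar form of the per-pixel bilinear kernel evaluated above (a rough
 * sketch, not built; s = 1 << shift, dx/dy are the subpel fractions kept in
 * mm4/mm5):
 *
 *     dst[x + y * stride] =
 *         (src[0]          * (s - dx) * (s - dy) +
 *          src[1]          *      dx  * (s - dy) +
 *          src[stride]     * (s - dx) *      dy  +
 *          src[stride + 1] *      dx  *      dy  + r) >> (2 * shift);
 */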
  1207. #endif /* HAVE_INLINE_ASM */
  1208. void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  1209. ptrdiff_t line_size, int h);
  1210. void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  1211. ptrdiff_t line_size, int h);
  1212. #if HAVE_INLINE_ASM
  1213. /* CAVS-specific */
  1214. void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1215. {
  1216. put_pixels8_mmx(dst, src, stride, 8);
  1217. }
  1218. void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1219. {
  1220. avg_pixels8_mmx(dst, src, stride, 8);
  1221. }
  1222. void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1223. {
  1224. put_pixels16_mmx(dst, src, stride, 16);
  1225. }
  1226. void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1227. {
  1228. avg_pixels16_mmx(dst, src, stride, 16);
  1229. }
  1230. #endif /* HAVE_INLINE_ASM */
  1231. #if HAVE_YASM
  1232. /* VC-1-specific */
  1233. void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
  1234. int stride, int rnd)
  1235. {
  1236. ff_put_pixels8_mmx(dst, src, stride, 8);
  1237. }
  1238. void ff_avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
  1239. int stride, int rnd)
  1240. {
  1241. ff_avg_pixels8_mmxext(dst, src, stride, 8);
  1242. }
  1243. #endif /* HAVE_YASM */
  1244. #if HAVE_INLINE_ASM
  1245. static void vector_clipf_sse(float *dst, const float *src,
  1246. float min, float max, int len)
  1247. {
  1248. x86_reg i = (len - 16) * 4;
  1249. __asm__ volatile (
  1250. "movss %3, %%xmm4 \n\t"
  1251. "movss %4, %%xmm5 \n\t"
  1252. "shufps $0, %%xmm4, %%xmm4 \n\t"
  1253. "shufps $0, %%xmm5, %%xmm5 \n\t"
  1254. "1: \n\t"
  1255. "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
  1256. "movaps 16(%2, %0), %%xmm1 \n\t"
  1257. "movaps 32(%2, %0), %%xmm2 \n\t"
  1258. "movaps 48(%2, %0), %%xmm3 \n\t"
  1259. "maxps %%xmm4, %%xmm0 \n\t"
  1260. "maxps %%xmm4, %%xmm1 \n\t"
  1261. "maxps %%xmm4, %%xmm2 \n\t"
  1262. "maxps %%xmm4, %%xmm3 \n\t"
  1263. "minps %%xmm5, %%xmm0 \n\t"
  1264. "minps %%xmm5, %%xmm1 \n\t"
  1265. "minps %%xmm5, %%xmm2 \n\t"
  1266. "minps %%xmm5, %%xmm3 \n\t"
  1267. "movaps %%xmm0, (%1, %0) \n\t"
  1268. "movaps %%xmm1, 16(%1, %0) \n\t"
  1269. "movaps %%xmm2, 32(%1, %0) \n\t"
  1270. "movaps %%xmm3, 48(%1, %0) \n\t"
  1271. "sub $64, %0 \n\t"
  1272. "jge 1b \n\t"
  1273. : "+&r"(i)
  1274. : "r"(dst), "r"(src), "m"(min), "m"(max)
  1275. : "memory"
  1276. );
  1277. }
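/* Scalar equivalent of vector_clipf_sse (a rough sketch): the loop handles
 * 16 floats per iteration and appears to assume len is a multiple of 16 and
 * that src/dst are 16-byte aligned (movaps).
 *
 *     for (i = 0; i < len; i++)
 *         dst[i] = FFMIN(FFMAX(src[i], min), max);
 */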
  1278. #endif /* HAVE_INLINE_ASM */
  1279. int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
  1280. int order);
  1281. int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
  1282. int order);
  1283. int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
  1284. const int16_t *v3,
  1285. int order, int mul);
  1286. int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
  1287. const int16_t *v3,
  1288. int order, int mul);
  1289. int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
  1290. const int16_t *v3,
  1291. int order, int mul);
  1292. void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
  1293. const int16_t *window, unsigned int len);
  1294. void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
  1295. const int16_t *window, unsigned int len);
  1296. void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
  1297. const int16_t *window, unsigned int len);
  1298. void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
  1299. const int16_t *window, unsigned int len);
  1300. void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
  1301. const int16_t *window, unsigned int len);
  1302. void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
  1303. const int16_t *window, unsigned int len);
  1304. void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
  1305. void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
  1306. void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
  1307. const uint8_t *diff, int w,
  1308. int *left, int *left_top);
  1309. int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
  1310. int w, int left);
  1311. int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
  1312. int w, int left);
  1313. void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
  1314. int32_t min, int32_t max, unsigned int len);
  1315. void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
  1316. int32_t min, int32_t max, unsigned int len);
  1317. void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
  1318. int32_t min, int32_t max, unsigned int len);
  1319. void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
  1320. int32_t min, int32_t max, unsigned int len);
  1321. #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
  1322. do { \
  1323. c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
  1324. c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
  1325. c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
  1326. c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
  1327. c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
  1328. c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
  1329. c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
  1330. c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
  1331. c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
  1332. c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
  1333. c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
  1334. c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
  1335. c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
  1336. c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
  1337. c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
  1338. c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
  1339. } while (0)
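/* Table layout note (as far as the mc<xy> naming suggests): entry
 * (dx + 4 * dy) of each _pixels_tab row holds the mc<dx><dy> function,
 * i.e. quarter-pel offsets dx, dy in 0..3. */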
  1340. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  1341. do { \
  1342. c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  1343. c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  1344. c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  1345. c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
  1346. } while (0)
  1347. static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
  1348. int mm_flags)
  1349. {
  1350. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  1351. #if HAVE_INLINE_ASM
  1352. c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
  1353. c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
  1354. c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
  1355. if (!high_bit_depth) {
  1356. c->clear_block = clear_block_mmx;
  1357. c->clear_blocks = clear_blocks_mmx;
  1358. c->draw_edges = draw_edges_mmx;
  1359. SET_HPEL_FUNCS(put, [0], 16, mmx);
  1360. SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
  1361. SET_HPEL_FUNCS(avg, [0], 16, mmx);
  1362. SET_HPEL_FUNCS(avg_no_rnd, , 16, mmx);
  1363. SET_HPEL_FUNCS(put, [1], 8, mmx);
  1364. SET_HPEL_FUNCS(put_no_rnd, [1], 8, mmx);
  1365. SET_HPEL_FUNCS(avg, [1], 8, mmx);
  1366. switch (avctx->idct_algo) {
  1367. case FF_IDCT_AUTO:
  1368. case FF_IDCT_SIMPLEMMX:
  1369. c->idct_put = ff_simple_idct_put_mmx;
  1370. c->idct_add = ff_simple_idct_add_mmx;
  1371. c->idct = ff_simple_idct_mmx;
  1372. c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
  1373. break;
  1374. case FF_IDCT_XVIDMMX:
  1375. c->idct_put = ff_idct_xvid_mmx_put;
  1376. c->idct_add = ff_idct_xvid_mmx_add;
  1377. c->idct = ff_idct_xvid_mmx;
  1378. break;
  1379. }
  1380. }
  1381. c->gmc = gmc_mmx;
  1382. c->add_bytes = add_bytes_mmx;
  1383. #endif /* HAVE_INLINE_ASM */
  1384. #if HAVE_YASM
  1385. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  1386. c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
  1387. c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
  1388. }
  1389. c->vector_clip_int32 = ff_vector_clip_int32_mmx;
  1390. #endif
  1391. }
static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
    const int bit_depth      = avctx->bits_per_raw_sample;
    const int high_bit_depth = bit_depth > 8;

#if HAVE_YASM
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
        c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;

        c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
        c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
        c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;

        c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
        c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

        c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
        c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
        c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
    }

    if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        if (!high_bit_depth) {
            c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
            c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
            c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
            c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

            c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
            c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
        }
    }
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }
#endif /* HAVE_INLINE_ASM */
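
    /* The remaining MMXEXT code lives in external (yasm-assembled) files
     * rather than inline asm, hence the separate HAVE_MMXEXT_EXTERNAL
     * guard below.  The "_exact" half-pel variants registered for
     * VP3/Theora compute the no-rounding average exactly, which those
     * codecs require for correct output. */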
#if HAVE_MMXEXT_EXTERNAL
    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}
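
/* 3DNow! additions: AMD-only versions of the half-pel put/avg functions.
 * Since 3DNow! implies an AMD CPU, the flag also doubles as the "running
 * on AMD" hint used elsewhere in this file (e.g. the
 * add_hfyu_median_prediction choice above and the cachesplit check in the
 * SSSE3 init). */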
static av_cold void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_YASM
    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
        c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;

        c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
        c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
        c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;

        c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
        c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

        c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
        c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
        c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
            c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
            c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
            c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

            c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
            c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
        }
    }

    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_YASM */
}
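
/* SSE additions: 16-byte-aligned block clearing and float vector clipping.
 * The XvMC special case below matters because XvMCCreateBlocks() may hand
 * back blocks without the 16-byte alignment the SSE stores assume. */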
static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */
}
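
/* SSE2 additions.  AV_CPU_FLAG_SSE2SLOW marks CPUs where SSE2 instructions
 * run noticeably slower than their MMX equivalents (early AMD64 parts, for
 * example), so the 16x16 put/avg pointers are left alone there. */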
static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
    const int bit_depth      = avctx->bits_per_raw_sample;
    const int high_bit_depth = bit_depth > 8;

#if HAVE_SSE2_INLINE
    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        if (!high_bit_depth) {
            c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
            c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
        }
    }

    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;

    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }

    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}
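
/* SSSE3 additions: HuffYUV left prediction, int16 windowing and 32-bit
 * byte swapping.  The Atom and "not really sse4" special cases below work
 * around cores where the generic SSSE3 code paths are slow despite the
 * feature flag being set. */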
static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;

    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;

    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}
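
/* SSE4.1 addition: only the packed 32-bit clipping uses it here. */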
static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}
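
/* Entry point: query the CPU flags once and layer the per-instruction-set
 * initializers on top of each other, so the most capable implementation
 * available wins for every function pointer.  The generic dsputil init
 * reaches this roughly as follows (sketch, not the exact caller):
 *
 *     if (ARCH_X86)
 *         ff_dsputil_init_mmx(c, avctx);
 */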
av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        dsputil_init_3dnow(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}