/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   = 0x0014001400140014ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   = 0x0035003500350035ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#if HAVE_YASM
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                                      int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);

static void ff_put_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
                                   ptrdiff_t line_size, int h)
{
    ff_put_pixels8_mmxext(block,     pixels,     line_size, h);
    ff_put_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
}

void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);

#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext  ff_put_pixels8_mmxext
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM

#define JUMPALIGN()     __asm__ volatile (".p2align 3" ::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)
#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to generate the constants in registers
// than to access them from memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw          $15, %%"#regd"  \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw         $15, %%"#regd"   \n\t"           \
        "psllw          $1, %%"#regd"   \n\t" ::)
#endif
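
/* A sketch of why the register-generated constants above work (values
 * follow from the instruction semantics, not from anything else in this
 * file):
 *
 *   pcmpeqd  mm, mm   -> mm = 0xFFFFFFFFFFFFFFFF  (all ones)
 *   psrlw    $15, mm  -> mm = 0x0001000100010001  (word 1s)
 *   packuswb mm, mm   -> mm = 0x0101010101010101  (byte 1s: BONE)
 *   psllw    $1,  mm  -> mm = 0x0002000200020002  (word 2s: WTWO)
 *
 * This avoids the GOT-relative memory access that loading a global
 * constant would require in position-independent code. */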
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe)       \
    "movq   "#rega", "#regr"            \n\t"           \
    "pand   "#regb", "#regr"            \n\t"           \
    "pxor   "#rega", "#regb"            \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq       $1, "#regb"            \n\t"           \
    "paddb  "#regb", "#regr"            \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe)              \
    "movq   "#rega", "#regr"            \n\t"           \
    "por    "#regb", "#regr"            \n\t"           \
    "pxor   "#rega", "#regb"            \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq       $1, "#regb"            \n\t"           \
    "psubb  "#regb", "#regr"            \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp)   \
    "movq  "#rega", "#regr"             \n\t"                   \
    "movq  "#regc", "#regp"             \n\t"                   \
    "pand  "#regb", "#regr"             \n\t"                   \
    "pand  "#regd", "#regp"             \n\t"                   \
    "pxor  "#rega", "#regb"             \n\t"                   \
    "pxor  "#regc", "#regd"             \n\t"                   \
    "pand   %%mm6, "#regb"              \n\t"                   \
    "pand   %%mm6, "#regd"              \n\t"                   \
    "psrlq     $1, "#regb"              \n\t"                   \
    "psrlq     $1, "#regd"              \n\t"                   \
    "paddb "#regb", "#regr"             \n\t"                   \
    "paddb "#regd", "#regp"             \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)  \
    "movq  "#rega", "#regr"             \n\t"           \
    "movq  "#regc", "#regp"             \n\t"           \
    "por   "#regb", "#regr"             \n\t"           \
    "por   "#regd", "#regp"             \n\t"           \
    "pxor  "#rega", "#regb"             \n\t"           \
    "pxor  "#regc", "#regd"             \n\t"           \
    "pand   %%mm6, "#regb"              \n\t"           \
    "pand   %%mm6, "#regd"              \n\t"           \
    "psrlq     $1, "#regd"              \n\t"           \
    "psrlq     $1, "#regb"              \n\t"           \
    "psubb "#regb", "#regr"             \n\t"           \
    "psubb "#regd", "#regp"             \n\t"
/***********************************/
/* MMX no rounding */
#define NO_RND 1
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef NO_RND

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow

#include "dsputil_avg_template.c"

#undef DEF

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext

#include "dsputil_avg_template.c"

#undef DEF

#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx  put_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");
    pix += line_size * 4;
    p   += 32;

    // if the block below were an exact copy of the code above, the
    // compiler would generate some very strange code; thus the "r"
    // constraints
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
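
/* Reference behaviour of the function above: packuswb converts each signed
 * 16-bit coefficient to an unsigned byte with saturation, i.e. per pixel
 *
 *     pix[i] = av_clip_uint8(block[i]);   // clamp to [0, 255]
 *
 * so the two asm blocks together store one 8x8 block of IDCT output. */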
#define put_signed_pixels_clamped_mmx_half(off)             \
    "movq          "#off"(%2), %%mm1        \n\t"           \
    "movq     16 + "#off"(%2), %%mm2        \n\t"           \
    "movq     32 + "#off"(%2), %%mm3        \n\t"           \
    "movq     48 + "#off"(%2), %%mm4        \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"           \
    "paddb              %%mm0, %%mm1        \n\t"           \
    "paddb              %%mm0, %%mm2        \n\t"           \
    "paddb              %%mm0, %%mm3        \n\t"           \
    "paddb              %%mm0, %%mm4        \n\t"           \
    "movq               %%mm1, (%0)         \n\t"           \
    "movq               %%mm2, (%0, %3)     \n\t"           \
    "movq               %%mm3, (%0, %3, 2)  \n\t"           \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
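
/* The trick above: packsswb saturates the signed coefficients to
 * [-128, 127], and adding the 0x80 bytes held in %%mm0 (a wrapping paddb)
 * shifts that range to [0, 255].  Scalar equivalent, per pixel:
 *
 *     pixels[i] = av_clip_int8(block[i]) + 128;
 */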
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align     3                 \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq 8(%1    ), %%mm4          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq     %%mm0, (%2)           \n\t"
        "movq     %%mm4, 8(%2)          \n\t"
        "movq     %%mm1, (%2, %3)       \n\t"
        "movq     %%mm5, 8(%2, %3)      \n\t"
        "add  %%"REG_a", %1             \n\t"
        "add  %%"REG_a", %2             \n\t"
        "subl        $4, %0             \n\t"
        "jnz         1b                 \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
#define CLEAR_BLOCKS(name, n)                           \
static void name(int16_t *blocks)                       \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1, %%"REG_a"          \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7, (%0, %%"REG_a")    \n\t"           \
        "movq %%mm7, 8(%0, %%"REG_a")   \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
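
/* Loop idiom used above: %0 points at the *end* of the block array and
 * REG_a starts at the negative byte count, so a single "add $32 / js 1b"
 * both advances the index and terminates the loop when it reaches zero;
 * no separate compare instruction is needed. */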
static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
        );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1, %%"REG_a"           \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128, %%"REG_a"           \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
        );
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js           1b                \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
        );
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}
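
/* Pattern worth noting: the asm handles 16 bytes per iteration while
 * i < w - 15, and the plain C loop afterwards adds the remaining 0..15
 * tail bytes, so arbitrary widths are handled without reading past the
 * end of either buffer. */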
#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;

    __asm__ volatile (
        "mov          %7, %3            \n"
        "1:                             \n"
        "movzbl (%3, %4), %2            \n"
        "mov          %2, %k3           \n"
        "sub         %b1, %b3           \n"
        "add         %b0, %b3           \n"
        "mov          %2, %1            \n"
        "cmp          %0, %2            \n"
        "cmovg        %0, %2            \n"
        "cmovg        %1, %0            \n"
        "cmp         %k3, %0            \n"
        "cmovg       %k3, %0            \n"
        "mov          %7, %3            \n"
        "cmp          %2, %0            \n"
        "cmovl        %2, %0            \n"
        "add    (%6, %4), %b0           \n"
        "mov         %b0, (%5, %4)      \n"
        "inc          %4                \n"
        "jl           1b                \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
        );
    *left     = l;
    *left_top = tl;
}
#endif
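
/* Reference for the cmov version above: HuffYUV's median predictor clamps
 * the gradient l + t - tl between the left and top neighbours.  A scalar
 * sketch of one iteration (mid_pred() is the median-of-three helper from
 * the codec utility headers):
 *
 *     t      = top[i];
 *     pred   = mid_pred(l, t, l + t - tl);
 *     dst[i] = l = pred + diff[i];
 *     tl     = t;
 */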
/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1, (%0, %2)     \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
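
/* The punpcklbw/punpcklwd/punpckldq cascade above is the classic MMX byte
 * splat: starting from the edge pixel X it produces XX, then XXXX, then
 * XXXXXXXX, which is stored over the 8 (or 16) padding bytes to the left
 * and right of each line. */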
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
#define QPEL_OP(OPNAME, ROUNDER, RND, MMX)                              \
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src,  \
                                           ptrdiff_t stride)            \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,       \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}
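
/* How the macro above builds the 16 quarter-pel functions (mcXY, where X
 * and Y are the horizontal/vertical quarter-pel phase): full-pel (mc00)
 * is a plain copy; the pure half-pel phases (mc20/mc02) run the MPEG-4
 * qpel lowpass filter directly into dst; the remaining positions compute
 * one or two filtered intermediates ("halfH"/"halfHV" on the stack) and
 * blend them with the source or with each other via the pixels*_l2
 * averaging helpers. */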
QPEL_OP(put_,        ff_pw_16, _,        mmxext)
QPEL_OP(avg_,        ff_pw_16, _,        mmxext)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, mmxext)
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_mmx(dst, src, stride, 16);
}

void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_mmx(dst, src, stride, 8);
}

void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_mmx(dst, src, stride, 16);
}

static void gmc_mmx(uint8_t *dst, uint8_t *src,
                    int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy,
                    int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1 << shift)
        );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq    %0, %%mm4      \n\t"
                "movq    %1, %%mm5      \n\t"
                "paddw   %2, %%mm4      \n\t"
                "paddw   %3, %%mm5      \n\t"
                "movq %%mm4, %0         \n\t"
                "movq %%mm5, %1         \n\t"
                "psrlw  $12, %%mm4      \n\t"
                "psrlw  $12, %%mm5      \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
                );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)
                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"
                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
                );
            src += stride;
        }
        src += 4 - h * stride;
    }
}
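
/* Per pixel, the asm above evaluates standard bilinear interpolation with
 * scale s = 1 << shift and fractional offsets dx, dy:
 *
 *     dst = (src[0,0] * (s - dx) * (s - dy) +
 *            src[1,0] * dx       * (s - dy) +
 *            src[0,1] * (s - dx) * dy       +
 *            src[1,1] * dx       * dy       + r) >> (2 * shift);
 *
 * matching the ff_gmc_c() fallback that is taken for the hard cases. */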
/* CAVS-specific */
void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_mmx(dst, src, stride, 8);
}

void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_mmx(dst, src, stride, 16);
}

void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC-1-specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int rnd)
{
    put_pixels8_mmx(dst, src, stride, 8);
}

static void vector_clipf_sse(float *dst, const float *src,
                             float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;
    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
        );
}
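
/* Scalar equivalent of vector_clipf_sse(), for reference; the asm clips
 * 16 floats per iteration, walking backwards from the end of the buffer:
 *
 *     for (i = 0; i < len; i++)
 *         dst[i] = FFMIN(FFMAX(src[i], min), max);
 */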
#endif /* HAVE_INLINE_ASM */

void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                          \
    do {                                                                     \
    c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
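
/* The table slot is 4 * (vertical qpel phase) + (horizontal qpel phase),
 * as the ordering above shows (slot 1 = mc10, slot 4 = mc01, ...).  A
 * hypothetical caller (the variable names here are illustrative, not
 * from this file) would select a function like this:
 *
 *     int dxy = (my & 3) * 4 + (mx & 3);
 *     c->put_qpel_pixels_tab[0][dxy](dst, src, stride);
 */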
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                 \
    do {                                                                    \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_INLINE_ASM
    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        c->draw_edges   = draw_edges_mmx;

        SET_HPEL_FUNCS(put,        [0], 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
        SET_HPEL_FUNCS(avg,        [0], 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd,    , 16, mmx);
        SET_HPEL_FUNCS(put,        [1],  8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
        SET_HPEL_FUNCS(avg,        [1],  8, mmx);

        switch (avctx->idct_algo) {
        case FF_IDCT_AUTO:
        case FF_IDCT_SIMPLEMMX:
            c->idct_put              = ff_simple_idct_put_mmx;
            c->idct_add              = ff_simple_idct_add_mmx;
            c->idct                  = ff_simple_idct_mmx;
            c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            break;
        case FF_IDCT_XVIDMMX:
            c->idct_put = ff_idct_xvid_mmx_put;
            c->idct_add = ff_idct_xvid_mmx_add;
            c->idct     = ff_idct_xvid_mmx;
            break;
        }
    }

    c->gmc = gmc_mmx;

    c->add_bytes = add_bytes_mmx;
#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_YASM
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
        c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;

        c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
        c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
        c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;

        c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
        c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

        c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
        c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
        c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
    }

    if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        if (!high_bit_depth) {
            c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
            c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
            c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
            c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

            c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
            c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
        }
    }
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put = ff_idct_xvid_mmxext_put;
        c->idct_add = ff_idct_xvid_mmxext_add;
        c->idct     = ff_idct_xvid_mmxext;
    }
#endif /* HAVE_INLINE_ASM */

#if HAVE_MMXEXT_EXTERNAL
    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_YASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
        c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;

        c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
        c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
        c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;

        c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
        c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

        c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
        c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
        c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
            c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
            c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
            c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

            c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
            c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
        }
    }

    if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
                               avctx->codec_id == AV_CODEC_ID_THEORA)) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_INLINE_ASM
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
    }

    c->vector_clipf = vector_clipf_sse;
#endif /* HAVE_INLINE_ASM */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

#if HAVE_SSE2_INLINE
    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        if (!high_bit_depth) {
            c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
            c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
        }
    }

    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;

    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }

    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;

    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;

    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}

av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX)
        dsputil_init_mmx(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        dsputil_init_3dnow(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}