/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
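/* naming: ff_pw_N replicates N in packed 16-bit words, ff_pb_N in packed
 * bytes; 8-byte (uint64_t) constants feed MMX, 16-byte (xmm_reg) SSE2 */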
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1)    = { 0x0001000100010001ULL, 0x0001000100010001ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_2)    = { 0x0002000200020002ULL, 0x0002000200020002ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3)    = { 0x0003000300030003ULL, 0x0003000300030003ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4)    = { 0x0004000400040004ULL, 0x0004000400040004ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5)    = { 0x0005000500050005ULL, 0x0005000500050005ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8)    = { 0x0008000800080008ULL, 0x0008000800080008ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9)    = { 0x0009000900090009ULL, 0x0009000900090009ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15)   = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16)   = { 0x0010001000100010ULL, 0x0010001000100010ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17)   = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18)   = { 0x0012001200120012ULL, 0x0012001200120012ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20)   = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27)   = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28)   = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32)   = { 0x0020002000200020ULL, 0x0020002000200020ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42)   = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53)   = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63)   = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64)   = { 0x0040004000400040ULL, 0x0040004000400040ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96)   = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128)  = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255)  = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512)  = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0)    = { 0x0000000000000000ULL, 0x0000000000000000ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1)    = { 0x0101010101010101ULL, 0x0101010101010101ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3)    = { 0x0303030303030303ULL, 0x0303030303030303ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4)    = { 0x0404040404040404ULL, 0x0404040404040404ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7)    = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F)   = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F)   = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80)   = { 0x8080808080808080ULL, 0x8080808080808080ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81)   = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1)   = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8)   = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC)   = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE)   = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#if HAVE_INLINE_ASM

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
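
/* load 0xfefefefefefefefe without touching memory: pcmpeqd sets every
 * bit, then paddb doubles each 0xFF byte to 0xFE (mod 256) */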
#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "paddb %%"#regd", %%"#regd" \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to synthesize these constants in
// registers this way than to access them through memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "packuswb %%"#regd", %%"#regd" \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
        "pcmpeqd %%"#regd", %%"#regd" \n\t" \
        "psrlw $15, %%"#regd" \n\t" \
        "psllw $1, %%"#regd" \n\t" ::)
#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
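// byte-wise averages without a 9-bit intermediate:
//   truncating: (a + b)     >> 1 == (a & b) + (((a ^ b) & 0xfe) >> 1)
//   rounding:   (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xfe) >> 1)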
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "paddb "#regb", "#regr" \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq "#rega", "#regr" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pand "#regfe", "#regb" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "pand "#regb", "#regr" \n\t" \
    "pand "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "paddb "#regb", "#regr" \n\t" \
    "paddb "#regd", "#regp" \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq "#rega", "#regr" \n\t" \
    "movq "#regc", "#regp" \n\t" \
    "por "#regb", "#regr" \n\t" \
    "por "#regd", "#regp" \n\t" \
    "pxor "#rega", "#regb" \n\t" \
    "pxor "#regc", "#regd" \n\t" \
    "pand %%mm6, "#regb" \n\t" \
    "pand %%mm6, "#regd" \n\t" \
    "psrlq $1, "#regd" \n\t" \
    "psrlq $1, "#regb" \n\t" \
    "psubb "#regb", "#regr" \n\t" \
    "psubb "#regd", "#regp" \n\t"

/***********************************/
/* MMX no rounding */

#define NO_RND 1
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef NO_RND

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define SKIP_FOR_3DNOW

#include "dsputil_avg_template.c"

#undef DEF
#undef PAVGB
#undef SKIP_FOR_3DNOW

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext

/* Introduced only in MMXEXT set */
#define PAVGB "pavgb"

#include "dsputil_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmxext put_pixels16_mmx
#define put_pixels8_mmxext put_pixels8_mmx
#define put_pixels4_mmxext put_pixels4_mmx
#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmxext put_no_rnd_pixels8_mmx
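
/* a plain full-pel "put" involves no interpolation, so rounding is moot
 * and the no_rnd/mmxext variants can alias the plain MMX copy routines */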
/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;

    /* unrolled loop */
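    /* packuswb saturates each signed 16-bit coefficient to an unsigned
     * byte, so the [0, 255] clamp comes for free */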
    __asm__ volatile (
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");

    pix += line_size * 4;
    p   += 32;

    // If this block were an exact copy of the one above, the compiler
    // would generate some very strange code, thus the "r" constraints.
    __asm__ volatile (
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t" \
    "movq 16 + "#off"(%2), %%mm2 \n\t" \
    "movq 32 + "#off"(%2), %%mm3 \n\t" \
    "movq 48 + "#off"(%2), %%mm4 \n\t" \
    "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
    "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
    "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
    "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
    "paddb %%mm0, %%mm1 \n\t" \
    "paddb %%mm0, %%mm2 \n\t" \
    "paddb %%mm0, %%mm3 \n\t" \
    "paddb %%mm0, %%mm4 \n\t" \
    "movq %%mm1, (%0) \n\t" \
    "movq %%mm2, (%0, %3) \n\t" \
    "movq %%mm3, (%0, %3, 2) \n\t" \
    "movq %%mm4, (%0, %1) \n\t"
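
/* packsswb clamps the coefficients to [-128, 127]; adding 0x80 from
 * ff_pb_80 (held in mm0) then rebiases them into the unsigned [0, 255] range */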
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
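
/* add the IDCT residual to the existing prediction: unpack the pixels to
 * 16-bit words, add with signed saturation (paddsw), then repack with
 * unsigned saturation (packuswb) to clamp the result to [0, 255] */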
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             int line_size, int h)
{
    __asm__ volatile (
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1 ), %%mm0 \n\t"
        "movq 8(%1 ), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

#define CLEAR_BLOCKS(name, n) \
static void name(int16_t *blocks) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "mov %1, %%"REG_a" \n\t" \
        "1: \n\t" \
        "movq %%mm7, (%0, %%"REG_a") \n\t" \
        "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
        "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
        "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
        "add $32, %%"REG_a" \n\t" \
        "js 1b \n\t" \
        :: "r"(((uint8_t *)blocks) + 128 * n), \
           "i"(-128 * n) \
        : "%"REG_a \
        ); \
}
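
/* the store loop above counts a negative byte offset up to zero, so one
 * "js" both advances through the blocks and terminates the loop */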
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
        );
}

static void clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        "js 1b \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
        );
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        "js 1b \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
        );
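    /* scalar tail: the MMX loop stops 15 bytes short of w; add the rest here */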
    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
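/* HuffYUV median prediction: dst[i] = mid(l, t, l + t - tl) + diff[i],
 * with the three-way median evaluated branch-free via cmov */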
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                            const uint8_t *diff, int w,
                                            int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;

    __asm__ volatile (
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3, %4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6, %4), %b0 \n"
        "mov %b0, (%5, %4) \n"
        "inc %4 \n"
        "jl 1b \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
        );
    *left     = l;
    *left_top = tl;
}
#endif
static inline void transpose4x4(uint8_t *dst, uint8_t *src,
                                x86_reg dst_stride, x86_reg src_stride)
{
    __asm__ volatile ( // FIXME could save 1 instruction if done as 8x4 ...
        "movd (%1), %%mm0 \n\t"
        "add %3, %1 \n\t"
        "movd (%1), %%mm1 \n\t"
        "movd (%1,%3,1), %%mm2 \n\t"
        "movd (%1,%3,2), %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, (%0) \n\t"
        "add %2, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, (%0) \n\t"
        "movd %%mm1, (%0,%2,1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%0,%2,2) \n\t"
        : "+&r" (dst),
          "+&r" (src)
        : "r" (dst_stride),
          "r" (src_stride)
        : "memory"
        );
}
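
/* H.263 in-loop deblocking of one 8-pixel-wide edge: %0..%3 hold the two
 * rows (or transposed columns) on each side of the block boundary, %4 is
 * 2 * strength from the qscale table and %5 is the ff_pb_FC mask */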
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t" \
    "movq %0, %%mm0 \n\t" \
    "movq %0, %%mm1 \n\t" \
    "movq %3, %%mm2 \n\t" \
    "movq %3, %%mm3 \n\t" \
    "punpcklbw %%mm7, %%mm0 \n\t" \
    "punpckhbw %%mm7, %%mm1 \n\t" \
    "punpcklbw %%mm7, %%mm2 \n\t" \
    "punpckhbw %%mm7, %%mm3 \n\t" \
    "psubw %%mm2, %%mm0 \n\t" \
    "psubw %%mm3, %%mm1 \n\t" \
    "movq %1, %%mm2 \n\t" \
    "movq %1, %%mm3 \n\t" \
    "movq %2, %%mm4 \n\t" \
    "movq %2, %%mm5 \n\t" \
    "punpcklbw %%mm7, %%mm2 \n\t" \
    "punpckhbw %%mm7, %%mm3 \n\t" \
    "punpcklbw %%mm7, %%mm4 \n\t" \
    "punpckhbw %%mm7, %%mm5 \n\t" \
    "psubw %%mm2, %%mm4 \n\t" \
    "psubw %%mm3, %%mm5 \n\t" \
    "psllw $2, %%mm4 \n\t" \
    "psllw $2, %%mm5 \n\t" \
    "paddw %%mm0, %%mm4 \n\t" \
    "paddw %%mm1, %%mm5 \n\t" \
    "pxor %%mm6, %%mm6 \n\t" \
    "pcmpgtw %%mm4, %%mm6 \n\t" \
    "pcmpgtw %%mm5, %%mm7 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "pxor %%mm7, %%mm5 \n\t" \
    "psubw %%mm6, %%mm4 \n\t" \
    "psubw %%mm7, %%mm5 \n\t" \
    "psrlw $3, %%mm4 \n\t" \
    "psrlw $3, %%mm5 \n\t" \
    "packuswb %%mm5, %%mm4 \n\t" \
    "packsswb %%mm7, %%mm6 \n\t" \
    "pxor %%mm7, %%mm7 \n\t" \
    "movd %4, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "punpcklbw %%mm2, %%mm2 \n\t" \
    "psubusb %%mm4, %%mm2 \n\t" \
    "movq %%mm2, %%mm3 \n\t" \
    "psubusb %%mm4, %%mm3 \n\t" \
    "psubb %%mm3, %%mm2 \n\t" \
    "movq %1, %%mm3 \n\t" \
    "movq %2, %%mm4 \n\t" \
    "pxor %%mm6, %%mm3 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "paddusb %%mm2, %%mm3 \n\t" \
    "psubusb %%mm2, %%mm4 \n\t" \
    "pxor %%mm6, %%mm3 \n\t" \
    "pxor %%mm6, %%mm4 \n\t" \
    "paddusb %%mm2, %%mm2 \n\t" \
    "packsswb %%mm1, %%mm0 \n\t" \
    "pcmpgtb %%mm0, %%mm7 \n\t" \
    "pxor %%mm7, %%mm0 \n\t" \
    "psubb %%mm7, %%mm0 \n\t" \
    "movq %%mm0, %%mm1 \n\t" \
    "psubusb %%mm2, %%mm0 \n\t" \
    "psubb %%mm0, %%mm1 \n\t" \
    "pand %5, %%mm1 \n\t" \
    "psrlw $2, %%mm1 \n\t" \
    "pxor %%mm7, %%mm1 \n\t" \
    "psubb %%mm7, %%mm1 \n\t" \
    "movq %0, %%mm5 \n\t" \
    "movq %3, %%mm6 \n\t" \
    "psubb %%mm1, %%mm5 \n\t" \
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];

        __asm__ volatile (
            H263_LOOP_FILTER
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m"(*(uint64_t*)(src - 2 * stride)),
              "+m"(*(uint64_t*)(src - 1 * stride)),
              "+m"(*(uint64_t*)(src + 0 * stride)),
              "+m"(*(uint64_t*)(src + 1 * stride))
            : "g"(2 * strength), "m"(ff_pb_FC)
            );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
{
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength = ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp)[4];
        uint8_t *btemp = (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp, src, 8, stride);
        transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
        __asm__ volatile (
            H263_LOOP_FILTER // 5 3 4 6
            : "+m"(temp[0]),
              "+m"(temp[1]),
              "+m"(temp[2]),
              "+m"(temp[3])
            : "g"(2 * strength), "m"(ff_pb_FC)
            );

        __asm__ volatile (
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0, %2) \n\t"
            "movd %%mm3, (%0, %2, 2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0, %3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1, %2) \n\t"
            "movd %%mm6, (%1, %2, 2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1, %3) \n\t"
            :: "r"(src),
               "r"(src + 4 * stride),
               "r"((x86_reg)stride),
               "r"((x86_reg)(3 * stride))
            );
    }
}
/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                           int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
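
/* one output sample of the MPEG-4 quarter-pel FIR filter with taps
 * (-1, 3, -6, 20, 20, -6, 3, -1): out = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5,
 * where x1..x4 are the symmetric tap-pair sums from the centre outwards */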
#define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd, \
                   in0, in1, in2, in7, out, OP) \
    "paddw "#m4", "#m3" \n\t" /* x1 */ \
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */ \
    "pmullw "#m3", %%mm4 \n\t" /* 20x1 */ \
    "movq "#in7", "#m3" \n\t" /* d */ \
    "movq "#in0", %%mm5 \n\t" /* D */ \
    "paddw "#m3", %%mm5 \n\t" /* x4 */ \
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */ \
    "movq "#in1", %%mm5 \n\t" /* C */ \
    "movq "#in2", %%mm6 \n\t" /* B */ \
    "paddw "#m6", %%mm5 \n\t" /* x3 */ \
    "paddw "#m5", %%mm6 \n\t" /* x2 */ \
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */ \
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */ \
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */ \
    "paddw "#rnd", %%mm4 \n\t" /* x2 */ \
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
    "psraw $5, %%mm5 \n\t" \
    "packuswb %%mm5, %%mm5 \n\t" \
    OP(%%mm5, out, %%mm7, d)
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMXEXT) \
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, \
                                                    uint8_t *src, \
                                                    int dstStride, \
                                                    int srcStride, \
                                                    int h) \
{ \
    uint64_t temp; \
\
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
        "paddw %%mm3, %%mm5 \n\t" /* b */ \
        "paddw %%mm2, %%mm6 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
        "paddw %%mm4, %%mm0 \n\t" /* a */ \
        "paddw %%mm1, %%mm5 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
        "paddw %6, %%mm6 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        "movq %%mm0, %5 \n\t" \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */ \
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */ \
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */ \
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */ \
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */ \
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */ \
        "paddw %%mm0, %%mm2 \n\t" /* b */ \
        "paddw %%mm5, %%mm3 \n\t" /* c */ \
        "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */ \
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */ \
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */ \
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
        "paddw %%mm2, %%mm1 \n\t" /* a */ \
        "paddw %%mm6, %%mm4 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */ \
        "paddw %6, %%mm1 \n\t" \
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */ \
        "psraw $5, %%mm3 \n\t" \
        "movq %5, %%mm1 \n\t" \
        "packuswb %%mm3, %%mm1 \n\t" \
        OP_MMXEXT(%%mm1, (%1), %%mm4, q) \
        /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */ \
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */ \
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */ \
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */ \
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */ \
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */ \
        "paddw %%mm1, %%mm5 \n\t" /* b */ \
        "paddw %%mm4, %%mm0 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */ \
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */ \
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */ \
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */ \
        "paddw %%mm3, %%mm2 \n\t" /* d */ \
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */ \
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */ \
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */ \
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */ \
        "paddw %%mm2, %%mm6 \n\t" /* a */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */ \
        "paddw %6, %%mm0 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */ \
        /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */ \
\
        "paddw %%mm5, %%mm3 \n\t" /* a */ \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */ \
        "paddw %%mm4, %%mm6 \n\t" /* b */ \
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */ \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */ \
        "paddw %%mm1, %%mm4 \n\t" /* c */ \
        "paddw %%mm2, %%mm5 \n\t" /* d */ \
        "paddw %%mm6, %%mm6 \n\t" /* 2b */ \
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */ \
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */ \
        "paddw %6, %%mm4 \n\t" \
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm4 \n\t" \
        "packuswb %%mm4, %%mm0 \n\t" \
        OP_MMXEXT(%%mm0, 8(%1), %%mm4, q) \
\
        "add %3, %0 \n\t" \
        "add %4, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+a"(src), "+c"(dst), "+D"(h) \
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, \
                                                   uint8_t *src, \
                                                   int dstStride, \
                                                   int srcStride, \
                                                   int h) \
{ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
        "paddw %%mm3, %%mm5 \n\t" /* b */ \
        "paddw %%mm2, %%mm6 \n\t" /* c */ \
        "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
        "paddw %%mm4, %%mm0 \n\t" /* a */ \
        "paddw %%mm1, %%mm5 \n\t" /* d */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
        "paddw %5, %%mm6 \n\t" \
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm0 \n\t" \
        /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */ \
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */ \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */ \
        "paddw %%mm5, %%mm1 \n\t" /* a */ \
        "paddw %%mm6, %%mm2 \n\t" /* b */ \
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */ \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */ \
        "paddw %%mm6, %%mm3 \n\t" /* c */ \
        "paddw %%mm5, %%mm4 \n\t" /* d */ \
        "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */ \
        "paddw %5, %%mm1 \n\t" \
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
        "psraw $5, %%mm3 \n\t" \
        "packuswb %%mm3, %%mm0 \n\t" \
        OP_MMXEXT(%%mm0, (%1), %%mm4, q) \
\
        "add %3, %0 \n\t" \
        "add %4, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+a"(src), "+c"(dst), "+d"(h) \
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER) \
        : "memory" \
        ); \
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, \
                                                     uint8_t *src, \
                                                     int dstStride, \
                                                     int srcStride) \
{ \
    uint64_t temp[17 * 4]; \
    uint64_t *temp_ptr = temp; \
    int count = 17; \
\
    /* FIXME unroll */ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq (%0), %%mm1 \n\t" \
        "movq 8(%0), %%mm2 \n\t" \
        "movq 8(%0), %%mm3 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "movq %%mm0, (%1) \n\t" \
        "movq %%mm1, 17 * 8(%1) \n\t" \
        "movq %%mm2, 2 * 17 * 8(%1) \n\t" \
        "movq %%mm3, 3 * 17 * 8(%1) \n\t" \
        "add $8, %1 \n\t" \
        "add %3, %0 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(src), "+r"(temp_ptr), "+r"(count) \
        : "r"((x86_reg)srcStride) \
        : "memory" \
        ); \
\
    temp_ptr = temp; \
    count    = 4; \
\
    /* FIXME reorder for speed */ \
    __asm__ volatile ( \
        /* "pxor %%mm7, %%mm7 \n\t" */ \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq 8(%0), %%mm1 \n\t" \
        "movq 16(%0), %%mm2 \n\t" \
        "movq 24(%0), %%mm3 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
\
        "add $136, %0 \n\t" \
        "add %6, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
          "g"(4 - 14 * (x86_reg)dstStride) \
        : "memory" \
        ); \
} \
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
                                                    uint8_t *src, \
                                                    int dstStride, \
                                                    int srcStride) \
{ \
    uint64_t temp[9 * 2]; \
    uint64_t *temp_ptr = temp; \
    int count = 9; \
\
    /* FIXME unroll */ \
    __asm__ volatile ( \
        "pxor %%mm7, %%mm7 \n\t" \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq (%0), %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "movq %%mm0, (%1) \n\t" \
        "movq %%mm1, 9*8(%1) \n\t" \
        "add $8, %1 \n\t" \
        "add %3, %0 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(src), "+r"(temp_ptr), "+r"(count) \
        : "r"((x86_reg)srcStride) \
        : "memory" \
        ); \
\
    temp_ptr = temp; \
    count    = 2; \
\
    /* FIXME reorder for speed */ \
    __asm__ volatile ( \
        /* "pxor %%mm7, %%mm7 \n\t" */ \
        "1: \n\t" \
        "movq (%0), %%mm0 \n\t" \
        "movq 8(%0), %%mm1 \n\t" \
        "movq 16(%0), %%mm2 \n\t" \
        "movq 24(%0), %%mm3 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
\
        "add $72, %0 \n\t" \
        "add %6, %1 \n\t" \
        "decl %2 \n\t" \
        "jnz 1b \n\t" \
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
        : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
          /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
          "g"(4 - 6 * (x86_reg)dstStride) \
        : "memory" \
        ); \
} \
\
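/* qpelN_mcXY: X and Y are the quarter-pel fractional offsets (in quarters) in x and y */ \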
  1124. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  1125. int stride) \
  1126. { \
  1127. OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
  1128. } \
  1129. \
  1130. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  1131. int stride) \
  1132. { \
  1133. uint64_t temp[8]; \
  1134. uint8_t * const half = (uint8_t*)temp; \
  1135. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  1136. stride, 8); \
  1137. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
  1138. } \
  1139. \
  1140. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  1141. int stride) \
  1142. { \
  1143. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
  1144. stride, 8); \
  1145. } \
  1146. \
  1147. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  1148. int stride) \
  1149. { \
  1150. uint64_t temp[8]; \
  1151. uint8_t * const half = (uint8_t*)temp; \
  1152. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  1153. stride, 8); \
  1154. OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \
  1155. stride, 8); \
  1156. } \
  1157. \
  1158. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  1159. int stride) \
  1160. { \
  1161. uint64_t temp[8]; \
  1162. uint8_t * const half = (uint8_t*)temp; \
  1163. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
  1164. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
  1165. } \
  1166. \
  1167. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  1168. int stride) \
  1169. { \
  1170. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride); \
  1171. } \
  1172. \
  1173. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  1174. int stride) \
  1175. { \
  1176. uint64_t temp[8]; \
  1177. uint8_t * const half = (uint8_t*)temp; \
  1178. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
  1179. OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride, \
  1180. stride, 8); \
  1181. } \
  1182. \
  1183. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  1184. int stride) \
  1185. { \
  1186. uint64_t half[8 + 9]; \
  1187. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1188. uint8_t * const halfHV = ((uint8_t*)half); \
  1189. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1190. stride, 9); \
  1191. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1192. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1193. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1194. } \
  1195. \
  1196. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  1197. int stride) \
  1198. { \
  1199. uint64_t half[8 + 9]; \
  1200. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1201. uint8_t * const halfHV = ((uint8_t*)half); \
  1202. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1203. stride, 9); \
  1204. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1205. stride, 9); \
  1206. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1207. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1208. } \
  1209. \
  1210. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  1211. int stride) \
  1212. { \
  1213. uint64_t half[8 + 9]; \
  1214. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1215. uint8_t * const halfHV = ((uint8_t*)half); \
  1216. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1217. stride, 9); \
  1218. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1219. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1220. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1221. } \
  1222. \
  1223. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  1224. int stride) \
  1225. { \
  1226. uint64_t half[8 + 9]; \
  1227. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1228. uint8_t * const halfHV = ((uint8_t*)half); \
  1229. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1230. stride, 9); \
  1231. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1232. stride, 9); \
  1233. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1234. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1235. } \
  1236. \
  1237. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  1238. int stride) \
  1239. { \
  1240. uint64_t half[8 + 9]; \
  1241. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1242. uint8_t * const halfHV = ((uint8_t*)half); \
  1243. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1244. stride, 9); \
  1245. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1246. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1247. } \
  1248. \
  1249. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  1250. int stride) \
  1251. { \
  1252. uint64_t half[8 + 9]; \
  1253. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1254. uint8_t * const halfHV = ((uint8_t*)half); \
  1255. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1256. stride, 9); \
  1257. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1258. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1259. } \
  1260. \
  1261. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  1262. int stride) \
  1263. { \
  1264. uint64_t half[8 + 9]; \
  1265. uint8_t * const halfH = ((uint8_t*)half); \
  1266. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1267. stride, 9); \
  1268. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1269. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1270. } \
  1271. \
  1272. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  1273. int stride) \
  1274. { \
  1275. uint64_t half[8 + 9]; \
  1276. uint8_t * const halfH = ((uint8_t*)half); \
  1277. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1278. stride, 9); \
  1279. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1280. stride, 9); \
  1281. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1282. } \
  1283. \
  1284. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  1285. int stride) \
  1286. { \
  1287. uint64_t half[9]; \
  1288. uint8_t * const halfH = ((uint8_t*)half); \
  1289. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1290. stride, 9); \
  1291. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1292. } \
  1293. \
  1294. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  1295. int stride) \
  1296. { \
  1297. OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
  1298. } \
  1299. \
  1300. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  1301. int stride) \
  1302. { \
  1303. uint64_t temp[32]; \
  1304. uint8_t * const half = (uint8_t*)temp; \
  1305. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  1306. stride, 16); \
  1307. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
  1308. } \
  1309. \
  1310. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  1311. int stride) \
  1312. { \
  1313. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
  1314. stride, stride, 16); \
  1315. } \
  1316. \
  1317. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  1318. int stride) \
  1319. { \
  1320. uint64_t temp[32]; \
  1321. uint8_t * const half = (uint8_t*)temp; \
  1322. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  1323. stride, 16); \
  1324. OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \
  1325. stride, stride, 16); \
  1326. } \
  1327. \
  1328. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  1329. int stride) \
  1330. { \
  1331. uint64_t temp[32]; \
  1332. uint8_t * const half = (uint8_t*)temp; \
  1333. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  1334. stride); \
  1335. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
  1336. } \
  1337. \
  1338. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  1339. int stride) \
  1340. { \
  1341. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride); \
  1342. } \
  1343. \
  1344. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  1345. int stride) \
  1346. { \
  1347. uint64_t temp[32]; \
  1348. uint8_t * const half = (uint8_t*)temp; \
  1349. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  1350. stride); \
  1351. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \
  1352. stride, stride, 16); \
  1353. } \
  1354. \
  1355. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  1356. int stride) \
  1357. { \
  1358. uint64_t half[16 * 2 + 17 * 2]; \
  1359. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1360. uint8_t * const halfHV = ((uint8_t*)half); \
  1361. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1362. stride, 17); \
  1363. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1364. stride, 17); \
  1365. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1366. 16, 16); \
  1367. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1368. } \
  1369. \
  1370. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  1371. int stride) \
  1372. { \
  1373. uint64_t half[16 * 2 + 17 * 2]; \
  1374. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1375. uint8_t * const halfHV = ((uint8_t*)half); \
  1376. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1377. stride, 17); \
  1378. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1379. stride, 17); \
  1380. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1381. 16, 16); \
  1382. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1383. } \
  1384. \
  1385. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  1386. int stride) \
  1387. { \
  1388. uint64_t half[16 * 2 + 17 * 2]; \
  1389. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1390. uint8_t * const halfHV = ((uint8_t*)half); \
  1391. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1392. stride, 17); \
  1393. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1394. stride, 17); \
  1395. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1396. 16, 16); \
  1397. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1398. 16, 16); \
  1399. } \
  1400. \
  1401. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  1402. int stride) \
  1403. { \
  1404. uint64_t half[16 * 2 + 17 * 2]; \
  1405. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1406. uint8_t * const halfHV = ((uint8_t*)half); \
  1407. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1408. stride, 17); \
  1409. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1410. stride, 17); \
  1411. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1412. 16, 16); \
  1413. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1414. 16, 16); \
  1415. } \
  1416. \
  1417. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  1418. int stride) \
  1419. { \
  1420. uint64_t half[16 * 2 + 17 * 2]; \
  1421. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1422. uint8_t * const halfHV = ((uint8_t*)half); \
  1423. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1424. stride, 17); \
  1425. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1426. 16, 16); \
  1427. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1428. } \
  1429. \
  1430. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  1431. int stride) \
  1432. { \
  1433. uint64_t half[16 * 2 + 17 * 2]; \
  1434. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1435. uint8_t * const halfHV = ((uint8_t*)half); \
  1436. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1437. stride, 17); \
  1438. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1439. 16, 16); \
  1440. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1441. 16, 16); \
  1442. } \
  1443. \
  1444. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  1445. int stride) \
  1446. { \
  1447. uint64_t half[17 * 2]; \
  1448. uint8_t * const halfH = ((uint8_t*)half); \
  1449. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1450. stride, 17); \
  1451. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1452. stride, 17); \
  1453. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1454. } \
  1455. \
  1456. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  1457. int stride) \
  1458. { \
  1459. uint64_t half[17 * 2]; \
  1460. uint8_t * const halfH = ((uint8_t*)half); \
  1461. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1462. stride, 17); \
  1463. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1464. stride, 17); \
  1465. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1466. } \
  1467. \
  1468. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  1469. int stride) \
  1470. { \
  1471. uint64_t half[17 * 2]; \
  1472. uint8_t * const halfH = ((uint8_t*)half); \
  1473. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1474. stride, 17); \
  1475. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1476. }
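/* Store macros plugged into the QPEL templates above: PUT_OP is a plain
 * "mov" of the computed result to memory, while AVG_MMXEXT_OP loads the
 * destination, averages it with the result via pavgb and stores it back,
 * which is what the avg_* motion-compensation variants need. */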
  1477. #define PUT_OP(a, b, temp, size) \
  1478. "mov"#size" "#a", "#b" \n\t"
  1479. #define AVG_MMXEXT_OP(a, b, temp, size) \
  1480. "mov"#size" "#b", "#temp" \n\t" \
  1481. "pavgb "#temp", "#a" \n\t" \
  1482. "mov"#size" "#a", "#b" \n\t"
  1483. QPEL_BASE(put_, ff_pw_16, _, PUT_OP)
  1484. QPEL_BASE(avg_, ff_pw_16, _, AVG_MMXEXT_OP)
  1485. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP)
  1486. QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmxext)
  1487. QPEL_OP(avg_, ff_pw_16, _, AVG_MMXEXT_OP, mmxext)
  1488. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmxext)
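/* Instantiating the templates generates the full family of subpel
 * functions, e.g. put_qpel16_mc31_mmxext() or avg_qpel8_mc22_mmxext();
 * the _no_rnd_ flavour uses ff_pw_15 instead of ff_pw_16 for the
 * no-rounding MPEG-4 prediction mode. */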
  1489. void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1490. {
  1491. put_pixels8_xy2_mmx(dst, src, stride, 8);
  1492. }
  1493. void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1494. {
  1495. put_pixels16_xy2_mmx(dst, src, stride, 16);
  1496. }
  1497. void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1498. {
  1499. avg_pixels8_xy2_mmx(dst, src, stride, 8);
  1500. }
  1501. void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1502. {
  1503. avg_pixels16_xy2_mmx(dst, src, stride, 16);
  1504. }
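/* Global motion compensation: for the common case (constant fullpel
 * offset over the block, at most 16 bits of subpel motion, block fully
 * inside the picture) gmc_mmx() below does per-pixel bilinear filtering
 * in MMX; every other case falls back to the C version ff_gmc_c(). */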
  1505. static void gmc_mmx(uint8_t *dst, uint8_t *src,
  1506. int stride, int h, int ox, int oy,
  1507. int dxx, int dxy, int dyx, int dyy,
  1508. int shift, int r, int width, int height)
  1509. {
  1510. const int w = 8;
  1511. const int ix = ox >> (16 + shift);
  1512. const int iy = oy >> (16 + shift);
  1513. const int oxs = ox >> 4;
  1514. const int oys = oy >> 4;
  1515. const int dxxs = dxx >> 4;
  1516. const int dxys = dxy >> 4;
  1517. const int dyxs = dyx >> 4;
  1518. const int dyys = dyy >> 4;
  1519. const uint16_t r4[4] = { r, r, r, r };
  1520. const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
  1521. const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
  1522. const uint64_t shift2 = 2 * shift;
  1523. int x, y;
  1524. const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
  1525. const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
  1526. const int dxh = dxy * (h - 1);
  1527. const int dyw = dyx * (w - 1);
  1528. if ( // non-constant fullpel offset (3% of blocks)
  1529. ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
  1530. (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
  1531. // uses more than 16 bits of subpel mv (only at huge resolution)
  1532. || (dxx | dxy | dyx | dyy) & 15 ||
  1533. (unsigned)ix >= width - w ||
  1534. (unsigned)iy >= height - h) {
  1535. // FIXME could still use mmx for some of the rows
  1536. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
  1537. shift, r, width, height);
  1538. return;
  1539. }
  1540. src += ix + iy * stride;
  1541. __asm__ volatile (
  1542. "movd %0, %%mm6 \n\t"
  1543. "pxor %%mm7, %%mm7 \n\t"
  1544. "punpcklwd %%mm6, %%mm6 \n\t"
  1545. "punpcklwd %%mm6, %%mm6 \n\t"
  1546. :: "r"(1<<shift)
  1547. );
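/* mm6 now holds the scale s = 1 << shift broadcast to four words and mm7
 * is zero (used to unpack bytes).  Each output pixel is the usual
 * bilinear blend
 *   (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
 *    src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift),
 * computed four pixels at a time in the loops below. */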
  1548. for (x = 0; x < w; x += 4) {
  1549. uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
  1550. oxs - dxys + dxxs * (x + 1),
  1551. oxs - dxys + dxxs * (x + 2),
  1552. oxs - dxys + dxxs * (x + 3) };
  1553. uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
  1554. oys - dyys + dyxs * (x + 1),
  1555. oys - dyys + dyxs * (x + 2),
  1556. oys - dyys + dyxs * (x + 3) };
  1557. for (y = 0; y < h; y++) {
  1558. __asm__ volatile (
  1559. "movq %0, %%mm4 \n\t"
  1560. "movq %1, %%mm5 \n\t"
  1561. "paddw %2, %%mm4 \n\t"
  1562. "paddw %3, %%mm5 \n\t"
  1563. "movq %%mm4, %0 \n\t"
  1564. "movq %%mm5, %1 \n\t"
  1565. "psrlw $12, %%mm4 \n\t"
  1566. "psrlw $12, %%mm5 \n\t"
  1567. : "+m"(*dx4), "+m"(*dy4)
  1568. : "m"(*dxy4), "m"(*dyy4)
  1569. );
  1570. __asm__ volatile (
  1571. "movq %%mm6, %%mm2 \n\t"
  1572. "movq %%mm6, %%mm1 \n\t"
  1573. "psubw %%mm4, %%mm2 \n\t"
  1574. "psubw %%mm5, %%mm1 \n\t"
  1575. "movq %%mm2, %%mm0 \n\t"
  1576. "movq %%mm4, %%mm3 \n\t"
  1577. "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
  1578. "pmullw %%mm5, %%mm3 \n\t" // dx * dy
  1579. "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
  1580. "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
  1581. "movd %4, %%mm5 \n\t"
  1582. "movd %3, %%mm4 \n\t"
  1583. "punpcklbw %%mm7, %%mm5 \n\t"
  1584. "punpcklbw %%mm7, %%mm4 \n\t"
  1585. "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
  1586. "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
  1587. "movd %2, %%mm5 \n\t"
  1588. "movd %1, %%mm4 \n\t"
  1589. "punpcklbw %%mm7, %%mm5 \n\t"
  1590. "punpcklbw %%mm7, %%mm4 \n\t"
  1591. "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
  1592. "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
  1593. "paddw %5, %%mm1 \n\t"
  1594. "paddw %%mm3, %%mm2 \n\t"
  1595. "paddw %%mm1, %%mm0 \n\t"
  1596. "paddw %%mm2, %%mm0 \n\t"
  1597. "psrlw %6, %%mm0 \n\t"
  1598. "packuswb %%mm0, %%mm0 \n\t"
  1599. "movd %%mm0, %0 \n\t"
  1600. : "=m"(dst[x + y * stride])
  1601. : "m"(src[0]), "m"(src[1]),
  1602. "m"(src[stride]), "m"(src[stride + 1]),
  1603. "m"(*r4), "m"(shift2)
  1604. );
  1605. src += stride;
  1606. }
  1607. src += 4 - h * stride;
  1608. }
  1609. }
  1610. #endif /* HAVE_INLINE_ASM */
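/* Prototypes for functions implemented in external (yasm) assembly; the
 * init code further down wires them into the DSPContext when the
 * corresponding CPU feature and build option are available. */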
  1611. void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  1612. int line_size, int h);
  1613. void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  1614. int line_size, int h);
  1615. void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
  1616. int stride, int h, int x, int y);
  1617. void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
  1618. int stride, int h, int x, int y);
  1619. void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
  1620. int stride, int h, int x, int y);
  1621. void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
  1622. int stride, int h, int x, int y);
  1623. void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
  1624. int stride, int h, int x, int y);
  1625. void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
  1626. int stride, int h, int x, int y);
  1627. void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
  1628. int stride, int h, int x, int y);
  1629. void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
  1630. int stride, int h, int x, int y);
  1631. void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
  1632. int stride, int h, int x, int y);
  1633. void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1634. int stride, int h, int x, int y);
  1635. void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
  1636. int stride, int h, int x, int y);
  1637. void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1638. int stride, int h, int x, int y);
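/* CHROMA_MC declares prototypes for the high-bit-depth chroma MC
 * functions; e.g. CHROMA_MC(put, 4, 10, mmxext) expands to the
 * declaration of ff_put_h264_chroma_mc4_10_mmxext(). */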
  1639. #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
  1640. void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
  1641. (uint8_t *dst, uint8_t *src, \
  1642. int stride, int h, int x, int y);
  1643. CHROMA_MC(put, 2, 10, mmxext)
  1644. CHROMA_MC(avg, 2, 10, mmxext)
  1645. CHROMA_MC(put, 4, 10, mmxext)
  1646. CHROMA_MC(avg, 4, 10, mmxext)
  1647. CHROMA_MC(put, 8, 10, sse2)
  1648. CHROMA_MC(avg, 8, 10, sse2)
  1649. CHROMA_MC(put, 8, 10, avx)
  1650. CHROMA_MC(avg, 8, 10, avx)
  1651. #if HAVE_INLINE_ASM
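/* The mc00 cases handled below are whole-pel positions, so the CAVS and
 * VC-1 wrappers simply forward to the plain 8x8/16x16 copy and average
 * routines. */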
  1652. /* CAVS-specific */
  1653. void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1654. {
  1655. put_pixels8_mmx(dst, src, stride, 8);
  1656. }
  1657. void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1658. {
  1659. avg_pixels8_mmx(dst, src, stride, 8);
  1660. }
  1661. void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1662. {
  1663. put_pixels16_mmx(dst, src, stride, 16);
  1664. }
  1665. void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
  1666. {
  1667. avg_pixels16_mmx(dst, src, stride, 16);
  1668. }
  1669. /* VC-1-specific */
  1670. void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
  1671. int stride, int rnd)
  1672. {
  1673. put_pixels8_mmx(dst, src, stride, 8);
  1674. }
  1675. void ff_avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
  1676. int stride, int rnd)
  1677. {
  1678. avg_pixels8_mmxext(dst, src, stride, 8);
  1679. }
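/* Clamp an array of floats to [min, max] with SSE: min and max are
 * broadcast into xmm4/xmm5 and the loop clips 16 floats (64 bytes) per
 * iteration with maxps/minps, walking backwards from the end of the
 * buffer.  src, dst and len are assumed to be suitably aligned and
 * padded for movaps. */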
  1680. static void vector_clipf_sse(float *dst, const float *src,
  1681. float min, float max, int len)
  1682. {
  1683. x86_reg i = (len - 16) * 4;
  1684. __asm__ volatile (
  1685. "movss %3, %%xmm4 \n\t"
  1686. "movss %4, %%xmm5 \n\t"
  1687. "shufps $0, %%xmm4, %%xmm4 \n\t"
  1688. "shufps $0, %%xmm5, %%xmm5 \n\t"
  1689. "1: \n\t"
  1690. "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
  1691. "movaps 16(%2, %0), %%xmm1 \n\t"
  1692. "movaps 32(%2, %0), %%xmm2 \n\t"
  1693. "movaps 48(%2, %0), %%xmm3 \n\t"
  1694. "maxps %%xmm4, %%xmm0 \n\t"
  1695. "maxps %%xmm4, %%xmm1 \n\t"
  1696. "maxps %%xmm4, %%xmm2 \n\t"
  1697. "maxps %%xmm4, %%xmm3 \n\t"
  1698. "minps %%xmm5, %%xmm0 \n\t"
  1699. "minps %%xmm5, %%xmm1 \n\t"
  1700. "minps %%xmm5, %%xmm2 \n\t"
  1701. "minps %%xmm5, %%xmm3 \n\t"
  1702. "movaps %%xmm0, (%1, %0) \n\t"
  1703. "movaps %%xmm1, 16(%1, %0) \n\t"
  1704. "movaps %%xmm2, 32(%1, %0) \n\t"
  1705. "movaps %%xmm3, 48(%1, %0) \n\t"
  1706. "sub $64, %0 \n\t"
  1707. "jge 1b \n\t"
  1708. : "+&r"(i)
  1709. : "r"(dst), "r"(src), "m"(min), "m"(max)
  1710. : "memory"
  1711. );
  1712. }
  1713. #endif /* HAVE_INLINE_ASM */
  1714. int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
  1715. int order);
  1716. int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
  1717. int order);
  1718. int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
  1719. const int16_t *v3,
  1720. int order, int mul);
  1721. int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
  1722. const int16_t *v3,
  1723. int order, int mul);
  1724. int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
  1725. const int16_t *v3,
  1726. int order, int mul);
  1727. void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
  1728. const int16_t *window, unsigned int len);
  1729. void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
  1730. const int16_t *window, unsigned int len);
  1731. void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
  1732. const int16_t *window, unsigned int len);
  1733. void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
  1734. const int16_t *window, unsigned int len);
  1735. void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
  1736. const int16_t *window, unsigned int len);
  1737. void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
  1738. const int16_t *window, unsigned int len);
  1739. void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
  1740. void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
  1741. void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
  1742. const uint8_t *diff, int w,
  1743. int *left, int *left_top);
  1744. int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
  1745. int w, int left);
  1746. int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
  1747. int w, int left);
  1748. void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
  1749. int32_t min, int32_t max, unsigned int len);
  1750. void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
  1751. int32_t min, int32_t max, unsigned int len);
  1752. void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
  1753. int32_t min, int32_t max, unsigned int len);
  1754. void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
  1755. int32_t min, int32_t max, unsigned int len);
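/* SET_QPEL_FUNCS fills one 16-entry row of a qpel function table; entry
 * x + 4*y holds the mcXY function for quarter-pel offset (x, y), so e.g.
 * SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ) places
 * put_qpel16_mc12_mmxext() at index 9. */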
  1756. #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
  1757. do { \
  1758. c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
  1759. c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
  1760. c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
  1761. c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
  1762. c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
  1763. c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
  1764. c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
  1765. c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
  1766. c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
  1767. c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
  1768. c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
  1769. c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
  1770. c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
  1771. c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
  1772. c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
  1773. c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
  1774. } while (0)
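/* SET_HPEL_FUNCS fills a 4-entry half-pel table: [0] full-pel copy,
 * [1] horizontal 1/2-pel, [2] vertical 1/2-pel, [3] diagonal 1/2-pel.
 * IDX is spliced in verbatim, so it can be an array index such as [0] or
 * empty for the flat avg_no_rnd_pixels_tab. */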
  1775. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  1776. do { \
  1777. c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  1778. c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  1779. c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  1780. c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
  1781. } while (0)
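/* Per-instruction-set init helpers: each one only installs the pointers
 * that its build configuration (HAVE_INLINE_ASM / HAVE_YASM /
 * HAVE_*_EXTERNAL) and the stream's bit depth actually allow, so calling
 * them in order of increasing capability leaves the best available
 * implementation in place. */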
  1782. static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
  1783. {
  1784. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  1785. #if HAVE_INLINE_ASM
  1786. c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
  1787. c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
  1788. c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
  1789. if (!high_bit_depth) {
  1790. c->clear_block = clear_block_mmx;
  1791. c->clear_blocks = clear_blocks_mmx;
  1792. c->draw_edges = draw_edges_mmx;
  1793. SET_HPEL_FUNCS(put, [0], 16, mmx);
  1794. SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
  1795. SET_HPEL_FUNCS(avg, [0], 16, mmx);
  1796. SET_HPEL_FUNCS(avg_no_rnd, , 16, mmx);
  1797. SET_HPEL_FUNCS(put, [1], 8, mmx);
  1798. SET_HPEL_FUNCS(put_no_rnd, [1], 8, mmx);
  1799. SET_HPEL_FUNCS(avg, [1], 8, mmx);
  1800. switch (avctx->idct_algo) {
  1801. case FF_IDCT_AUTO:
  1802. case FF_IDCT_SIMPLEMMX:
  1803. c->idct_put = ff_simple_idct_put_mmx;
  1804. c->idct_add = ff_simple_idct_add_mmx;
  1805. c->idct = ff_simple_idct_mmx;
  1806. c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
  1807. break;
  1808. case FF_IDCT_XVIDMMX:
  1809. c->idct_put = ff_idct_xvid_mmx_put;
  1810. c->idct_add = ff_idct_xvid_mmx_add;
  1811. c->idct = ff_idct_xvid_mmx;
  1812. break;
  1813. }
  1814. }
  1815. c->gmc = gmc_mmx;
  1816. c->add_bytes = add_bytes_mmx;
  1817. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  1818. c->h263_v_loop_filter = h263_v_loop_filter_mmx;
  1819. c->h263_h_loop_filter = h263_h_loop_filter_mmx;
  1820. }
  1821. #endif /* HAVE_INLINE_ASM */
  1822. #if HAVE_YASM
  1823. if (!high_bit_depth && CONFIG_H264CHROMA) {
  1824. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
  1825. c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
  1826. }
  1827. c->vector_clip_int32 = ff_vector_clip_int32_mmx;
  1828. #endif
  1829. }
  1830. static void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
  1831. int mm_flags)
  1832. {
  1833. const int bit_depth = avctx->bits_per_raw_sample;
  1834. const int high_bit_depth = bit_depth > 8;
  1835. #if HAVE_INLINE_ASM
  1836. SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, );
  1837. SET_QPEL_FUNCS(avg_qpel, 1, 8, mmxext, );
  1838. SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, );
  1839. SET_QPEL_FUNCS(put_qpel, 1, 8, mmxext, );
  1840. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
  1841. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmxext, );
  1842. if (!high_bit_depth) {
  1843. c->put_pixels_tab[0][1] = put_pixels16_x2_mmxext;
  1844. c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;
  1845. c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
  1846. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
  1847. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;
  1848. c->put_pixels_tab[1][1] = put_pixels8_x2_mmxext;
  1849. c->put_pixels_tab[1][2] = put_pixels8_y2_mmxext;
  1850. c->avg_pixels_tab[1][0] = avg_pixels8_mmxext;
  1851. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmxext;
  1852. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmxext;
  1853. }
  1854. if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
  1855. if (!high_bit_depth) {
  1856. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
  1857. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
  1858. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmxext;
  1859. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmxext;
  1860. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
  1861. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmxext;
  1862. }
  1863. }
  1864. if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
  1865. c->idct_put = ff_idct_xvid_mmxext_put;
  1866. c->idct_add = ff_idct_xvid_mmxext_add;
  1867. c->idct = ff_idct_xvid_mmxext;
  1868. }
  1869. if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
  1870. avctx->codec_id == AV_CODEC_ID_THEORA)) {
  1871. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmxext;
  1872. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmxext;
  1873. }
  1874. #endif /* HAVE_INLINE_ASM */
  1875. #if HAVE_MMXEXT_EXTERNAL
  1876. if (!high_bit_depth && CONFIG_H264CHROMA) {
  1877. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
  1878. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
  1879. c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
  1880. c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
  1881. }
  1882. if (bit_depth == 10 && CONFIG_H264CHROMA) {
  1883. c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
  1884. c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
  1885. c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
  1886. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
  1887. }
1888. /* the mmxext version is slower than the cmov one on AMD, so only install it when 3DNow! (i.e. AMD) is absent */
  1889. if (!(mm_flags & AV_CPU_FLAG_3DNOW))
  1890. c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;
  1891. c->scalarproduct_int16 = ff_scalarproduct_int16_mmxext;
  1892. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
  1893. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  1894. c->apply_window_int16 = ff_apply_window_int16_mmxext;
  1895. } else {
  1896. c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
  1897. }
  1898. #endif /* HAVE_MMXEXT_EXTERNAL */
  1899. }
  1900. static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
  1901. int mm_flags)
  1902. {
  1903. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  1904. #if HAVE_INLINE_ASM
  1905. if (!high_bit_depth) {
  1906. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  1907. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  1908. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  1909. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  1910. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  1911. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  1912. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  1913. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  1914. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  1915. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  1916. if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
  1917. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  1918. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  1919. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  1920. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  1921. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  1922. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  1923. }
  1924. }
  1925. if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
  1926. avctx->codec_id == AV_CODEC_ID_THEORA)) {
  1927. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
  1928. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
  1929. }
  1930. #endif /* HAVE_INLINE_ASM */
  1931. #if HAVE_YASM
  1932. if (!high_bit_depth && CONFIG_H264CHROMA) {
  1933. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
  1934. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
  1935. }
  1936. #endif /* HAVE_YASM */
  1937. }
  1938. static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
  1939. {
  1940. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  1941. #if HAVE_INLINE_ASM
  1942. if (!high_bit_depth) {
  1943. if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
  1944. /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
  1945. c->clear_block = clear_block_sse;
  1946. c->clear_blocks = clear_blocks_sse;
  1947. }
  1948. }
  1949. c->vector_clipf = vector_clipf_sse;
  1950. #endif /* HAVE_INLINE_ASM */
  1951. }
  1952. static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
  1953. int mm_flags)
  1954. {
  1955. const int bit_depth = avctx->bits_per_raw_sample;
  1956. const int high_bit_depth = bit_depth > 8;
  1957. #if HAVE_SSE2_INLINE
  1958. if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
  1959. c->idct_put = ff_idct_xvid_sse2_put;
  1960. c->idct_add = ff_idct_xvid_sse2_add;
  1961. c->idct = ff_idct_xvid_sse2;
  1962. c->idct_permutation_type = FF_SSE2_IDCT_PERM;
  1963. }
  1964. #endif /* HAVE_SSE2_INLINE */
  1965. #if HAVE_SSE2_EXTERNAL
  1966. if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  1967. // these functions are slower than mmx on AMD, but faster on Intel
  1968. if (!high_bit_depth) {
  1969. c->put_pixels_tab[0][0] = ff_put_pixels16_sse2;
  1970. c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
  1971. c->avg_pixels_tab[0][0] = ff_avg_pixels16_sse2;
  1972. }
  1973. }
  1974. if (bit_depth == 10) {
  1975. if (CONFIG_H264CHROMA) {
  1976. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
  1977. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
  1978. }
  1979. }
  1980. c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
  1981. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
  1982. if (mm_flags & AV_CPU_FLAG_ATOM) {
  1983. c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
  1984. } else {
  1985. c->vector_clip_int32 = ff_vector_clip_int32_sse2;
  1986. }
  1987. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  1988. c->apply_window_int16 = ff_apply_window_int16_sse2;
  1989. } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  1990. c->apply_window_int16 = ff_apply_window_int16_round_sse2;
  1991. }
  1992. c->bswap_buf = ff_bswap32_buf_sse2;
  1993. #endif /* HAVE_SSE2_EXTERNAL */
  1994. }
  1995. static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
  1996. int mm_flags)
  1997. {
  1998. #if HAVE_SSSE3_EXTERNAL
  1999. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  2000. if (!high_bit_depth && CONFIG_H264CHROMA) {
  2001. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
  2002. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
  2003. c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
  2004. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
  2005. }
  2006. c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
  2007. if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
  2008. c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
  2009. if (mm_flags & AV_CPU_FLAG_ATOM)
  2010. c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
  2011. else
  2012. c->apply_window_int16 = ff_apply_window_int16_ssse3;
2013. if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit: only worthwhile on CPUs where cacheline-split loads are expensive
  2014. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
  2015. c->bswap_buf = ff_bswap32_buf_ssse3;
  2016. #endif /* HAVE_SSSE3_EXTERNAL */
  2017. }
  2018. static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
  2019. int mm_flags)
  2020. {
  2021. #if HAVE_SSE4_EXTERNAL
  2022. c->vector_clip_int32 = ff_vector_clip_int32_sse4;
  2023. #endif /* HAVE_SSE4_EXTERNAL */
  2024. }
  2025. static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
  2026. {
  2027. #if HAVE_AVX_EXTERNAL
  2028. const int bit_depth = avctx->bits_per_raw_sample;
  2029. if (bit_depth == 10) {
  2030. // AVX implies !cache64.
  2031. // TODO: Port cache(32|64) detection from x264.
  2032. if (CONFIG_H264CHROMA) {
  2033. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
  2034. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
  2035. }
  2036. }
  2037. #endif /* HAVE_AVX_EXTERNAL */
  2038. }
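/* Entry point: query the CPU once and cascade through the init helpers
 * from MMX up to AVX; later calls overwrite pointers set by earlier ones,
 * so e.g. an SSSE3 chroma function replaces the MMX one on capable CPUs. */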
  2039. void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
  2040. {
  2041. int mm_flags = av_get_cpu_flags();
  2042. #if HAVE_7REGS && HAVE_INLINE_ASM
  2043. if (mm_flags & AV_CPU_FLAG_CMOV)
  2044. c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
  2045. #endif
  2046. if (mm_flags & AV_CPU_FLAG_MMX)
  2047. dsputil_init_mmx(c, avctx, mm_flags);
  2048. if (mm_flags & AV_CPU_FLAG_MMXEXT)
  2049. dsputil_init_mmxext(c, avctx, mm_flags);
  2050. if (mm_flags & AV_CPU_FLAG_3DNOW)
  2051. dsputil_init_3dnow(c, avctx, mm_flags);
  2052. if (mm_flags & AV_CPU_FLAG_SSE)
  2053. dsputil_init_sse(c, avctx, mm_flags);
  2054. if (mm_flags & AV_CPU_FLAG_SSE2)
  2055. dsputil_init_sse2(c, avctx, mm_flags);
  2056. if (mm_flags & AV_CPU_FLAG_SSSE3)
  2057. dsputil_init_ssse3(c, avctx, mm_flags);
  2058. if (mm_flags & AV_CPU_FLAG_SSE4)
  2059. dsputil_init_sse4(c, avctx, mm_flags);
  2060. if (mm_flags & AV_CPU_FLAG_AVX)
  2061. dsputil_init_avx(c, avctx, mm_flags);
  2062. if (CONFIG_ENCODERS)
  2063. ff_dsputilenc_init_mmx(c, avctx);
  2064. }