  1. /*
  2. * MMX optimized DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. *
  22. * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
  23. */
  24. #include "libavutil/cpu.h"
  25. #include "libavutil/x86_cpu.h"
  26. #include "libavcodec/dsputil.h"
  27. #include "libavcodec/h264dsp.h"
  28. #include "libavcodec/mpegvideo.h"
  29. #include "libavcodec/simple_idct.h"
  30. #include "libavcodec/ac3dec.h"
  31. #include "dsputil_mmx.h"
  32. #include "idct_xvid.h"
  33. #include "diracdsp_mmx.h"
  34. //#undef NDEBUG
  35. //#include <assert.h>
  36. /* pixel operations */
  37. DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
  38. DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
  39. DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
  40. { 0x8000000080000000ULL, 0x8000000080000000ULL };
  41. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL };
  42. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL };
  43. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
  44. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL };
  45. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
  46. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
  47. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
  48. DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
  49. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
  50. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
  51. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
  52. DECLARE_ALIGNED(8, const uint64_t, ff_pw_20) = 0x0014001400140014ULL;
  53. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_27) = { 0x001B001B001B001BULL, 0x001B001B001B001BULL };
  54. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_28) = { 0x001C001C001C001CULL, 0x001C001C001C001CULL };
  55. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
  56. DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
  57. DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
  58. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_63) = { 0x003F003F003F003FULL, 0x003F003F003F003FULL };
  59. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
  60. DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
  61. DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
  62. DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
  63. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL };
  64. DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
  65. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL };
  66. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL };
  67. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL };
  68. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_4) = { 0x0404040404040404ULL, 0x0404040404040404ULL };
  69. DECLARE_ALIGNED(8, const uint64_t, ff_pb_7) = 0x0707070707070707ULL;
  70. DECLARE_ALIGNED(8, const uint64_t, ff_pb_1F) = 0x1F1F1F1F1F1F1F1FULL;
  71. DECLARE_ALIGNED(8, const uint64_t, ff_pb_3F) = 0x3F3F3F3F3F3F3F3FULL;
  72. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL };
  73. DECLARE_ALIGNED(8, const uint64_t, ff_pb_81) = 0x8181818181818181ULL;
  74. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_A1) = { 0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL };
  75. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_F8) = { 0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL };
  76. DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
  77. DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };
  78. DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
  79. DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
  80. #define JUMPALIGN() __asm__ volatile (".p2align 3"::)
  81. #define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
  82. #define MOVQ_BFE(regd) \
  83. __asm__ volatile ( \
  84. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  85. "paddb %%"#regd", %%"#regd" \n\t" ::)
  86. #ifndef PIC
  87. #define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
  88. #define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
  89. #else
  90. // for a shared library it is better to build these constants in registers than to load them from memory (which would need PIC/GOT addressing)
  91. // pcmpeqd -> -1
  92. #define MOVQ_BONE(regd) \
  93. __asm__ volatile ( \
  94. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  95. "psrlw $15, %%"#regd" \n\t" \
  96. "packuswb %%"#regd", %%"#regd" \n\t" ::)
  97. #define MOVQ_WTWO(regd) \
  98. __asm__ volatile ( \
  99. "pcmpeqd %%"#regd", %%"#regd" \n\t" \
  100. "psrlw $15, %%"#regd" \n\t" \
  101. "psllw $1, %%"#regd" \n\t"::)
  102. #endif
  103. // using regr as temporary and for the output result
  104. // first argument is unmodified and second is trashed
  105. // regfe is supposed to contain 0xfefefefefefefefe
  106. #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
  107. "movq "#rega", "#regr" \n\t" \
  108. "pand "#regb", "#regr" \n\t" \
  109. "pxor "#rega", "#regb" \n\t" \
  110. "pand "#regfe", "#regb" \n\t" \
  111. "psrlq $1, "#regb" \n\t" \
  112. "paddb "#regb", "#regr" \n\t"
  113. #define PAVGB_MMX(rega, regb, regr, regfe) \
  114. "movq "#rega", "#regr" \n\t" \
  115. "por "#regb", "#regr" \n\t" \
  116. "pxor "#rega", "#regb" \n\t" \
  117. "pand "#regfe", "#regb" \n\t" \
  118. "psrlq $1, "#regb" \n\t" \
  119. "psubb "#regb", "#regr" \n\t"
  120. // mm6 is supposed to contain 0xfefefefefefefefe
  121. #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
  122. "movq "#rega", "#regr" \n\t" \
  123. "movq "#regc", "#regp" \n\t" \
  124. "pand "#regb", "#regr" \n\t" \
  125. "pand "#regd", "#regp" \n\t" \
  126. "pxor "#rega", "#regb" \n\t" \
  127. "pxor "#regc", "#regd" \n\t" \
  128. "pand %%mm6, "#regb" \n\t" \
  129. "pand %%mm6, "#regd" \n\t" \
  130. "psrlq $1, "#regb" \n\t" \
  131. "psrlq $1, "#regd" \n\t" \
  132. "paddb "#regb", "#regr" \n\t" \
  133. "paddb "#regd", "#regp" \n\t"
  134. #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
  135. "movq "#rega", "#regr" \n\t" \
  136. "movq "#regc", "#regp" \n\t" \
  137. "por "#regb", "#regr" \n\t" \
  138. "por "#regd", "#regp" \n\t" \
  139. "pxor "#rega", "#regb" \n\t" \
  140. "pxor "#regc", "#regd" \n\t" \
  141. "pand %%mm6, "#regb" \n\t" \
  142. "pand %%mm6, "#regd" \n\t" \
  143. "psrlq $1, "#regd" \n\t" \
  144. "psrlq $1, "#regb" \n\t" \
  145. "psubb "#regb", "#regr" \n\t" \
  146. "psubb "#regd", "#regp" \n\t"
  147. /***********************************/
  148. /* MMX no rounding */
  149. #define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
  150. #define SET_RND MOVQ_WONE
  151. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
  152. #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
  153. #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)
  154. #include "dsputil_mmx_rnd_template.c"
  155. #undef DEF
  156. #undef SET_RND
  157. #undef PAVGBP
  158. #undef PAVGB
  159. /***********************************/
  160. /* MMX rounding */
  161. #define DEF(x, y) x ## _ ## y ## _mmx
  162. #define SET_RND MOVQ_WTWO
  163. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
  164. #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
  165. #include "dsputil_mmx_rnd_template.c"
  166. #undef DEF
  167. #undef SET_RND
  168. #undef PAVGBP
  169. #undef PAVGB
  170. #undef OP_AVG
  171. /***********************************/
  172. /* 3Dnow specific */
  173. #define DEF(x) x ## _3dnow
  174. #define PAVGB "pavgusb"
  175. #define OP_AVG PAVGB
  176. #include "dsputil_mmx_avg_template.c"
  177. #undef DEF
  178. #undef PAVGB
  179. #undef OP_AVG
  180. /***********************************/
  181. /* MMX2 specific */
  182. #define DEF(x) x ## _mmx2
  183. /* Introduced only in MMX2 set */
  184. #define PAVGB "pavgb"
  185. #define OP_AVG PAVGB
  186. #include "dsputil_mmx_avg_template.c"
  187. #undef DEF
  188. #undef PAVGB
  189. #undef OP_AVG
  190. #define put_no_rnd_pixels16_mmx put_pixels16_mmx
  191. #define put_no_rnd_pixels8_mmx put_pixels8_mmx
  192. #define put_pixels16_mmx2 put_pixels16_mmx
  193. #define put_pixels8_mmx2 put_pixels8_mmx
  194. #define put_pixels4_mmx2 put_pixels4_mmx
  195. #define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
  196. #define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
  197. #define put_pixels16_3dnow put_pixels16_mmx
  198. #define put_pixels8_3dnow put_pixels8_mmx
  199. #define put_pixels4_3dnow put_pixels4_mmx
  200. #define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
  201. #define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
  202. /***********************************/
  203. /* standard MMX */
  204. void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
  205. int line_size)
  206. {
  207. const DCTELEM *p;
  208. uint8_t *pix;
  209. /* read the pixels */
  210. p = block;
  211. pix = pixels;
  212. /* unrolled loop */
  213. __asm__ volatile (
  214. "movq %3, %%mm0 \n\t"
  215. "movq 8%3, %%mm1 \n\t"
  216. "movq 16%3, %%mm2 \n\t"
  217. "movq 24%3, %%mm3 \n\t"
  218. "movq 32%3, %%mm4 \n\t"
  219. "movq 40%3, %%mm5 \n\t"
  220. "movq 48%3, %%mm6 \n\t"
  221. "movq 56%3, %%mm7 \n\t"
  222. "packuswb %%mm1, %%mm0 \n\t"
  223. "packuswb %%mm3, %%mm2 \n\t"
  224. "packuswb %%mm5, %%mm4 \n\t"
  225. "packuswb %%mm7, %%mm6 \n\t"
  226. "movq %%mm0, (%0) \n\t"
  227. "movq %%mm2, (%0, %1) \n\t"
  228. "movq %%mm4, (%0, %1, 2) \n\t"
  229. "movq %%mm6, (%0, %2) \n\t"
  230. :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
  231. "m"(*p)
  232. : "memory");
  233. pix += line_size * 4;
  234. p += 32;
  235. // if this were an exact copy of the code above, the compiler
  236. // would generate some very strange code, so the block pointer
  237. // is passed in a register ("r") instead of as a memory operand
  238. __asm__ volatile (
  239. "movq (%3), %%mm0 \n\t"
  240. "movq 8(%3), %%mm1 \n\t"
  241. "movq 16(%3), %%mm2 \n\t"
  242. "movq 24(%3), %%mm3 \n\t"
  243. "movq 32(%3), %%mm4 \n\t"
  244. "movq 40(%3), %%mm5 \n\t"
  245. "movq 48(%3), %%mm6 \n\t"
  246. "movq 56(%3), %%mm7 \n\t"
  247. "packuswb %%mm1, %%mm0 \n\t"
  248. "packuswb %%mm3, %%mm2 \n\t"
  249. "packuswb %%mm5, %%mm4 \n\t"
  250. "packuswb %%mm7, %%mm6 \n\t"
  251. "movq %%mm0, (%0) \n\t"
  252. "movq %%mm2, (%0, %1) \n\t"
  253. "movq %%mm4, (%0, %1, 2) \n\t"
  254. "movq %%mm6, (%0, %2) \n\t"
  255. :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
  256. : "memory");
  257. }
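/*
 * Reference sketch (illustrative only) of what ff_put_pixels_clamped_mmx()
 * computes: store an 8x8 block of 16-bit coefficients as bytes, clamping each
 * value to 0..255 exactly as packuswb does.
 */
#if 0
static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels,
                                 int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif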
  258. #define put_signed_pixels_clamped_mmx_half(off) \
  259. "movq "#off"(%2), %%mm1 \n\t" \
  260. "movq 16 + "#off"(%2), %%mm2 \n\t" \
  261. "movq 32 + "#off"(%2), %%mm3 \n\t" \
  262. "movq 48 + "#off"(%2), %%mm4 \n\t" \
  263. "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
  264. "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
  265. "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
  266. "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
  267. "paddb %%mm0, %%mm1 \n\t" \
  268. "paddb %%mm0, %%mm2 \n\t" \
  269. "paddb %%mm0, %%mm3 \n\t" \
  270. "paddb %%mm0, %%mm4 \n\t" \
  271. "movq %%mm1, (%0) \n\t" \
  272. "movq %%mm2, (%0, %3) \n\t" \
  273. "movq %%mm3, (%0, %3, 2) \n\t" \
  274. "movq %%mm4, (%0, %1) \n\t"
  275. void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
  276. int line_size)
  277. {
  278. x86_reg line_skip = line_size;
  279. x86_reg line_skip3;
  280. __asm__ volatile (
  281. "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
  282. "lea (%3, %3, 2), %1 \n\t"
  283. put_signed_pixels_clamped_mmx_half(0)
  284. "lea (%0, %3, 4), %0 \n\t"
  285. put_signed_pixels_clamped_mmx_half(64)
  286. : "+&r"(pixels), "=&r"(line_skip3)
  287. : "r"(block), "r"(line_skip)
  288. : "memory");
  289. }
  290. void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels,
  291. int line_size)
  292. {
  293. const DCTELEM *p;
  294. uint8_t *pix;
  295. int i;
  296. /* read the pixels */
  297. p = block;
  298. pix = pixels;
  299. MOVQ_ZERO(mm7);
  300. i = 4;
  301. do {
  302. __asm__ volatile (
  303. "movq (%2), %%mm0 \n\t"
  304. "movq 8(%2), %%mm1 \n\t"
  305. "movq 16(%2), %%mm2 \n\t"
  306. "movq 24(%2), %%mm3 \n\t"
  307. "movq %0, %%mm4 \n\t"
  308. "movq %1, %%mm6 \n\t"
  309. "movq %%mm4, %%mm5 \n\t"
  310. "punpcklbw %%mm7, %%mm4 \n\t"
  311. "punpckhbw %%mm7, %%mm5 \n\t"
  312. "paddsw %%mm4, %%mm0 \n\t"
  313. "paddsw %%mm5, %%mm1 \n\t"
  314. "movq %%mm6, %%mm5 \n\t"
  315. "punpcklbw %%mm7, %%mm6 \n\t"
  316. "punpckhbw %%mm7, %%mm5 \n\t"
  317. "paddsw %%mm6, %%mm2 \n\t"
  318. "paddsw %%mm5, %%mm3 \n\t"
  319. "packuswb %%mm1, %%mm0 \n\t"
  320. "packuswb %%mm3, %%mm2 \n\t"
  321. "movq %%mm0, %0 \n\t"
  322. "movq %%mm2, %1 \n\t"
  323. : "+m"(*pix), "+m"(*(pix + line_size))
  324. : "r"(p)
  325. : "memory");
  326. pix += line_size * 2;
  327. p += 16;
  328. } while (--i);
  329. }
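/*
 * Reference sketch (illustrative only) of ff_add_pixels_clamped_mmx(): add
 * the 8x8 coefficient block to the existing pixels and saturate the result to
 * 0..255, which is what the paddsw/packuswb sequence achieves.
 */
#if 0
static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels,
                                 int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = pixels[j] + block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif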
  330. static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels,
  331. int line_size, int h)
  332. {
  333. __asm__ volatile (
  334. "lea (%3, %3), %%"REG_a" \n\t"
  335. ".p2align 3 \n\t"
  336. "1: \n\t"
  337. "movd (%1 ), %%mm0 \n\t"
  338. "movd (%1, %3), %%mm1 \n\t"
  339. "movd %%mm0, (%2) \n\t"
  340. "movd %%mm1, (%2, %3) \n\t"
  341. "add %%"REG_a", %1 \n\t"
  342. "add %%"REG_a", %2 \n\t"
  343. "movd (%1 ), %%mm0 \n\t"
  344. "movd (%1, %3), %%mm1 \n\t"
  345. "movd %%mm0, (%2) \n\t"
  346. "movd %%mm1, (%2, %3) \n\t"
  347. "add %%"REG_a", %1 \n\t"
  348. "add %%"REG_a", %2 \n\t"
  349. "subl $4, %0 \n\t"
  350. "jnz 1b \n\t"
  351. : "+g"(h), "+r"(pixels), "+r"(block)
  352. : "r"((x86_reg)line_size)
  353. : "%"REG_a, "memory"
  354. );
  355. }
  356. static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
  357. int line_size, int h)
  358. {
  359. __asm__ volatile (
  360. "lea (%3, %3), %%"REG_a" \n\t"
  361. ".p2align 3 \n\t"
  362. "1: \n\t"
  363. "movq (%1 ), %%mm0 \n\t"
  364. "movq (%1, %3), %%mm1 \n\t"
  365. "movq %%mm0, (%2) \n\t"
  366. "movq %%mm1, (%2, %3) \n\t"
  367. "add %%"REG_a", %1 \n\t"
  368. "add %%"REG_a", %2 \n\t"
  369. "movq (%1 ), %%mm0 \n\t"
  370. "movq (%1, %3), %%mm1 \n\t"
  371. "movq %%mm0, (%2) \n\t"
  372. "movq %%mm1, (%2, %3) \n\t"
  373. "add %%"REG_a", %1 \n\t"
  374. "add %%"REG_a", %2 \n\t"
  375. "subl $4, %0 \n\t"
  376. "jnz 1b \n\t"
  377. : "+g"(h), "+r"(pixels), "+r"(block)
  378. : "r"((x86_reg)line_size)
  379. : "%"REG_a, "memory"
  380. );
  381. }
  382. static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
  383. int line_size, int h)
  384. {
  385. __asm__ volatile (
  386. "lea (%3, %3), %%"REG_a" \n\t"
  387. ".p2align 3 \n\t"
  388. "1: \n\t"
  389. "movq (%1 ), %%mm0 \n\t"
  390. "movq 8(%1 ), %%mm4 \n\t"
  391. "movq (%1, %3), %%mm1 \n\t"
  392. "movq 8(%1, %3), %%mm5 \n\t"
  393. "movq %%mm0, (%2) \n\t"
  394. "movq %%mm4, 8(%2) \n\t"
  395. "movq %%mm1, (%2, %3) \n\t"
  396. "movq %%mm5, 8(%2, %3) \n\t"
  397. "add %%"REG_a", %1 \n\t"
  398. "add %%"REG_a", %2 \n\t"
  399. "movq (%1 ), %%mm0 \n\t"
  400. "movq 8(%1 ), %%mm4 \n\t"
  401. "movq (%1, %3), %%mm1 \n\t"
  402. "movq 8(%1, %3), %%mm5 \n\t"
  403. "movq %%mm0, (%2) \n\t"
  404. "movq %%mm4, 8(%2) \n\t"
  405. "movq %%mm1, (%2, %3) \n\t"
  406. "movq %%mm5, 8(%2, %3) \n\t"
  407. "add %%"REG_a", %1 \n\t"
  408. "add %%"REG_a", %2 \n\t"
  409. "subl $4, %0 \n\t"
  410. "jnz 1b \n\t"
  411. : "+g"(h), "+r"(pixels), "+r"(block)
  412. : "r"((x86_reg)line_size)
  413. : "%"REG_a, "memory"
  414. );
  415. }
  416. static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  417. int line_size, int h)
  418. {
  419. __asm__ volatile (
  420. "1: \n\t"
  421. "movdqu (%1 ), %%xmm0 \n\t"
  422. "movdqu (%1, %3 ), %%xmm1 \n\t"
  423. "movdqu (%1, %3, 2), %%xmm2 \n\t"
  424. "movdqu (%1, %4 ), %%xmm3 \n\t"
  425. "lea (%1, %3, 4), %1 \n\t"
  426. "movdqa %%xmm0, (%2) \n\t"
  427. "movdqa %%xmm1, (%2, %3) \n\t"
  428. "movdqa %%xmm2, (%2, %3, 2) \n\t"
  429. "movdqa %%xmm3, (%2, %4) \n\t"
  430. "subl $4, %0 \n\t"
  431. "lea (%2, %3, 4), %2 \n\t"
  432. "jnz 1b \n\t"
  433. : "+g"(h), "+r"(pixels), "+r"(block)
  434. : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
  435. : "memory"
  436. );
  437. }
  438. static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
  439. int line_size, int h)
  440. {
  441. __asm__ volatile (
  442. "1: \n\t"
  443. "movdqu (%1 ), %%xmm0 \n\t"
  444. "movdqu (%1, %3 ), %%xmm1 \n\t"
  445. "movdqu (%1, %3, 2), %%xmm2 \n\t"
  446. "movdqu (%1, %4 ), %%xmm3 \n\t"
  447. "lea (%1, %3, 4), %1 \n\t"
  448. "pavgb (%2 ), %%xmm0 \n\t"
  449. "pavgb (%2, %3 ), %%xmm1 \n\t"
  450. "pavgb (%2, %3, 2), %%xmm2 \n\t"
  451. "pavgb (%2, %4), %%xmm3 \n\t"
  452. "movdqa %%xmm0, (%2) \n\t"
  453. "movdqa %%xmm1, (%2, %3) \n\t"
  454. "movdqa %%xmm2, (%2, %3, 2) \n\t"
  455. "movdqa %%xmm3, (%2, %4) \n\t"
  456. "subl $4, %0 \n\t"
  457. "lea (%2, %3, 4), %2 \n\t"
  458. "jnz 1b \n\t"
  459. : "+g"(h), "+r"(pixels), "+r"(block)
  460. : "r"((x86_reg)line_size), "r"((x86_reg)3L * line_size)
  461. : "memory"
  462. );
  463. }
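/*
 * Reference sketch (illustrative only): the put_pixels* routines are straight
 * copies, while the avg_pixels* routines round up like pavgb does. The SSE2
 * versions load the source unaligned (movdqu) but store with movdqa, so the
 * destination must be 16-byte aligned.
 */
#if 0
static void avg_pixels16_c(uint8_t *block, const uint8_t *pixels,
                           int line_size, int h)
{
    int i, x;
    for (i = 0; i < h; i++) {
        for (x = 0; x < 16; x++)
            block[x] = (block[x] + pixels[x] + 1) >> 1;  /* pavgb rounding */
        block  += line_size;
        pixels += line_size;
    }
}
#endif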
  464. #define CLEAR_BLOCKS(name, n) \
  465. static void name(DCTELEM *blocks) \
  466. { \
  467. __asm__ volatile ( \
  468. "pxor %%mm7, %%mm7 \n\t" \
  469. "mov %1, %%"REG_a" \n\t" \
  470. "1: \n\t" \
  471. "movq %%mm7, (%0, %%"REG_a") \n\t" \
  472. "movq %%mm7, 8(%0, %%"REG_a") \n\t" \
  473. "movq %%mm7, 16(%0, %%"REG_a") \n\t" \
  474. "movq %%mm7, 24(%0, %%"REG_a") \n\t" \
  475. "add $32, %%"REG_a" \n\t" \
  476. "js 1b \n\t" \
  477. :: "r"(((uint8_t *)blocks) + 128 * n), \
  478. "i"(-128 * n) \
  479. : "%"REG_a \
  480. ); \
  481. }
  482. CLEAR_BLOCKS(clear_blocks_mmx, 6)
  483. CLEAR_BLOCKS(clear_block_mmx, 1)
  484. static void clear_block_sse(DCTELEM *block)
  485. {
  486. __asm__ volatile (
  487. "xorps %%xmm0, %%xmm0 \n"
  488. "movaps %%xmm0, (%0) \n"
  489. "movaps %%xmm0, 16(%0) \n"
  490. "movaps %%xmm0, 32(%0) \n"
  491. "movaps %%xmm0, 48(%0) \n"
  492. "movaps %%xmm0, 64(%0) \n"
  493. "movaps %%xmm0, 80(%0) \n"
  494. "movaps %%xmm0, 96(%0) \n"
  495. "movaps %%xmm0, 112(%0) \n"
  496. :: "r"(block)
  497. : "memory"
  498. );
  499. }
  500. static void clear_blocks_sse(DCTELEM *blocks)
  501. {
  502. __asm__ volatile (
  503. "xorps %%xmm0, %%xmm0 \n"
  504. "mov %1, %%"REG_a" \n"
  505. "1: \n"
  506. "movaps %%xmm0, (%0, %%"REG_a") \n"
  507. "movaps %%xmm0, 16(%0, %%"REG_a") \n"
  508. "movaps %%xmm0, 32(%0, %%"REG_a") \n"
  509. "movaps %%xmm0, 48(%0, %%"REG_a") \n"
  510. "movaps %%xmm0, 64(%0, %%"REG_a") \n"
  511. "movaps %%xmm0, 80(%0, %%"REG_a") \n"
  512. "movaps %%xmm0, 96(%0, %%"REG_a") \n"
  513. "movaps %%xmm0, 112(%0, %%"REG_a") \n"
  514. "add $128, %%"REG_a" \n"
  515. "js 1b \n"
  516. :: "r"(((uint8_t *)blocks) + 128 * 6),
  517. "i"(-128 * 6)
  518. : "%"REG_a
  519. );
  520. }
  521. static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
  522. {
  523. x86_reg i = 0;
  524. __asm__ volatile (
  525. "jmp 2f \n\t"
  526. "1: \n\t"
  527. "movq (%1, %0), %%mm0 \n\t"
  528. "movq (%2, %0), %%mm1 \n\t"
  529. "paddb %%mm0, %%mm1 \n\t"
  530. "movq %%mm1, (%2, %0) \n\t"
  531. "movq 8(%1, %0), %%mm0 \n\t"
  532. "movq 8(%2, %0), %%mm1 \n\t"
  533. "paddb %%mm0, %%mm1 \n\t"
  534. "movq %%mm1, 8(%2, %0) \n\t"
  535. "add $16, %0 \n\t"
  536. "2: \n\t"
  537. "cmp %3, %0 \n\t"
  538. "js 1b \n\t"
  539. : "+r"(i)
  540. : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
  541. );
  542. for ( ; i < w; i++)
  543. dst[i + 0] += src[i + 0];
  544. }
  545. #if HAVE_7REGS
  546. static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
  547. const uint8_t *diff, int w,
  548. int *left, int *left_top)
  549. {
  550. x86_reg w2 = -w;
  551. x86_reg x;
  552. int l = *left & 0xff;
  553. int tl = *left_top & 0xff;
  554. int t;
  555. __asm__ volatile (
  556. "mov %7, %3 \n"
  557. "1: \n"
  558. "movzbl (%3, %4), %2 \n"
  559. "mov %2, %k3 \n"
  560. "sub %b1, %b3 \n"
  561. "add %b0, %b3 \n"
  562. "mov %2, %1 \n"
  563. "cmp %0, %2 \n"
  564. "cmovg %0, %2 \n"
  565. "cmovg %1, %0 \n"
  566. "cmp %k3, %0 \n"
  567. "cmovg %k3, %0 \n"
  568. "mov %7, %3 \n"
  569. "cmp %2, %0 \n"
  570. "cmovl %2, %0 \n"
  571. "add (%6, %4), %b0 \n"
  572. "mov %b0, (%5, %4) \n"
  573. "inc %4 \n"
  574. "jl 1b \n"
  575. : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
  576. : "r"(dst + w), "r"(diff + w), "rm"(top + w)
  577. );
  578. *left = l;
  579. *left_top = tl;
  580. }
  581. #endif
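/*
 * Reference sketch (illustrative only) of the median prediction implemented
 * with cmov above; mid_pred() (mathops.h) returns the median of its three
 * arguments, as used by lossless codecs such as HuffYUV.
 */
#if 0
static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *top,
                                         const uint8_t *diff, int w,
                                         int *left, int *left_top)
{
    int i;
    uint8_t l = *left, tl = *left_top;
    for (i = 0; i < w; i++) {
        l      = mid_pred(l, top[i], (l + top[i] - tl) & 0xFF) + diff[i];
        tl     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = tl;
}
#endif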
  582. #define H263_LOOP_FILTER \
  583. "pxor %%mm7, %%mm7 \n\t" \
  584. "movq %0, %%mm0 \n\t" \
  585. "movq %0, %%mm1 \n\t" \
  586. "movq %3, %%mm2 \n\t" \
  587. "movq %3, %%mm3 \n\t" \
  588. "punpcklbw %%mm7, %%mm0 \n\t" \
  589. "punpckhbw %%mm7, %%mm1 \n\t" \
  590. "punpcklbw %%mm7, %%mm2 \n\t" \
  591. "punpckhbw %%mm7, %%mm3 \n\t" \
  592. "psubw %%mm2, %%mm0 \n\t" \
  593. "psubw %%mm3, %%mm1 \n\t" \
  594. "movq %1, %%mm2 \n\t" \
  595. "movq %1, %%mm3 \n\t" \
  596. "movq %2, %%mm4 \n\t" \
  597. "movq %2, %%mm5 \n\t" \
  598. "punpcklbw %%mm7, %%mm2 \n\t" \
  599. "punpckhbw %%mm7, %%mm3 \n\t" \
  600. "punpcklbw %%mm7, %%mm4 \n\t" \
  601. "punpckhbw %%mm7, %%mm5 \n\t" \
  602. "psubw %%mm2, %%mm4 \n\t" \
  603. "psubw %%mm3, %%mm5 \n\t" \
  604. "psllw $2, %%mm4 \n\t" \
  605. "psllw $2, %%mm5 \n\t" \
  606. "paddw %%mm0, %%mm4 \n\t" \
  607. "paddw %%mm1, %%mm5 \n\t" \
  608. "pxor %%mm6, %%mm6 \n\t" \
  609. "pcmpgtw %%mm4, %%mm6 \n\t" \
  610. "pcmpgtw %%mm5, %%mm7 \n\t" \
  611. "pxor %%mm6, %%mm4 \n\t" \
  612. "pxor %%mm7, %%mm5 \n\t" \
  613. "psubw %%mm6, %%mm4 \n\t" \
  614. "psubw %%mm7, %%mm5 \n\t" \
  615. "psrlw $3, %%mm4 \n\t" \
  616. "psrlw $3, %%mm5 \n\t" \
  617. "packuswb %%mm5, %%mm4 \n\t" \
  618. "packsswb %%mm7, %%mm6 \n\t" \
  619. "pxor %%mm7, %%mm7 \n\t" \
  620. "movd %4, %%mm2 \n\t" \
  621. "punpcklbw %%mm2, %%mm2 \n\t" \
  622. "punpcklbw %%mm2, %%mm2 \n\t" \
  623. "punpcklbw %%mm2, %%mm2 \n\t" \
  624. "psubusb %%mm4, %%mm2 \n\t" \
  625. "movq %%mm2, %%mm3 \n\t" \
  626. "psubusb %%mm4, %%mm3 \n\t" \
  627. "psubb %%mm3, %%mm2 \n\t" \
  628. "movq %1, %%mm3 \n\t" \
  629. "movq %2, %%mm4 \n\t" \
  630. "pxor %%mm6, %%mm3 \n\t" \
  631. "pxor %%mm6, %%mm4 \n\t" \
  632. "paddusb %%mm2, %%mm3 \n\t" \
  633. "psubusb %%mm2, %%mm4 \n\t" \
  634. "pxor %%mm6, %%mm3 \n\t" \
  635. "pxor %%mm6, %%mm4 \n\t" \
  636. "paddusb %%mm2, %%mm2 \n\t" \
  637. "packsswb %%mm1, %%mm0 \n\t" \
  638. "pcmpgtb %%mm0, %%mm7 \n\t" \
  639. "pxor %%mm7, %%mm0 \n\t" \
  640. "psubb %%mm7, %%mm0 \n\t" \
  641. "movq %%mm0, %%mm1 \n\t" \
  642. "psubusb %%mm2, %%mm0 \n\t" \
  643. "psubb %%mm0, %%mm1 \n\t" \
  644. "pand %5, %%mm1 \n\t" \
  645. "psrlw $2, %%mm1 \n\t" \
  646. "pxor %%mm7, %%mm1 \n\t" \
  647. "psubb %%mm7, %%mm1 \n\t" \
  648. "movq %0, %%mm5 \n\t" \
  649. "movq %3, %%mm6 \n\t" \
  650. "psubb %%mm1, %%mm5 \n\t" \
  651. "paddb %%mm1, %%mm6 \n\t"
  652. static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
  653. {
  654. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  655. const int strength = ff_h263_loop_filter_strength[qscale];
  656. __asm__ volatile (
  657. H263_LOOP_FILTER
  658. "movq %%mm3, %1 \n\t"
  659. "movq %%mm4, %2 \n\t"
  660. "movq %%mm5, %0 \n\t"
  661. "movq %%mm6, %3 \n\t"
  662. : "+m"(*(uint64_t*)(src - 2 * stride)),
  663. "+m"(*(uint64_t*)(src - 1 * stride)),
  664. "+m"(*(uint64_t*)(src + 0 * stride)),
  665. "+m"(*(uint64_t*)(src + 1 * stride))
  666. : "g"(2 * strength), "m"(ff_pb_FC)
  667. );
  668. }
  669. }
  670. static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
  671. {
  672. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  673. const int strength = ff_h263_loop_filter_strength[qscale];
  674. DECLARE_ALIGNED(8, uint64_t, temp)[4];
  675. uint8_t *btemp = (uint8_t*)temp;
  676. src -= 2;
  677. transpose4x4(btemp, src, 8, stride);
  678. transpose4x4(btemp + 4, src + 4 * stride, 8, stride);
  679. __asm__ volatile (
  680. H263_LOOP_FILTER // 5 3 4 6
  681. : "+m"(temp[0]),
  682. "+m"(temp[1]),
  683. "+m"(temp[2]),
  684. "+m"(temp[3])
  685. : "g"(2 * strength), "m"(ff_pb_FC)
  686. );
  687. __asm__ volatile (
  688. "movq %%mm5, %%mm1 \n\t"
  689. "movq %%mm4, %%mm0 \n\t"
  690. "punpcklbw %%mm3, %%mm5 \n\t"
  691. "punpcklbw %%mm6, %%mm4 \n\t"
  692. "punpckhbw %%mm3, %%mm1 \n\t"
  693. "punpckhbw %%mm6, %%mm0 \n\t"
  694. "movq %%mm5, %%mm3 \n\t"
  695. "movq %%mm1, %%mm6 \n\t"
  696. "punpcklwd %%mm4, %%mm5 \n\t"
  697. "punpcklwd %%mm0, %%mm1 \n\t"
  698. "punpckhwd %%mm4, %%mm3 \n\t"
  699. "punpckhwd %%mm0, %%mm6 \n\t"
  700. "movd %%mm5, (%0) \n\t"
  701. "punpckhdq %%mm5, %%mm5 \n\t"
  702. "movd %%mm5, (%0, %2) \n\t"
  703. "movd %%mm3, (%0, %2, 2) \n\t"
  704. "punpckhdq %%mm3, %%mm3 \n\t"
  705. "movd %%mm3, (%0, %3) \n\t"
  706. "movd %%mm1, (%1) \n\t"
  707. "punpckhdq %%mm1, %%mm1 \n\t"
  708. "movd %%mm1, (%1, %2) \n\t"
  709. "movd %%mm6, (%1, %2, 2) \n\t"
  710. "punpckhdq %%mm6, %%mm6 \n\t"
  711. "movd %%mm6, (%1, %3) \n\t"
  712. :: "r"(src),
  713. "r"(src + 4 * stride),
  714. "r"((x86_reg)stride),
  715. "r"((x86_reg)(3 * stride))
  716. );
  717. }
  718. }
  719. /* Draw the edges of width 'w' of an image of size width x height;
  720. * this MMX version can only handle w == 8 or w == 16. */
  721. static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
  722. int w, int h, int sides)
  723. {
  724. uint8_t *ptr, *last_line;
  725. int i;
  726. last_line = buf + (height - 1) * wrap;
  727. /* left and right */
  728. ptr = buf;
  729. if (w == 8) {
  730. __asm__ volatile (
  731. "1: \n\t"
  732. "movd (%0), %%mm0 \n\t"
  733. "punpcklbw %%mm0, %%mm0 \n\t"
  734. "punpcklwd %%mm0, %%mm0 \n\t"
  735. "punpckldq %%mm0, %%mm0 \n\t"
  736. "movq %%mm0, -8(%0) \n\t"
  737. "movq -8(%0, %2), %%mm1 \n\t"
  738. "punpckhbw %%mm1, %%mm1 \n\t"
  739. "punpckhwd %%mm1, %%mm1 \n\t"
  740. "punpckhdq %%mm1, %%mm1 \n\t"
  741. "movq %%mm1, (%0, %2) \n\t"
  742. "add %1, %0 \n\t"
  743. "cmp %3, %0 \n\t"
  744. "jb 1b \n\t"
  745. : "+r"(ptr)
  746. : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
  747. );
  748. } else {
  749. __asm__ volatile (
  750. "1: \n\t"
  751. "movd (%0), %%mm0 \n\t"
  752. "punpcklbw %%mm0, %%mm0 \n\t"
  753. "punpcklwd %%mm0, %%mm0 \n\t"
  754. "punpckldq %%mm0, %%mm0 \n\t"
  755. "movq %%mm0, -8(%0) \n\t"
  756. "movq %%mm0, -16(%0) \n\t"
  757. "movq -8(%0, %2), %%mm1 \n\t"
  758. "punpckhbw %%mm1, %%mm1 \n\t"
  759. "punpckhwd %%mm1, %%mm1 \n\t"
  760. "punpckhdq %%mm1, %%mm1 \n\t"
  761. "movq %%mm1, (%0, %2) \n\t"
  762. "movq %%mm1, 8(%0, %2) \n\t"
  763. "add %1, %0 \n\t"
  764. "cmp %3, %0 \n\t"
  765. "jb 1b \n\t"
  766. : "+r"(ptr)
  767. : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
  768. );
  769. }
  770. /* top and bottom (and hopefully also the corners) */
  771. if (sides & EDGE_TOP) {
  772. for (i = 0; i < h; i += 4) {
  773. ptr = buf - (i + 1) * wrap - w;
  774. __asm__ volatile (
  775. "1: \n\t"
  776. "movq (%1, %0), %%mm0 \n\t"
  777. "movq %%mm0, (%0) \n\t"
  778. "movq %%mm0, (%0, %2) \n\t"
  779. "movq %%mm0, (%0, %2, 2) \n\t"
  780. "movq %%mm0, (%0, %3) \n\t"
  781. "add $8, %0 \n\t"
  782. "cmp %4, %0 \n\t"
  783. "jb 1b \n\t"
  784. : "+r"(ptr)
  785. : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
  786. "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
  787. );
  788. }
  789. }
  790. if (sides & EDGE_BOTTOM) {
  791. for (i = 0; i < h; i += 4) {
  792. ptr = last_line + (i + 1) * wrap - w;
  793. __asm__ volatile (
  794. "1: \n\t"
  795. "movq (%1, %0), %%mm0 \n\t"
  796. "movq %%mm0, (%0) \n\t"
  797. "movq %%mm0, (%0, %2) \n\t"
  798. "movq %%mm0, (%0, %2, 2) \n\t"
  799. "movq %%mm0, (%0, %3) \n\t"
  800. "add $8, %0 \n\t"
  801. "cmp %4, %0 \n\t"
  802. "jb 1b \n\t"
  803. : "+r"(ptr)
  804. : "r"((x86_reg)last_line - (x86_reg)ptr - w),
  805. "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
  806. "r"(ptr + width + 2 * w)
  807. );
  808. }
  809. }
  810. }
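/*
 * Reference sketch (illustrative only) of what draw_edges_mmx() does:
 * replicate the outermost pixel of every row into the left/right borders,
 * then copy the first/last bordered row upwards/downwards as requested by
 * 'sides'.
 */
#if 0
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height,
                         int w, int h, int sides)
{
    int i;
    for (i = 0; i < height; i++) {                          /* left and right */
        memset(buf + i * wrap - w,     buf[i * wrap],             w);
        memset(buf + i * wrap + width, buf[i * wrap + width - 1], w);
    }
    if (sides & EDGE_TOP)
        for (i = 0; i < h; i++)                             /* top */
            memcpy(buf - (i + 1) * wrap - w, buf - w, width + 2 * w);
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)                             /* bottom */
            memcpy(buf + (height + i) * wrap - w,
                   buf + (height - 1) * wrap - w, width + 2 * w);
}
#endif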
  811. #define QPEL_V_LOW(m3, m4, m5, m6, pw_20, pw_3, rnd, \
  812. in0, in1, in2, in7, out, OP) \
  813. "paddw "#m4", "#m3" \n\t" /* x1 */ \
  814. "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */ \
  815. "pmullw "#m3", %%mm4 \n\t" /* 20x1 */ \
  816. "movq "#in7", "#m3" \n\t" /* d */ \
  817. "movq "#in0", %%mm5 \n\t" /* D */ \
  818. "paddw "#m3", %%mm5 \n\t" /* x4 */ \
  819. "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */ \
  820. "movq "#in1", %%mm5 \n\t" /* C */ \
  821. "movq "#in2", %%mm6 \n\t" /* B */ \
  822. "paddw "#m6", %%mm5 \n\t" /* x3 */ \
  823. "paddw "#m5", %%mm6 \n\t" /* x2 */ \
  824. "paddw %%mm6, %%mm6 \n\t" /* 2x2 */ \
  825. "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */ \
  826. "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */ \
  827. "paddw "#rnd", %%mm4 \n\t" /* x2 */ \
  828. "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */ \
  829. "psraw $5, %%mm5 \n\t" \
  830. "packuswb %%mm5, %%mm5 \n\t" \
  831. OP(%%mm5, out, %%mm7, d)
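/*
 * Reference sketch (illustrative only) of one output sample of the 8-tap
 * MPEG-4 quarter-pel lowpass filter that QPEL_V_LOW and the lowpass routines
 * below evaluate; ROUNDER is ff_pw_16 for the rounding variants and ff_pw_15
 * for the no-rounding ones.
 */
#if 0
static inline uint8_t qpel_lowpass_sample_c(const uint8_t *s, int rounder)
{
    int v = (20 * (s[3] + s[4]) - 6 * (s[2] + s[5])
           +  3 * (s[1] + s[6]) -     (s[0] + s[7]) + rounder) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;
}
#endif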
  832. #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
  833. static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
  834. uint8_t *src, \
  835. int dstStride, \
  836. int srcStride, \
  837. int h) \
  838. { \
  839. uint64_t temp; \
  840. \
  841. __asm__ volatile ( \
  842. "pxor %%mm7, %%mm7 \n\t" \
  843. "1: \n\t" \
  844. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
  845. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
  846. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
  847. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
  848. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
  849. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
  850. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
  851. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
  852. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
  853. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
  854. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
  855. "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
  856. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
  857. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
  858. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
  859. "paddw %%mm3, %%mm5 \n\t" /* b */ \
  860. "paddw %%mm2, %%mm6 \n\t" /* c */ \
  861. "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
  862. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
  863. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
  864. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
  865. "paddw %%mm4, %%mm0 \n\t" /* a */ \
  866. "paddw %%mm1, %%mm5 \n\t" /* d */ \
  867. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
  868. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
  869. "paddw %6, %%mm6 \n\t" \
  870. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
  871. "psraw $5, %%mm0 \n\t" \
  872. "movq %%mm0, %5 \n\t" \
  873. /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
  874. \
  875. "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */ \
  876. "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */ \
  877. "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */ \
  878. "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */ \
  879. "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */ \
  880. "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */ \
  881. "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */ \
  882. "paddw %%mm0, %%mm2 \n\t" /* b */ \
  883. "paddw %%mm5, %%mm3 \n\t" /* c */ \
  884. "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
  885. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
  886. "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */ \
  887. "psrlq $24, %%mm6 \n\t" /* IJKLM000 */ \
  888. "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */ \
  889. "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */ \
  890. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
  891. "paddw %%mm2, %%mm1 \n\t" /* a */ \
  892. "paddw %%mm6, %%mm4 \n\t" /* d */ \
  893. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
  894. "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */ \
  895. "paddw %6, %%mm1 \n\t" \
  896. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */ \
  897. "psraw $5, %%mm3 \n\t" \
  898. "movq %5, %%mm1 \n\t" \
  899. "packuswb %%mm3, %%mm1 \n\t" \
  900. OP_MMX2(%%mm1, (%1), %%mm4, q) \
  901. /* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
  902. \
  903. "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
  904. "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */ \
  905. "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */ \
  906. "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */ \
  907. "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */ \
  908. "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */ \
  909. "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */ \
  910. "paddw %%mm1, %%mm5 \n\t" /* b */ \
  911. "paddw %%mm4, %%mm0 \n\t" /* c */ \
  912. "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
  913. "psubw %%mm5, %%mm0 \n\t" /* c - 2b */ \
  914. "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */ \
  915. "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */ \
  916. "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */ \
  917. "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */ \
  918. "paddw %%mm3, %%mm2 \n\t" /* d */ \
  919. "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */ \
  920. "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */ \
  921. "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */ \
  922. "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */ \
  923. "paddw %%mm2, %%mm6 \n\t" /* a */ \
  924. "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */ \
  925. "paddw %6, %%mm0 \n\t" \
  926. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
  927. "psraw $5, %%mm0 \n\t" \
  928. /* mm1 = KLMN, mm2 = JKLM, mm3 = MNOP, */ \
  929. /* mm4 = LMNO, mm5 = NOPQ mm7 = 0 */ \
  930. \
  931. "paddw %%mm5, %%mm3 \n\t" /* a */ \
  932. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */ \
  933. "paddw %%mm4, %%mm6 \n\t" /* b */ \
  934. "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */ \
  935. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */ \
  936. "paddw %%mm1, %%mm4 \n\t" /* c */ \
  937. "paddw %%mm2, %%mm5 \n\t" /* d */ \
  938. "paddw %%mm6, %%mm6 \n\t" /* 2b */ \
  939. "psubw %%mm6, %%mm4 \n\t" /* c - 2b */ \
  940. "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */ \
  941. "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */ \
  942. "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */ \
  943. "paddw %6, %%mm4 \n\t" \
  944. "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
  945. "psraw $5, %%mm4 \n\t" \
  946. "packuswb %%mm4, %%mm0 \n\t" \
  947. OP_MMX2(%%mm0, 8(%1), %%mm4, q) \
  948. \
  949. "add %3, %0 \n\t" \
  950. "add %4, %1 \n\t" \
  951. "decl %2 \n\t" \
  952. "jnz 1b \n\t" \
  953. : "+a"(src), "+c"(dst), "+D"(h) \
  954. : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), \
  955. /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(temp), "m"(ROUNDER) \
  956. : "memory" \
  957. ); \
  958. } \
  959. \
  960. static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, \
  961. uint8_t *src, \
  962. int dstStride, \
  963. int srcStride, \
  964. int h) \
  965. { \
  966. int i; \
  967. int16_t temp[16]; \
  968. /* quick HACK, XXX FIXME MUST be optimized */ \
  969. for (i = 0; i < h; i++) { \
  970. temp[ 0] = (src[ 0] + src[ 1]) * 20 - (src[ 0] + src[ 2]) * 6 + \
  971. (src[ 1] + src[ 3]) * 3 - (src[ 2] + src[ 4]); \
  972. temp[ 1] = (src[ 1] + src[ 2]) * 20 - (src[ 0] + src[ 3]) * 6 + \
  973. (src[ 0] + src[ 4]) * 3 - (src[ 1] + src[ 5]); \
  974. temp[ 2] = (src[ 2] + src[ 3]) * 20 - (src[ 1] + src[ 4]) * 6 + \
  975. (src[ 0] + src[ 5]) * 3 - (src[ 0] + src[ 6]); \
  976. temp[ 3] = (src[ 3] + src[ 4]) * 20 - (src[ 2] + src[ 5]) * 6 + \
  977. (src[ 1] + src[ 6]) * 3 - (src[ 0] + src[ 7]); \
  978. temp[ 4] = (src[ 4] + src[ 5]) * 20 - (src[ 3] + src[ 6]) * 6 + \
  979. (src[ 2] + src[ 7]) * 3 - (src[ 1] + src[ 8]); \
  980. temp[ 5] = (src[ 5] + src[ 6]) * 20 - (src[ 4] + src[ 7]) * 6 + \
  981. (src[ 3] + src[ 8]) * 3 - (src[ 2] + src[ 9]); \
  982. temp[ 6] = (src[ 6] + src[ 7]) * 20 - (src[ 5] + src[ 8]) * 6 + \
  983. (src[ 4] + src[ 9]) * 3 - (src[ 3] + src[10]); \
  984. temp[ 7] = (src[ 7] + src[ 8]) * 20 - (src[ 6] + src[ 9]) * 6 + \
  985. (src[ 5] + src[10]) * 3 - (src[ 4] + src[11]); \
  986. temp[ 8] = (src[ 8] + src[ 9]) * 20 - (src[ 7] + src[10]) * 6 + \
  987. (src[ 6] + src[11]) * 3 - (src[ 5] + src[12]); \
  988. temp[ 9] = (src[ 9] + src[10]) * 20 - (src[ 8] + src[11]) * 6 + \
  989. (src[ 7] + src[12]) * 3 - (src[ 6] + src[13]); \
  990. temp[10] = (src[10] + src[11]) * 20 - (src[ 9] + src[12]) * 6 + \
  991. (src[ 8] + src[13]) * 3 - (src[ 7] + src[14]); \
  992. temp[11] = (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + \
  993. (src[ 9] + src[14]) * 3 - (src[ 8] + src[15]); \
  994. temp[12] = (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + \
  995. (src[10] + src[15]) * 3 - (src[ 9] + src[16]); \
  996. temp[13] = (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + \
  997. (src[11] + src[16]) * 3 - (src[10] + src[16]); \
  998. temp[14] = (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + \
  999. (src[12] + src[16]) * 3 - (src[11] + src[15]); \
  1000. temp[15] = (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + \
  1001. (src[13] + src[15]) * 3 - (src[12] + src[14]); \
  1002. __asm__ volatile ( \
  1003. "movq (%0), %%mm0 \n\t" \
  1004. "movq 8(%0), %%mm1 \n\t" \
  1005. "paddw %2, %%mm0 \n\t" \
  1006. "paddw %2, %%mm1 \n\t" \
  1007. "psraw $5, %%mm0 \n\t" \
  1008. "psraw $5, %%mm1 \n\t" \
  1009. "packuswb %%mm1, %%mm0 \n\t" \
  1010. OP_3DNOW(%%mm0, (%1), %%mm1, q) \
  1011. "movq 16(%0), %%mm0 \n\t" \
  1012. "movq 24(%0), %%mm1 \n\t" \
  1013. "paddw %2, %%mm0 \n\t" \
  1014. "paddw %2, %%mm1 \n\t" \
  1015. "psraw $5, %%mm0 \n\t" \
  1016. "psraw $5, %%mm1 \n\t" \
  1017. "packuswb %%mm1, %%mm0 \n\t" \
  1018. OP_3DNOW(%%mm0, 8(%1), %%mm1, q) \
  1019. :: "r"(temp), "r"(dst), "m"(ROUNDER) \
  1020. : "memory" \
  1021. ); \
  1022. dst += dstStride; \
  1023. src += srcStride; \
  1024. } \
  1025. } \
  1026. \
  1027. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, \
  1028. uint8_t *src, \
  1029. int dstStride, \
  1030. int srcStride, \
  1031. int h) \
  1032. { \
  1033. __asm__ volatile ( \
  1034. "pxor %%mm7, %%mm7 \n\t" \
  1035. "1: \n\t" \
  1036. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */ \
  1037. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */ \
  1038. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */ \
  1039. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */ \
  1040. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */ \
  1041. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */ \
  1042. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */ \
  1043. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */ \
  1044. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */ \
  1045. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */ \
  1046. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */ \
  1047. "psllq $24, %%mm4 \n\t" /* 000ABCDE */ \
  1048. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */ \
  1049. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */ \
  1050. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */ \
  1051. "paddw %%mm3, %%mm5 \n\t" /* b */ \
  1052. "paddw %%mm2, %%mm6 \n\t" /* c */ \
  1053. "paddw %%mm5, %%mm5 \n\t" /* 2b */ \
  1054. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */ \
  1055. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */ \
  1056. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */ \
  1057. "paddw %%mm4, %%mm0 \n\t" /* a */ \
  1058. "paddw %%mm1, %%mm5 \n\t" /* d */ \
  1059. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */ \
  1060. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */ \
  1061. "paddw %5, %%mm6 \n\t" \
  1062. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */ \
  1063. "psraw $5, %%mm0 \n\t" \
  1064. /* mm1 = EFGH, mm2 = DEFG, mm3 = CDEF, mm4 = BCDE, mm7 = 0 */ \
  1065. \
  1066. "movd 5(%0), %%mm5 \n\t" /* FGHI */ \
  1067. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */ \
  1068. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */ \
  1069. "paddw %%mm5, %%mm1 \n\t" /* a */ \
  1070. "paddw %%mm6, %%mm2 \n\t" /* b */ \
  1071. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */ \
  1072. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */ \
  1073. "paddw %%mm6, %%mm3 \n\t" /* c */ \
  1074. "paddw %%mm5, %%mm4 \n\t" /* d */ \
  1075. "paddw %%mm2, %%mm2 \n\t" /* 2b */ \
  1076. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */ \
  1077. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */ \
  1078. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */ \
  1079. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */ \
  1080. "paddw %5, %%mm1 \n\t" \
  1081. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
  1082. "psraw $5, %%mm3 \n\t" \
  1083. "packuswb %%mm3, %%mm0 \n\t" \
  1084. OP_MMX2(%%mm0, (%1), %%mm4, q) \
  1085. \
  1086. "add %3, %0 \n\t" \
  1087. "add %4, %1 \n\t" \
  1088. "decl %2 \n\t" \
  1089. "jnz 1b \n\t" \
  1090. : "+a"(src), "+c"(dst), "+d"(h) \
  1091. : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), \
  1092. /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER) \
  1093. : "memory" \
  1094. ); \
  1095. } \
  1096. \
  1097. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, \
  1098. uint8_t *src, \
  1099. int dstStride, \
  1100. int srcStride, \
  1101. int h) \
  1102. { \
  1103. int i; \
  1104. int16_t temp[8]; \
  1105. /* quick HACK, XXX FIXME MUST be optimized */ \
  1106. for (i = 0; i < h; i++) { \
  1107. temp[0] = (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + \
  1108. (src[1] + src[3]) * 3 - (src[2] + src[4]); \
  1109. temp[1] = (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + \
  1110. (src[0] + src[4]) * 3 - (src[1] + src[5]); \
  1111. temp[2] = (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + \
  1112. (src[0] + src[5]) * 3 - (src[0] + src[6]); \
  1113. temp[3] = (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + \
  1114. (src[1] + src[6]) * 3 - (src[0] + src[7]); \
  1115. temp[4] = (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + \
  1116. (src[2] + src[7]) * 3 - (src[1] + src[8]); \
  1117. temp[5] = (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + \
  1118. (src[3] + src[8]) * 3 - (src[2] + src[8]); \
  1119. temp[6] = (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + \
  1120. (src[4] + src[8]) * 3 - (src[3] + src[7]); \
  1121. temp[7] = (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + \
  1122. (src[5] + src[7]) * 3 - (src[4] + src[6]); \
  1123. __asm__ volatile ( \
  1124. "movq (%0), %%mm0 \n\t" \
  1125. "movq 8(%0), %%mm1 \n\t" \
  1126. "paddw %2, %%mm0 \n\t" \
  1127. "paddw %2, %%mm1 \n\t" \
  1128. "psraw $5, %%mm0 \n\t" \
  1129. "psraw $5, %%mm1 \n\t" \
  1130. "packuswb %%mm1, %%mm0 \n\t" \
  1131. OP_3DNOW(%%mm0, (%1), %%mm1, q) \
  1132. :: "r"(temp), "r"(dst), "m"(ROUNDER) \
  1133. : "memory" \
  1134. ); \
  1135. dst += dstStride; \
  1136. src += srcStride; \
  1137. } \
  1138. }
  1139. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX) \
  1140. static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, \
  1141. uint8_t *src, \
  1142. int dstStride, \
  1143. int srcStride) \
  1144. { \
  1145. uint64_t temp[17 * 4]; \
  1146. uint64_t *temp_ptr = temp; \
  1147. int count = 17; \
  1148. \
  1149. /* FIXME unroll */ \
  1150. __asm__ volatile ( \
  1151. "pxor %%mm7, %%mm7 \n\t" \
  1152. "1: \n\t" \
  1153. "movq (%0), %%mm0 \n\t" \
  1154. "movq (%0), %%mm1 \n\t" \
  1155. "movq 8(%0), %%mm2 \n\t" \
  1156. "movq 8(%0), %%mm3 \n\t" \
  1157. "punpcklbw %%mm7, %%mm0 \n\t" \
  1158. "punpckhbw %%mm7, %%mm1 \n\t" \
  1159. "punpcklbw %%mm7, %%mm2 \n\t" \
  1160. "punpckhbw %%mm7, %%mm3 \n\t" \
  1161. "movq %%mm0, (%1) \n\t" \
  1162. "movq %%mm1, 17 * 8(%1) \n\t" \
  1163. "movq %%mm2, 2 * 17 * 8(%1) \n\t" \
  1164. "movq %%mm3, 3 * 17 * 8(%1) \n\t" \
  1165. "add $8, %1 \n\t" \
  1166. "add %3, %0 \n\t" \
  1167. "decl %2 \n\t" \
  1168. "jnz 1b \n\t" \
  1169. : "+r"(src), "+r"(temp_ptr), "+r"(count) \
  1170. : "r"((x86_reg)srcStride) \
  1171. : "memory" \
  1172. ); \
  1173. \
  1174. temp_ptr = temp; \
  1175. count = 4; \
  1176. \
  1177. /* FIXME reorder for speed */ \
  1178. __asm__ volatile ( \
  1179. /* "pxor %%mm7, %%mm7 \n\t" */ \
  1180. "1: \n\t" \
  1181. "movq (%0), %%mm0 \n\t" \
  1182. "movq 8(%0), %%mm1 \n\t" \
  1183. "movq 16(%0), %%mm2 \n\t" \
  1184. "movq 24(%0), %%mm3 \n\t" \
  1185. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
  1186. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
  1187. "add %4, %1 \n\t" \
  1188. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
  1189. \
  1190. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
  1191. "add %4, %1 \n\t" \
  1192. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
  1193. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP) \
  1194. "add %4, %1 \n\t" \
  1195. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP) \
  1196. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP) \
  1197. "add %4, %1 \n\t" \
  1198. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP) \
  1199. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0), 104(%0), (%1, %3), OP) \
  1200. "add %4, %1 \n\t" \
  1201. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0), 112(%0), (%1), OP) \
  1202. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0), 120(%0), (%1, %3), OP) \
  1203. "add %4, %1 \n\t" \
  1204. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0), 128(%0), (%1), OP) \
  1205. \
  1206. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0), 128(%0), (%1, %3), OP) \
  1207. "add %4, %1 \n\t" \
  1208. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0), 104(%0), 120(%0), (%1), OP) \
  1209. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0), 104(%0), 112(%0), 112(%0), (%1, %3), OP) \
  1210. \
  1211. "add $136, %0 \n\t" \
  1212. "add %6, %1 \n\t" \
  1213. "decl %2 \n\t" \
  1214. "jnz 1b \n\t" \
  1215. \
  1216. : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
  1217. : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
  1218. /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
  1219. "g"(4 - 14 * (x86_reg)dstStride) \
  1220. : "memory" \
  1221. ); \
  1222. } \
  1223. \
  1224. static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, \
  1225. uint8_t *src, \
  1226. int dstStride, \
  1227. int srcStride) \
  1228. { \
  1229. uint64_t temp[9 * 2]; \
  1230. uint64_t *temp_ptr = temp; \
  1231. int count = 9; \
  1232. \
  1233. /* FIXME unroll */ \
  1234. __asm__ volatile ( \
  1235. "pxor %%mm7, %%mm7 \n\t" \
  1236. "1: \n\t" \
  1237. "movq (%0), %%mm0 \n\t" \
  1238. "movq (%0), %%mm1 \n\t" \
  1239. "punpcklbw %%mm7, %%mm0 \n\t" \
  1240. "punpckhbw %%mm7, %%mm1 \n\t" \
  1241. "movq %%mm0, (%1) \n\t" \
  1242. "movq %%mm1, 9*8(%1) \n\t" \
  1243. "add $8, %1 \n\t" \
  1244. "add %3, %0 \n\t" \
  1245. "decl %2 \n\t" \
  1246. "jnz 1b \n\t" \
  1247. : "+r"(src), "+r"(temp_ptr), "+r"(count) \
  1248. : "r"((x86_reg)srcStride) \
  1249. : "memory" \
  1250. ); \
  1251. \
  1252. temp_ptr = temp; \
  1253. count = 2; \
  1254. \
  1255. /* FIXME reorder for speed */ \
  1256. __asm__ volatile ( \
  1257. /* "pxor %%mm7, %%mm7 \n\t" */ \
  1258. "1: \n\t" \
  1259. "movq (%0), %%mm0 \n\t" \
  1260. "movq 8(%0), %%mm1 \n\t" \
  1261. "movq 16(%0), %%mm2 \n\t" \
  1262. "movq 24(%0), %%mm3 \n\t" \
  1263. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP) \
  1264. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP) \
  1265. "add %4, %1 \n\t" \
  1266. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP) \
  1267. \
  1268. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP) \
  1269. "add %4, %1 \n\t" \
  1270. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP) \
  1271. \
  1272. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP) \
  1273. "add %4, %1 \n\t" \
  1274. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP) \
  1275. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP) \
  1276. \
  1277. "add $72, %0 \n\t" \
  1278. "add %6, %1 \n\t" \
  1279. "decl %2 \n\t" \
  1280. "jnz 1b \n\t" \
  1281. \
  1282. : "+r"(temp_ptr), "+r"(dst), "+g"(count) \
  1283. : "r"((x86_reg)dstStride), "r"(2 * (x86_reg)dstStride), \
  1284. /* "m"(ff_pw_20), "m"(ff_pw_3), */ "m"(ROUNDER), \
  1285. "g"(4 - 6 * (x86_reg)dstStride) \
  1286. : "memory" \
  1287. ); \
  1288. } \
  1289. \
  1290. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  1291. int stride) \
  1292. { \
  1293. OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
  1294. } \
  1295. \
  1296. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  1297. int stride) \
  1298. { \
  1299. uint64_t temp[8]; \
  1300. uint8_t * const half = (uint8_t*)temp; \
  1301. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  1302. stride, 8); \
  1303. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
  1304. } \
  1305. \
  1306. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  1307. int stride) \
  1308. { \
  1309. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
  1310. stride, 8); \
  1311. } \
  1312. \
  1313. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  1314. int stride) \
  1315. { \
  1316. uint64_t temp[8]; \
  1317. uint8_t * const half = (uint8_t*)temp; \
  1318. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \
  1319. stride, 8); \
  1320. OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \
  1321. stride, 8); \
  1322. } \
  1323. \
  1324. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  1325. int stride) \
  1326. { \
  1327. uint64_t temp[8]; \
  1328. uint8_t * const half = (uint8_t*)temp; \
  1329. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
  1330. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8); \
  1331. } \
  1332. \
  1333. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  1334. int stride) \
  1335. { \
  1336. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride); \
  1337. } \
  1338. \
  1339. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  1340. int stride) \
  1341. { \
  1342. uint64_t temp[8]; \
  1343. uint8_t * const half = (uint8_t*)temp; \
  1344. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride); \
  1345. OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride, \
  1346. stride, 8); \
  1347. } \
  1348. \
  1349. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  1350. int stride) \
  1351. { \
  1352. uint64_t half[8 + 9]; \
  1353. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1354. uint8_t * const halfHV = ((uint8_t*)half); \
  1355. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1356. stride, 9); \
  1357. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1358. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1359. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1360. } \
  1361. \
  1362. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  1363. int stride) \
  1364. { \
  1365. uint64_t half[8 + 9]; \
  1366. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1367. uint8_t * const halfHV = ((uint8_t*)half); \
  1368. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1369. stride, 9); \
  1370. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1371. stride, 9); \
  1372. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1373. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1374. } \
  1375. \
  1376. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  1377. int stride) \
  1378. { \
  1379. uint64_t half[8 + 9]; \
  1380. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1381. uint8_t * const halfHV = ((uint8_t*)half); \
  1382. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1383. stride, 9); \
  1384. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1385. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1386. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1387. } \
  1388. \
  1389. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  1390. int stride) \
  1391. { \
  1392. uint64_t half[8 + 9]; \
  1393. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1394. uint8_t * const halfHV = ((uint8_t*)half); \
  1395. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1396. stride, 9); \
  1397. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1398. stride, 9); \
  1399. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1400. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1401. } \
  1402. \
  1403. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  1404. int stride) \
  1405. { \
  1406. uint64_t half[8 + 9]; \
  1407. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1408. uint8_t * const halfHV = ((uint8_t*)half); \
  1409. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1410. stride, 9); \
  1411. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1412. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8); \
  1413. } \
  1414. \
  1415. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  1416. int stride) \
  1417. { \
  1418. uint64_t half[8 + 9]; \
  1419. uint8_t * const halfH = ((uint8_t*)half) + 64; \
  1420. uint8_t * const halfHV = ((uint8_t*)half); \
  1421. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1422. stride, 9); \
  1423. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8); \
  1424. OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, stride, 8, 8); \
  1425. } \
  1426. \
  1427. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  1428. int stride) \
  1429. { \
  1430. uint64_t half[8 + 9]; \
  1431. uint8_t * const halfH = ((uint8_t*)half); \
  1432. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1433. stride, 9); \
  1434. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9); \
  1435. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1436. } \
  1437. \
  1438. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  1439. int stride) \
  1440. { \
  1441. uint64_t half[8 + 9]; \
  1442. uint8_t * const halfH = ((uint8_t*)half); \
  1443. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1444. stride, 9); \
  1445. put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \
  1446. stride, 9); \
  1447. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1448. } \
  1449. \
  1450. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  1451. int stride) \
  1452. { \
  1453. uint64_t half[9]; \
  1454. uint8_t * const halfH = ((uint8_t*)half); \
  1455. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \
  1456. stride, 9); \
  1457. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8); \
  1458. } \
  1459. \
  1460. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
  1461. int stride) \
  1462. { \
  1463. OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
  1464. } \
  1465. \
  1466. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
  1467. int stride) \
  1468. { \
  1469. uint64_t temp[32]; \
  1470. uint8_t * const half = (uint8_t*)temp; \
  1471. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  1472. stride, 16); \
  1473. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
  1474. } \
  1475. \
  1476. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
  1477. int stride) \
  1478. { \
  1479. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
  1480. stride, stride, 16); \
  1481. } \
  1482. \
  1483. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
  1484. int stride) \
  1485. { \
  1486. uint64_t temp[32]; \
  1487. uint8_t * const half = (uint8_t*)temp; \
  1488. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \
  1489. stride, 16); \
  1490. OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \
  1491. stride, stride, 16); \
  1492. } \
  1493. \
  1494. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
  1495. int stride) \
  1496. { \
  1497. uint64_t temp[32]; \
  1498. uint8_t * const half = (uint8_t*)temp; \
  1499. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  1500. stride); \
  1501. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16); \
  1502. } \
  1503. \
  1504. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
  1505. int stride) \
  1506. { \
  1507. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride); \
  1508. } \
  1509. \
  1510. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
  1511. int stride) \
  1512. { \
  1513. uint64_t temp[32]; \
  1514. uint8_t * const half = (uint8_t*)temp; \
  1515. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \
  1516. stride); \
  1517. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \
  1518. stride, stride, 16); \
  1519. } \
  1520. \
  1521. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
  1522. int stride) \
  1523. { \
  1524. uint64_t half[16 * 2 + 17 * 2]; \
  1525. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1526. uint8_t * const halfHV = ((uint8_t*)half); \
  1527. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1528. stride, 17); \
  1529. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1530. stride, 17); \
  1531. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1532. 16, 16); \
  1533. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1534. } \
  1535. \
  1536. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
  1537. int stride) \
  1538. { \
  1539. uint64_t half[16 * 2 + 17 * 2]; \
  1540. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1541. uint8_t * const halfHV = ((uint8_t*)half); \
  1542. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1543. stride, 17); \
  1544. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1545. stride, 17); \
  1546. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1547. 16, 16); \
  1548. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1549. } \
  1550. \
  1551. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
  1552. int stride) \
  1553. { \
  1554. uint64_t half[16 * 2 + 17 * 2]; \
  1555. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1556. uint8_t * const halfHV = ((uint8_t*)half); \
  1557. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1558. stride, 17); \
  1559. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1560. stride, 17); \
  1561. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1562. 16, 16); \
  1563. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1564. 16, 16); \
  1565. } \
  1566. \
  1567. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
  1568. int stride) \
  1569. { \
  1570. uint64_t half[16 * 2 + 17 * 2]; \
  1571. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1572. uint8_t * const halfHV = ((uint8_t*)half); \
  1573. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1574. stride, 17); \
  1575. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1576. stride, 17); \
  1577. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1578. 16, 16); \
  1579. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1580. 16, 16); \
  1581. } \
  1582. \
  1583. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
  1584. int stride) \
  1585. { \
  1586. uint64_t half[16 * 2 + 17 * 2]; \
  1587. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1588. uint8_t * const halfHV = ((uint8_t*)half); \
  1589. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1590. stride, 17); \
  1591. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1592. 16, 16); \
  1593. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16); \
  1594. } \
  1595. \
  1596. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
  1597. int stride) \
  1598. { \
  1599. uint64_t half[16 * 2 + 17 * 2]; \
  1600. uint8_t * const halfH = ((uint8_t*)half) + 256; \
  1601. uint8_t * const halfHV = ((uint8_t*)half); \
  1602. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1603. stride, 17); \
  1604. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \
  1605. 16, 16); \
  1606. OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, stride, \
  1607. 16, 16); \
  1608. } \
  1609. \
  1610. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
  1611. int stride) \
  1612. { \
  1613. uint64_t half[17 * 2]; \
  1614. uint8_t * const halfH = ((uint8_t*)half); \
  1615. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1616. stride, 17); \
  1617. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \
  1618. stride, 17); \
  1619. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1620. } \
  1621. \
  1622. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
  1623. int stride) \
  1624. { \
  1625. uint64_t half[17 * 2]; \
  1626. uint8_t * const halfH = ((uint8_t*)half); \
  1627. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1628. stride, 17); \
  1629. put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \
  1630. stride, 17); \
  1631. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1632. } \
  1633. \
  1634. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
  1635. int stride) \
  1636. { \
  1637. uint64_t half[17 * 2]; \
  1638. uint8_t * const halfH = ((uint8_t*)half); \
  1639. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \
  1640. stride, 17); \
  1641. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16); \
  1642. }
  1643. #define PUT_OP(a, b, temp, size) \
  1644. "mov"#size" "#a", "#b" \n\t"
  1645. #define AVG_3DNOW_OP(a, b, temp, size) \
  1646. "mov"#size" "#b", "#temp" \n\t" \
  1647. "pavgusb "#temp", "#a" \n\t" \
  1648. "mov"#size" "#a", "#b" \n\t"
  1649. #define AVG_MMX2_OP(a, b, temp, size) \
  1650. "mov"#size" "#b", "#temp" \n\t" \
  1651. "pavgb "#temp", "#a" \n\t" \
  1652. "mov"#size" "#a", "#b" \n\t"
  1653. QPEL_BASE(put_, ff_pw_16, _, PUT_OP, PUT_OP)
  1654. QPEL_BASE(avg_, ff_pw_16, _, AVG_MMX2_OP, AVG_3DNOW_OP)
  1655. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  1656. QPEL_OP(put_, ff_pw_16, _, PUT_OP, 3dnow)
  1657. QPEL_OP(avg_, ff_pw_16, _, AVG_3DNOW_OP, 3dnow)
  1658. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  1659. QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2)
  1660. QPEL_OP(avg_, ff_pw_16, _, AVG_MMX2_OP, mmx2)
  1661. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
  1662. /***********************************/
1663. /* bilinear qpel: not compliant with any spec, used only for -lavdopts fast */
  1664. #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL) \
  1665. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
  1666. uint8_t *src, \
  1667. int stride) \
  1668. { \
  1669. OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE); \
  1670. }
  1671. #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2) \
  1672. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, \
  1673. uint8_t *src, \
  1674. int stride) \
  1675. { \
  1676. OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src + S0, stride, SIZE, \
  1677. S1, S2); \
  1678. }
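/* Note (added): each QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2) invocation
 * inside QPEL_2TAP below maps quarter-pel position XY to a base source offset
 * S0 plus two further offsets S1/S2 that the _l3_ helper combines. */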
  1679. #define QPEL_2TAP(OPNAME, SIZE, MMX) \
  1680. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX) \
  1681. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX) \
  1682. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx) \
  1683. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX = \
  1684. OPNAME ## qpel ## SIZE ## _mc00_ ## MMX; \
  1685. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX = \
  1686. OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX; \
  1687. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX = \
  1688. OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX; \
  1689. static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, \
  1690. uint8_t *src, \
  1691. int stride) \
  1692. { \
  1693. OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src + 1, stride, SIZE); \
  1694. } \
  1695. static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, \
  1696. uint8_t *src, \
  1697. int stride) \
  1698. { \
  1699. OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src + stride, \
  1700. stride, SIZE); \
  1701. } \
  1702. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0) \
  1703. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0) \
  1704. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0) \
  1705. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0) \
  1706. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1) \
  1707. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1) \
  1708. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1) \
  1709. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride + 1, -stride, -1) \
  1710. QPEL_2TAP(put_, 16, mmx2)
  1711. QPEL_2TAP(avg_, 16, mmx2)
  1712. QPEL_2TAP(put_, 8, mmx2)
  1713. QPEL_2TAP(avg_, 8, mmx2)
  1714. QPEL_2TAP(put_, 16, 3dnow)
  1715. QPEL_2TAP(avg_, 16, 3dnow)
  1716. QPEL_2TAP(put_, 8, 3dnow)
  1717. QPEL_2TAP(avg_, 8, 3dnow)
  1718. void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1719. {
  1720. put_pixels8_xy2_mmx(dst, src, stride, 8);
  1721. }
  1722. void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1723. {
  1724. put_pixels16_xy2_mmx(dst, src, stride, 16);
  1725. }
  1726. void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1727. {
  1728. avg_pixels8_xy2_mmx(dst, src, stride, 8);
  1729. }
  1730. void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
  1731. {
  1732. avg_pixels16_xy2_mmx(dst, src, stride, 16);
  1733. }
  1734. #if HAVE_YASM
  1735. typedef void emu_edge_core_func(uint8_t *buf, const uint8_t *src,
  1736. x86_reg linesize, x86_reg start_y,
  1737. x86_reg end_y, x86_reg block_h,
  1738. x86_reg start_x, x86_reg end_x,
  1739. x86_reg block_w);
  1740. extern emu_edge_core_func ff_emu_edge_core_mmx;
  1741. extern emu_edge_core_func ff_emu_edge_core_sse;
  1742. static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
  1743. int linesize,
  1744. int block_w, int block_h,
  1745. int src_x, int src_y,
  1746. int w, int h,
  1747. emu_edge_core_func *core_fn)
  1748. {
  1749. int start_y, start_x, end_y, end_x, src_y_add = 0;
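/* Clamp src_x/src_y so that the block still overlaps the picture by at least
 * one row/column; the asm core then only has to replicate edge pixels for the
 * out-of-picture part. */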
  1750. if (src_y >= h) {
  1751. src_y_add = h - 1 - src_y;
  1752. src_y = h - 1;
  1753. } else if (src_y <= -block_h) {
  1754. src_y_add = 1 - block_h - src_y;
  1755. src_y = 1 - block_h;
  1756. }
  1757. if (src_x >= w) {
  1758. src += w - 1 - src_x;
  1759. src_x = w - 1;
  1760. } else if (src_x <= -block_w) {
  1761. src += 1 - block_w - src_x;
  1762. src_x = 1 - block_w;
  1763. }
  1764. start_y = FFMAX(0, -src_y);
  1765. start_x = FFMAX(0, -src_x);
  1766. end_y = FFMIN(block_h, h-src_y);
  1767. end_x = FFMIN(block_w, w-src_x);
  1768. assert(start_x < end_x && block_w > 0);
  1769. assert(start_y < end_y && block_h > 0);
  1770. // fill in the to-be-copied part plus all above/below
  1771. src += (src_y_add + start_y) * linesize + start_x;
  1772. buf += start_x;
  1773. core_fn(buf, src, linesize, start_y, end_y,
  1774. block_h, start_x, end_x, block_w);
  1775. }
  1776. #if ARCH_X86_32
  1777. static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
  1778. int linesize,
  1779. int block_w, int block_h,
  1780. int src_x, int src_y, int w, int h)
  1781. {
  1782. emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
  1783. w, h, &ff_emu_edge_core_mmx);
  1784. }
  1785. #endif
  1786. static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
  1787. int linesize,
  1788. int block_w, int block_h,
  1789. int src_x, int src_y, int w, int h)
  1790. {
  1791. emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
  1792. w, h, &ff_emu_edge_core_sse);
  1793. }
  1794. #endif /* HAVE_YASM */
  1795. typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
  1796. int linesize, int block_w, int block_h,
  1797. int src_x, int src_y, int w, int h);
  1798. static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
  1799. int stride, int h, int ox, int oy,
  1800. int dxx, int dxy, int dyx, int dyy,
  1801. int shift, int r, int width, int height,
  1802. emulated_edge_mc_func *emu_edge_fn)
  1803. {
  1804. const int w = 8;
  1805. const int ix = ox >> (16 + shift);
  1806. const int iy = oy >> (16 + shift);
  1807. const int oxs = ox >> 4;
  1808. const int oys = oy >> 4;
  1809. const int dxxs = dxx >> 4;
  1810. const int dxys = dxy >> 4;
  1811. const int dyxs = dyx >> 4;
  1812. const int dyys = dyy >> 4;
  1813. const uint16_t r4[4] = { r, r, r, r };
  1814. const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
  1815. const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
  1816. const uint64_t shift2 = 2 * shift;
  1817. uint8_t edge_buf[(h + 1) * stride];
  1818. int x, y;
  1819. const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
  1820. const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
  1821. const int dxh = dxy * (h - 1);
  1822. const int dyw = dyx * (w - 1);
  1823. if ( // non-constant fullpel offset (3% of blocks)
  1824. ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
  1825. (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
  1826. // uses more than 16 bits of subpel mv (only at huge resolution)
  1827. || (dxx | dxy | dyx | dyy) & 15) {
  1828. // FIXME could still use mmx for some of the rows
  1829. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
  1830. shift, r, width, height);
  1831. return;
  1832. }
  1833. src += ix + iy * stride;
  1834. if ((unsigned)ix >= width - w ||
  1835. (unsigned)iy >= height - h) {
  1836. emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
  1837. src = edge_buf;
  1838. }
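/* mm6 = (1 << shift) replicated into all four 16-bit words, mm7 = 0
 * (used below to unpack bytes to words). */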
  1839. __asm__ volatile (
  1840. "movd %0, %%mm6 \n\t"
  1841. "pxor %%mm7, %%mm7 \n\t"
  1842. "punpcklwd %%mm6, %%mm6 \n\t"
  1843. "punpcklwd %%mm6, %%mm6 \n\t"
  1844. :: "r"(1<<shift)
  1845. );
  1846. for (x = 0; x < w; x += 4) {
  1847. uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
  1848. oxs - dxys + dxxs * (x + 1),
  1849. oxs - dxys + dxxs * (x + 2),
  1850. oxs - dxys + dxxs * (x + 3) };
  1851. uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
  1852. oys - dyys + dyxs * (x + 1),
  1853. oys - dyys + dyxs * (x + 2),
  1854. oys - dyys + dyxs * (x + 3) };
  1855. for (y = 0; y < h; y++) {
  1856. __asm__ volatile (
  1857. "movq %0, %%mm4 \n\t"
  1858. "movq %1, %%mm5 \n\t"
  1859. "paddw %2, %%mm4 \n\t"
  1860. "paddw %3, %%mm5 \n\t"
  1861. "movq %%mm4, %0 \n\t"
  1862. "movq %%mm5, %1 \n\t"
  1863. "psrlw $12, %%mm4 \n\t"
  1864. "psrlw $12, %%mm5 \n\t"
  1865. : "+m"(*dx4), "+m"(*dy4)
  1866. : "m"(*dxy4), "m"(*dyy4)
  1867. );
  1868. __asm__ volatile (
  1869. "movq %%mm6, %%mm2 \n\t"
  1870. "movq %%mm6, %%mm1 \n\t"
  1871. "psubw %%mm4, %%mm2 \n\t"
  1872. "psubw %%mm5, %%mm1 \n\t"
  1873. "movq %%mm2, %%mm0 \n\t"
  1874. "movq %%mm4, %%mm3 \n\t"
  1875. "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
  1876. "pmullw %%mm5, %%mm3 \n\t" // dx * dy
  1877. "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
  1878. "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
  1879. "movd %4, %%mm5 \n\t"
  1880. "movd %3, %%mm4 \n\t"
  1881. "punpcklbw %%mm7, %%mm5 \n\t"
  1882. "punpcklbw %%mm7, %%mm4 \n\t"
  1883. "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
  1884. "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
  1885. "movd %2, %%mm5 \n\t"
  1886. "movd %1, %%mm4 \n\t"
  1887. "punpcklbw %%mm7, %%mm5 \n\t"
  1888. "punpcklbw %%mm7, %%mm4 \n\t"
  1889. "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
  1890. "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
  1891. "paddw %5, %%mm1 \n\t"
  1892. "paddw %%mm3, %%mm2 \n\t"
  1893. "paddw %%mm1, %%mm0 \n\t"
  1894. "paddw %%mm2, %%mm0 \n\t"
  1895. "psrlw %6, %%mm0 \n\t"
  1896. "packuswb %%mm0, %%mm0 \n\t"
  1897. "movd %%mm0, %0 \n\t"
  1898. : "=m"(dst[x + y * stride])
  1899. : "m"(src[0]), "m"(src[1]),
  1900. "m"(src[stride]), "m"(src[stride + 1]),
  1901. "m"(*r4), "m"(shift2)
  1902. );
  1903. src += stride;
  1904. }
  1905. src += 4 - h * stride;
  1906. }
  1907. }
  1908. #if HAVE_YASM
  1909. #if ARCH_X86_32
  1910. static void gmc_mmx(uint8_t *dst, uint8_t *src,
  1911. int stride, int h, int ox, int oy,
  1912. int dxx, int dxy, int dyx, int dyy,
  1913. int shift, int r, int width, int height)
  1914. {
  1915. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1916. width, height, &emulated_edge_mc_mmx);
  1917. }
  1918. #endif
  1919. static void gmc_sse(uint8_t *dst, uint8_t *src,
  1920. int stride, int h, int ox, int oy,
  1921. int dxx, int dxy, int dyx, int dyy,
  1922. int shift, int r, int width, int height)
  1923. {
  1924. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1925. width, height, &emulated_edge_mc_sse);
  1926. }
  1927. #else
  1928. static void gmc_mmx(uint8_t *dst, uint8_t *src,
  1929. int stride, int h, int ox, int oy,
  1930. int dxx, int dxy, int dyx, int dyy,
  1931. int shift, int r, int width, int height)
  1932. {
  1933. gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
  1934. width, height, &ff_emulated_edge_mc_8);
  1935. }
  1936. #endif
  1937. #define PREFETCH(name, op) \
  1938. static void name(void *mem, int stride, int h) \
  1939. { \
  1940. const uint8_t *p = mem; \
  1941. do { \
  1942. __asm__ volatile (#op" %0" :: "m"(*p)); \
  1943. p += stride; \
  1944. } while (--h); \
  1945. }
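/* prefetch_mmx2 uses the SSE prefetcht0 hint, prefetch_3dnow the 3DNow!
 * prefetch instruction; both touch one cache line per row of the block. */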
  1946. PREFETCH(prefetch_mmx2, prefetcht0)
  1947. PREFETCH(prefetch_3dnow, prefetch)
  1948. #undef PREFETCH
  1949. #include "h264_qpel_mmx.c"
  1950. void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
  1951. int stride, int h, int x, int y);
  1952. void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
  1953. int stride, int h, int x, int y);
  1954. void ff_avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst, uint8_t *src,
  1955. int stride, int h, int x, int y);
  1956. void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
  1957. int stride, int h, int x, int y);
  1958. void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
  1959. int stride, int h, int x, int y);
  1960. void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
  1961. int stride, int h, int x, int y);
  1962. void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
  1963. int stride, int h, int x, int y);
  1964. void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
  1965. int stride, int h, int x, int y);
  1966. void ff_put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst, uint8_t *src,
  1967. int stride, int h, int x, int y);
  1968. void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1969. int stride, int h, int x, int y);
  1970. void ff_avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst, uint8_t *src,
  1971. int stride, int h, int x, int y);
  1972. void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
  1973. int stride, int h, int x, int y);
  1974. #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
  1975. void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
  1976. (uint8_t *dst, uint8_t *src, \
  1977. int stride, int h, int x, int y);
  1978. CHROMA_MC(put, 2, 10, mmxext)
  1979. CHROMA_MC(avg, 2, 10, mmxext)
  1980. CHROMA_MC(put, 4, 10, mmxext)
  1981. CHROMA_MC(avg, 4, 10, mmxext)
  1982. CHROMA_MC(put, 8, 10, sse2)
  1983. CHROMA_MC(avg, 8, 10, sse2)
  1984. CHROMA_MC(put, 8, 10, avx)
  1985. CHROMA_MC(avg, 8, 10, avx)
  1986. /* CAVS-specific */
  1987. void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
  1988. {
  1989. put_pixels8_mmx(dst, src, stride, 8);
  1990. }
  1991. void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
  1992. {
  1993. avg_pixels8_mmx(dst, src, stride, 8);
  1994. }
  1995. void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
  1996. {
  1997. put_pixels16_mmx(dst, src, stride, 16);
  1998. }
  1999. void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride)
  2000. {
  2001. avg_pixels16_mmx(dst, src, stride, 16);
  2002. }
  2003. /* VC-1-specific */
  2004. void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
  2005. int stride, int rnd)
  2006. {
  2007. put_pixels8_mmx(dst, src, stride, 8);
  2008. }
  2009. void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src,
  2010. int stride, int rnd)
  2011. {
  2012. avg_pixels8_mmx2(dst, src, stride, 8);
  2013. }
  2014. /* only used in VP3/5/6 */
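/* Averages the two source blocks without the usual +1 rounding bias,
 * i.e. (a + b) >> 1, processing four rows per loop iteration. */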
  2015. static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
  2016. {
  2017. // START_TIMER
  2018. MOVQ_BFE(mm6);
  2019. __asm__ volatile(
  2020. "1: \n\t"
  2021. "movq (%1), %%mm0 \n\t"
  2022. "movq (%2), %%mm1 \n\t"
  2023. "movq (%1,%4), %%mm2 \n\t"
  2024. "movq (%2,%4), %%mm3 \n\t"
  2025. PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
  2026. "movq %%mm4, (%3) \n\t"
  2027. "movq %%mm5, (%3,%4) \n\t"
  2028. "movq (%1,%4,2), %%mm0 \n\t"
  2029. "movq (%2,%4,2), %%mm1 \n\t"
  2030. "movq (%1,%5), %%mm2 \n\t"
  2031. "movq (%2,%5), %%mm3 \n\t"
  2032. "lea (%1,%4,4), %1 \n\t"
  2033. "lea (%2,%4,4), %2 \n\t"
  2034. PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
  2035. "movq %%mm4, (%3,%4,2) \n\t"
  2036. "movq %%mm5, (%3,%5) \n\t"
  2037. "lea (%3,%4,4), %3 \n\t"
  2038. "subl $4, %0 \n\t"
  2039. "jnz 1b \n\t"
  2040. :"+r"(h), "+r"(a), "+r"(b), "+r"(dst)
  2041. :"r"((x86_reg)stride), "r"((x86_reg)3L*stride)
  2042. :"memory");
  2043. // STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
  2044. }
  2045. static void put_vp_no_rnd_pixels16_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
  2046. {
  2047. put_vp_no_rnd_pixels8_l2_mmx(dst, a, b, stride, h);
  2048. put_vp_no_rnd_pixels8_l2_mmx(dst+8, a+8, b+8, stride, h);
  2049. }
  2050. #if CONFIG_DIRAC_DECODER
  2051. #define DIRAC_PIXOP(OPNAME, EXT)\
  2052. void ff_ ## OPNAME ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  2053. {\
  2054. OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
  2055. }\
  2056. void ff_ ## OPNAME ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  2057. {\
  2058. OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
  2059. }\
  2060. void ff_ ## OPNAME ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
  2061. {\
  2062. OPNAME ## _pixels16_ ## EXT(dst , src[0] , stride, h);\
  2063. OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
  2064. }
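/* Dirac motion compensation passes an array of up to five source pointers;
 * these fullpel cases only need src[0]. */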
  2065. DIRAC_PIXOP(put, mmx)
  2066. DIRAC_PIXOP(avg, mmx)
  2067. DIRAC_PIXOP(avg, mmx2)
  2068. void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  2069. {
  2070. put_pixels16_sse2(dst, src[0], stride, h);
  2071. }
  2072. void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  2073. {
  2074. avg_pixels16_sse2(dst, src[0], stride, h);
  2075. }
  2076. void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  2077. {
  2078. put_pixels16_sse2(dst , src[0] , stride, h);
  2079. put_pixels16_sse2(dst+16, src[0]+16, stride, h);
  2080. }
  2081. void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
  2082. {
  2083. avg_pixels16_sse2(dst , src[0] , stride, h);
  2084. avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
  2085. }
  2086. #endif
2087. /* XXX: These functions should be removed as soon as all IDCTs are
2088.  * converted. */
  2089. #if CONFIG_GPL
  2090. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size,
  2091. DCTELEM *block)
  2092. {
  2093. ff_mmx_idct(block);
  2094. ff_put_pixels_clamped_mmx(block, dest, line_size);
  2095. }
  2096. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size,
  2097. DCTELEM *block)
  2098. {
  2099. ff_mmx_idct(block);
  2100. ff_add_pixels_clamped_mmx(block, dest, line_size);
  2101. }
  2102. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size,
  2103. DCTELEM *block)
  2104. {
  2105. ff_mmxext_idct(block);
  2106. ff_put_pixels_clamped_mmx(block, dest, line_size);
  2107. }
  2108. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size,
  2109. DCTELEM *block)
  2110. {
  2111. ff_mmxext_idct(block);
  2112. ff_add_pixels_clamped_mmx(block, dest, line_size);
  2113. }
  2114. #endif
  2115. static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
  2116. {
  2117. ff_idct_xvid_mmx(block);
  2118. ff_put_pixels_clamped_mmx(block, dest, line_size);
  2119. }
  2120. static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
  2121. {
  2122. ff_idct_xvid_mmx(block);
  2123. ff_add_pixels_clamped_mmx(block, dest, line_size);
  2124. }
  2125. static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
  2126. {
  2127. ff_idct_xvid_mmx2(block);
  2128. ff_put_pixels_clamped_mmx(block, dest, line_size);
  2129. }
  2130. static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
  2131. {
  2132. ff_idct_xvid_mmx2(block);
  2133. ff_add_pixels_clamped_mmx(block, dest, line_size);
  2134. }
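/* Vorbis inverse channel coupling: converts (magnitude, angle) pairs into the
 * two output channels in place; see the per-instruction comments below. */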
  2135. static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
  2136. {
  2137. int i;
  2138. __asm__ volatile ("pxor %%mm7, %%mm7":);
  2139. for (i = 0; i < blocksize; i += 2) {
  2140. __asm__ volatile (
  2141. "movq %0, %%mm0 \n\t"
  2142. "movq %1, %%mm1 \n\t"
  2143. "movq %%mm0, %%mm2 \n\t"
  2144. "movq %%mm1, %%mm3 \n\t"
  2145. "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
  2146. "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
  2147. "pslld $31, %%mm2 \n\t" // keep only the sign bit
  2148. "pxor %%mm2, %%mm1 \n\t"
  2149. "movq %%mm3, %%mm4 \n\t"
  2150. "pand %%mm1, %%mm3 \n\t"
  2151. "pandn %%mm1, %%mm4 \n\t"
  2152. "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
  2153. "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
  2154. "movq %%mm3, %1 \n\t"
  2155. "movq %%mm0, %0 \n\t"
  2156. : "+m"(mag[i]), "+m"(ang[i])
  2157. :: "memory"
  2158. );
  2159. }
  2160. __asm__ volatile ("femms");
  2161. }
  2162. static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
  2163. {
  2164. int i;
  2165. __asm__ volatile (
  2166. "movaps %0, %%xmm5 \n\t"
  2167. :: "m"(ff_pdw_80000000[0])
  2168. );
  2169. for (i = 0; i < blocksize; i += 4) {
  2170. __asm__ volatile (
  2171. "movaps %0, %%xmm0 \n\t"
  2172. "movaps %1, %%xmm1 \n\t"
  2173. "xorps %%xmm2, %%xmm2 \n\t"
  2174. "xorps %%xmm3, %%xmm3 \n\t"
  2175. "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
  2176. "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
  2177. "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
  2178. "xorps %%xmm2, %%xmm1 \n\t"
  2179. "movaps %%xmm3, %%xmm4 \n\t"
  2180. "andps %%xmm1, %%xmm3 \n\t"
  2181. "andnps %%xmm1, %%xmm4 \n\t"
  2182. "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a < 0) & (a ^ sign(m)))
  2183. "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a > 0) & (a ^ sign(m)))
  2184. "movaps %%xmm3, %1 \n\t"
  2185. "movaps %%xmm0, %0 \n\t"
  2186. : "+m"(mag[i]), "+m"(ang[i])
  2187. :: "memory"
  2188. );
  2189. }
  2190. }
  2191. #define IF1(x) x
  2192. #define IF0(x)
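/* MIX5 handles the common 5-channel downmix to mono or stereo where the matrix
 * uses only three coefficients (front left/right, center, surround); the
 * zero/symmetric pattern is checked in ac3_downmix_sse below. MIX_MISC handles
 * arbitrary matrices. */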
  2193. #define MIX5(mono, stereo) \
  2194. __asm__ volatile ( \
  2195. "movss 0(%2), %%xmm5 \n" \
  2196. "movss 8(%2), %%xmm6 \n" \
  2197. "movss 24(%2), %%xmm7 \n" \
  2198. "shufps $0, %%xmm5, %%xmm5 \n" \
  2199. "shufps $0, %%xmm6, %%xmm6 \n" \
  2200. "shufps $0, %%xmm7, %%xmm7 \n" \
  2201. "1: \n" \
  2202. "movaps (%0, %1), %%xmm0 \n" \
  2203. "movaps 0x400(%0, %1), %%xmm1 \n" \
  2204. "movaps 0x800(%0, %1), %%xmm2 \n" \
  2205. "movaps 0xc00(%0, %1), %%xmm3 \n" \
  2206. "movaps 0x1000(%0, %1), %%xmm4 \n" \
  2207. "mulps %%xmm5, %%xmm0 \n" \
  2208. "mulps %%xmm6, %%xmm1 \n" \
  2209. "mulps %%xmm5, %%xmm2 \n" \
  2210. "mulps %%xmm7, %%xmm3 \n" \
  2211. "mulps %%xmm7, %%xmm4 \n" \
  2212. stereo("addps %%xmm1, %%xmm0 \n") \
  2213. "addps %%xmm1, %%xmm2 \n" \
  2214. "addps %%xmm3, %%xmm0 \n" \
  2215. "addps %%xmm4, %%xmm2 \n" \
  2216. mono("addps %%xmm2, %%xmm0 \n") \
  2217. "movaps %%xmm0, (%0, %1) \n" \
  2218. stereo("movaps %%xmm2, 0x400(%0, %1) \n") \
  2219. "add $16, %0 \n" \
  2220. "jl 1b \n" \
  2221. : "+&r"(i) \
  2222. : "r"(samples[0] + len), "r"(matrix) \
  2223. : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
  2224. "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
  2225. "memory" \
  2226. );
  2227. #define MIX_MISC(stereo) \
  2228. __asm__ volatile ( \
  2229. "1: \n" \
  2230. "movaps (%3, %0), %%xmm0 \n" \
  2231. stereo("movaps %%xmm0, %%xmm1 \n") \
  2232. "mulps %%xmm4, %%xmm0 \n" \
  2233. stereo("mulps %%xmm5, %%xmm1 \n") \
  2234. "lea 1024(%3, %0), %1 \n" \
  2235. "mov %5, %2 \n" \
  2236. "2: \n" \
  2237. "movaps (%1), %%xmm2 \n" \
  2238. stereo("movaps %%xmm2, %%xmm3 \n") \
  2239. "mulps (%4, %2), %%xmm2 \n" \
  2240. stereo("mulps 16(%4, %2), %%xmm3 \n") \
  2241. "addps %%xmm2, %%xmm0 \n" \
  2242. stereo("addps %%xmm3, %%xmm1 \n") \
  2243. "add $1024, %1 \n" \
  2244. "add $32, %2 \n" \
  2245. "jl 2b \n" \
  2246. "movaps %%xmm0, (%3, %0) \n" \
  2247. stereo("movaps %%xmm1, 1024(%3, %0) \n") \
  2248. "add $16, %0 \n" \
  2249. "jl 1b \n" \
  2250. : "+&r"(i), "=&r"(j), "=&r"(k) \
  2251. : "r"(samples[0] + len), "r"(matrix_simd + in_ch), \
  2252. "g"((intptr_t) - 32 * (in_ch - 1)) \
  2253. : "memory" \
  2254. );
  2255. static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
  2256. int out_ch, int in_ch, int len)
  2257. {
  2258. int (*matrix_cmp)[2] = (int(*)[2])matrix;
  2259. intptr_t i, j, k;
  2260. i = -len * sizeof(float);
  2261. if (in_ch == 5 && out_ch == 2 &&
  2262. !(matrix_cmp[0][1] | matrix_cmp[2][0] |
  2263. matrix_cmp[3][1] | matrix_cmp[4][0] |
  2264. (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
  2265. (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
  2266. MIX5(IF0, IF1);
  2267. } else if (in_ch == 5 && out_ch == 1 &&
  2268. matrix_cmp[0][0] == matrix_cmp[2][0] &&
  2269. matrix_cmp[3][0] == matrix_cmp[4][0]) {
  2270. MIX5(IF1, IF0);
  2271. } else {
  2272. DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
  2273. j = 2 * in_ch * sizeof(float);
  2274. __asm__ volatile (
  2275. "1: \n"
  2276. "sub $8, %0 \n"
  2277. "movss (%2, %0), %%xmm4 \n"
  2278. "movss 4(%2, %0), %%xmm5 \n"
  2279. "shufps $0, %%xmm4, %%xmm4 \n"
  2280. "shufps $0, %%xmm5, %%xmm5 \n"
  2281. "movaps %%xmm4, (%1, %0, 4) \n"
  2282. "movaps %%xmm5, 16(%1, %0, 4) \n"
  2283. "jg 1b \n"
  2284. : "+&r"(j)
  2285. : "r"(matrix_simd), "r"(matrix)
  2286. : "memory"
  2287. );
  2288. if (out_ch == 2) {
  2289. MIX_MISC(IF1);
  2290. } else {
  2291. MIX_MISC(IF0);
  2292. }
  2293. }
  2294. }
  2295. #if HAVE_6REGS
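/* Windowed overlap-add used by MDCT-based codecs. In the notation of the
 * per-instruction comments below:
 *   dst[len + i] = src0[len + i] * win[len + j] - src1[j] * win[len + i]
 *   dst[len + j] = src0[len + i] * win[len + i] + src1[j] * win[len + j]
 * with i running forward over the first half and j mirrored over the second. */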
  2296. static void vector_fmul_window_3dnow2(float *dst, const float *src0,
  2297. const float *src1, const float *win,
  2298. int len)
  2299. {
  2300. x86_reg i = -len * 4;
  2301. x86_reg j = len * 4 - 8;
  2302. __asm__ volatile (
  2303. "1: \n"
  2304. "pswapd (%5, %1), %%mm1 \n"
  2305. "movq (%5, %0), %%mm0 \n"
  2306. "pswapd (%4, %1), %%mm5 \n"
  2307. "movq (%3, %0), %%mm4 \n"
  2308. "movq %%mm0, %%mm2 \n"
  2309. "movq %%mm1, %%mm3 \n"
  2310. "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i]
  2311. "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j]
  2312. "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j]
  2313. "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i]
  2314. "pfadd %%mm3, %%mm2 \n"
  2315. "pfsub %%mm0, %%mm1 \n"
  2316. "pswapd %%mm2, %%mm2 \n"
  2317. "movq %%mm1, (%2, %0) \n"
  2318. "movq %%mm2, (%2, %1) \n"
  2319. "sub $8, %1 \n"
  2320. "add $8, %0 \n"
  2321. "jl 1b \n"
  2322. "femms \n"
  2323. : "+r"(i), "+r"(j)
  2324. : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
  2325. );
  2326. }
  2327. static void vector_fmul_window_sse(float *dst, const float *src0,
  2328. const float *src1, const float *win, int len)
  2329. {
  2330. x86_reg i = -len * 4;
  2331. x86_reg j = len * 4 - 16;
  2332. __asm__ volatile (
  2333. "1: \n"
  2334. "movaps (%5, %1), %%xmm1 \n"
  2335. "movaps (%5, %0), %%xmm0 \n"
  2336. "movaps (%4, %1), %%xmm5 \n"
  2337. "movaps (%3, %0), %%xmm4 \n"
  2338. "shufps $0x1b, %%xmm1, %%xmm1 \n"
  2339. "shufps $0x1b, %%xmm5, %%xmm5 \n"
  2340. "movaps %%xmm0, %%xmm2 \n"
  2341. "movaps %%xmm1, %%xmm3 \n"
  2342. "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i]
  2343. "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j]
  2344. "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j]
  2345. "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i]
  2346. "addps %%xmm3, %%xmm2 \n"
  2347. "subps %%xmm0, %%xmm1 \n"
  2348. "shufps $0x1b, %%xmm2, %%xmm2 \n"
  2349. "movaps %%xmm1, (%2, %0) \n"
  2350. "movaps %%xmm2, (%2, %1) \n"
  2351. "sub $16, %1 \n"
  2352. "add $16, %0 \n"
  2353. "jl 1b \n"
  2354. : "+r"(i), "+r"(j)
  2355. : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
  2356. );
  2357. }
  2358. #endif /* HAVE_6REGS */
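/* Clamp every float in src to [min, max]; processes 16 floats (64 bytes) per
 * iteration, so len is assumed to be a multiple of 16. */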
  2359. static void vector_clipf_sse(float *dst, const float *src,
  2360. float min, float max, int len)
  2361. {
  2362. x86_reg i = (len - 16) * 4;
  2363. __asm__ volatile (
  2364. "movss %3, %%xmm4 \n\t"
  2365. "movss %4, %%xmm5 \n\t"
  2366. "shufps $0, %%xmm4, %%xmm4 \n\t"
  2367. "shufps $0, %%xmm5, %%xmm5 \n\t"
  2368. "1: \n\t"
  2369. "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
  2370. "movaps 16(%2, %0), %%xmm1 \n\t"
  2371. "movaps 32(%2, %0), %%xmm2 \n\t"
  2372. "movaps 48(%2, %0), %%xmm3 \n\t"
  2373. "maxps %%xmm4, %%xmm0 \n\t"
  2374. "maxps %%xmm4, %%xmm1 \n\t"
  2375. "maxps %%xmm4, %%xmm2 \n\t"
  2376. "maxps %%xmm4, %%xmm3 \n\t"
  2377. "minps %%xmm5, %%xmm0 \n\t"
  2378. "minps %%xmm5, %%xmm1 \n\t"
  2379. "minps %%xmm5, %%xmm2 \n\t"
  2380. "minps %%xmm5, %%xmm3 \n\t"
  2381. "movaps %%xmm0, (%1, %0) \n\t"
  2382. "movaps %%xmm1, 16(%1, %0) \n\t"
  2383. "movaps %%xmm2, 32(%1, %0) \n\t"
  2384. "movaps %%xmm3, 48(%1, %0) \n\t"
  2385. "sub $64, %0 \n\t"
  2386. "jge 1b \n\t"
  2387. : "+&r"(i)
  2388. : "r"(dst), "r"(src), "m"(min), "m"(max)
  2389. : "memory"
  2390. );
  2391. }
  2392. void ff_vp3_idct_mmx(int16_t *input_data);
  2393. void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
  2394. void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
  2395. void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size,
  2396. const DCTELEM *block);
  2397. void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
  2398. void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
  2399. void ff_vp3_idct_sse2(int16_t *input_data);
  2400. void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
  2401. void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
  2402. int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2,
  2403. int order);
  2404. int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
  2405. int order);
  2406. int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2,
  2407. const int16_t *v3,
  2408. int order, int mul);
  2409. int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
  2410. const int16_t *v3,
  2411. int order, int mul);
  2412. int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
  2413. const int16_t *v3,
  2414. int order, int mul);
  2415. void ff_apply_window_int16_mmxext (int16_t *output, const int16_t *input,
  2416. const int16_t *window, unsigned int len);
  2417. void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
  2418. const int16_t *window, unsigned int len);
  2419. void ff_apply_window_int16_sse2 (int16_t *output, const int16_t *input,
  2420. const int16_t *window, unsigned int len);
  2421. void ff_apply_window_int16_sse2_ba (int16_t *output, const int16_t *input,
  2422. const int16_t *window, unsigned int len);
  2423. void ff_apply_window_int16_ssse3 (int16_t *output, const int16_t *input,
  2424. const int16_t *window, unsigned int len);
  2425. void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
  2426. const int16_t *window, unsigned int len);
  2427. void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
  2428. void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
  2429. void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top,
  2430. const uint8_t *diff, int w,
  2431. int *left, int *left_top);
  2432. int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
  2433. int w, int left);
  2434. int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
  2435. int w, int left);
  2436. float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
  2437. void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
  2438. int len);
  2439. void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
  2440. int len);
  2441. void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
  2442. const float *src1, int len);
  2443. void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
  2444. const float *src1, int len);
  2445. void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
  2446. const float *src2, int len);
  2447. void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
  2448. const float *src2, int len);
  2449. void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
  2450. int32_t min, int32_t max, unsigned int len);
  2451. void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
  2452. int32_t min, int32_t max, unsigned int len);
  2453. void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
  2454. int32_t min, int32_t max, unsigned int len);
  2455. void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
  2456. int32_t min, int32_t max, unsigned int len);
  2457. extern void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
  2458. const float *src1, int len);
  2459. extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
  2460. const float *src1, int len);
  2461. #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
  2462. do { \
  2463. c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
  2464. c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
  2465. c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
  2466. c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
  2467. c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
  2468. c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
  2469. c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
  2470. c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
  2471. c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
  2472. c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
  2473. c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
  2474. c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
  2475. c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
  2476. c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
  2477. c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
  2478. c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
  2479. } while (0)
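/* The 16 qpel table entries set above cover the 4x4 quarter-pel grid:
 * entry x + 4 * y is the mc<x><y> function for horizontal position x and
 * vertical position y. */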
  2480. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  2481. do { \
  2482. c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  2483. c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  2484. c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  2485. c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
  2486. } while (0)
  2487. #define H264_QPEL_FUNCS(x, y, CPU) \
  2488. do { \
  2489. c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
  2490. c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \
  2491. c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
  2492. c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \
  2493. } while (0)
  2494. #define H264_QPEL_FUNCS_10(x, y, CPU) \
  2495. do { \
  2496. c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
  2497. c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
  2498. c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
  2499. c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
  2500. } while (0)
  2501. static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
  2502. {
  2503. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  2504. c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
  2505. c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
  2506. c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
  2507. if (!high_bit_depth) {
  2508. c->clear_block = clear_block_mmx;
  2509. c->clear_blocks = clear_blocks_mmx;
  2510. c->draw_edges = draw_edges_mmx;
  2511. SET_HPEL_FUNCS(put, 0, 16, mmx);
  2512. SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
  2513. SET_HPEL_FUNCS(avg, 0, 16, mmx);
  2514. SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
  2515. SET_HPEL_FUNCS(put, 1, 8, mmx);
  2516. SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
  2517. SET_HPEL_FUNCS(avg, 1, 8, mmx);
  2518. SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
  2519. }
  2520. #if ARCH_X86_32 || !HAVE_YASM
  2521. c->gmc = gmc_mmx;
  2522. #endif
  2523. #if ARCH_X86_32 && HAVE_YASM
  2524. if (!high_bit_depth)
  2525. c->emulated_edge_mc = emulated_edge_mc_mmx;
  2526. #endif
  2527. c->add_bytes = add_bytes_mmx;
  2528. c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
  2529. c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
  2530. if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
  2531. c->h263_v_loop_filter = h263_v_loop_filter_mmx;
  2532. c->h263_h_loop_filter = h263_h_loop_filter_mmx;
  2533. }
  2534. #if HAVE_YASM
  2535. if (!high_bit_depth && CONFIG_H264CHROMA) {
  2536. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd;
  2537. c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
  2538. }
  2539. c->vector_clip_int32 = ff_vector_clip_int32_mmx;
  2540. #endif
  2541. }
  2542. static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
  2543. int mm_flags)
  2544. {
  2545. const int bit_depth = avctx->bits_per_raw_sample;
  2546. const int high_bit_depth = bit_depth > 8;
  2547. c->prefetch = prefetch_mmx2;
  2548. if (!high_bit_depth) {
  2549. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
  2550. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
  2551. c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
  2552. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
  2553. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
  2554. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
  2555. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
  2556. c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
  2557. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
  2558. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
  2559. }
  2560. if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
  2561. if (!high_bit_depth) {
  2562. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
  2563. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
  2564. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
  2565. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
  2566. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
  2567. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
  2568. }
  2569. if (CONFIG_VP3_DECODER && HAVE_YASM) {
  2570. c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2;
  2571. c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2;
  2572. }
  2573. }
  2574. if (CONFIG_VP3_DECODER && HAVE_YASM)
  2575. c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
  2576. if (CONFIG_VP3_DECODER && (avctx->codec_id == CODEC_ID_VP3 ||
  2577. avctx->codec_id == CODEC_ID_THEORA)) {
  2578. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
  2579. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
  2580. }
  2581. if (CONFIG_H264QPEL) {
  2582. SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
  2583. SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
  2584. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
  2585. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
  2586. SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
  2587. SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
  2588. if (!high_bit_depth) {
  2589. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
  2590. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
  2591. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
  2592. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
  2593. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
  2594. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
  2595. } else if (bit_depth == 10) {
  2596. #if HAVE_YASM
  2597. #if !ARCH_X86_64
  2598. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
  2599. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
  2600. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
  2601. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
  2602. #endif
  2603. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
  2604. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
  2605. #endif
  2606. }
  2607. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
  2608. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
  2609. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
  2610. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
  2611. }
  2612. #if HAVE_YASM
  2613. if (!high_bit_depth && CONFIG_H264CHROMA) {
  2614. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd;
  2615. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2;
  2616. c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2;
  2617. c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2;
  2618. }
  2619. if (bit_depth == 10 && CONFIG_H264CHROMA) {
  2620. c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
  2621. c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
  2622. c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
  2623. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
  2624. }
  2625. c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
  2626. c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
  2627. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
  2628. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  2629. c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
  2630. } else {
  2631. c->apply_window_int16 = ff_apply_window_int16_mmxext;
  2632. }
  2633. #endif
  2634. }
  2635. static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
  2636. int mm_flags)
  2637. {
  2638. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  2639. c->prefetch = prefetch_3dnow;
  2640. if (!high_bit_depth) {
  2641. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  2642. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  2643. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  2644. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  2645. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  2646. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  2647. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  2648. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  2649. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  2650. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  2651. if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2652. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  2653. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  2654. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  2655. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  2656. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  2657. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  2658. }
  2659. }
  2660. if (CONFIG_VP3_DECODER && (avctx->codec_id == CODEC_ID_VP3 ||
  2661. avctx->codec_id == CODEC_ID_THEORA)) {
  2662. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
  2663. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
  2664. }
  2665. if (CONFIG_H264QPEL) {
  2666. SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
  2667. SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
  2668. SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
  2669. SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
  2670. SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
  2671. SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
  2672. if (!high_bit_depth) {
  2673. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
  2674. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
  2675. SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
  2676. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
  2677. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
  2678. SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
  2679. }
  2680. SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
  2681. SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
  2682. SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
  2683. SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
  2684. }
  2685. #if HAVE_YASM
  2686. if (!high_bit_depth && CONFIG_H264CHROMA) {
  2687. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd;
  2688. c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
  2689. }
  2690. #endif
  2691. c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
  2692. #if HAVE_7REGS
  2693. c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
  2694. #endif
  2695. }
  2696. static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
  2697. int mm_flags)
  2698. {
  2699. #if HAVE_6REGS
  2700. c->vector_fmul_window = vector_fmul_window_3dnow2;
  2701. #endif
  2702. }
  2703. static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
  2704. {
  2705. const int high_bit_depth = avctx->bits_per_raw_sample > 8;
  2706. if (!high_bit_depth) {
  2707. if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
  2708. /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
  2709. c->clear_block = clear_block_sse;
  2710. c->clear_blocks = clear_blocks_sse;
  2711. }
  2712. }
  2713. c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
  2714. c->ac3_downmix = ac3_downmix_sse;
  2715. #if HAVE_YASM
  2716. c->vector_fmul = ff_vector_fmul_sse;
  2717. c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
  2718. c->vector_fmul_add = ff_vector_fmul_add_sse;
  2719. #endif
  2720. #if HAVE_6REGS
  2721. c->vector_fmul_window = vector_fmul_window_sse;
  2722. #endif
  2723. c->vector_clipf = vector_clipf_sse;
  2724. #if HAVE_YASM
  2725. c->scalarproduct_float = ff_scalarproduct_float_sse;
  2726. c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
  2727. if (!high_bit_depth)
  2728. c->emulated_edge_mc = emulated_edge_mc_sse;
  2729. c->gmc = gmc_sse;
  2730. #endif
  2731. }
  2732. static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
  2733. int mm_flags)
  2734. {
  2735. const int bit_depth = avctx->bits_per_raw_sample;
  2736. const int high_bit_depth = bit_depth > 8;
  2737. if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  2738. // these functions are slower than mmx on AMD, but faster on Intel
  2739. if (!high_bit_depth) {
  2740. c->put_pixels_tab[0][0] = put_pixels16_sse2;
  2741. c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
  2742. c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
  2743. if (CONFIG_H264QPEL)
  2744. H264_QPEL_FUNCS(0, 0, sse2);
  2745. }
  2746. }
  2747. if (!high_bit_depth && CONFIG_H264QPEL) {
  2748. H264_QPEL_FUNCS(0, 1, sse2);
  2749. H264_QPEL_FUNCS(0, 2, sse2);
  2750. H264_QPEL_FUNCS(0, 3, sse2);
  2751. H264_QPEL_FUNCS(1, 1, sse2);
  2752. H264_QPEL_FUNCS(1, 2, sse2);
  2753. H264_QPEL_FUNCS(1, 3, sse2);
  2754. H264_QPEL_FUNCS(2, 1, sse2);
  2755. H264_QPEL_FUNCS(2, 2, sse2);
  2756. H264_QPEL_FUNCS(2, 3, sse2);
  2757. H264_QPEL_FUNCS(3, 1, sse2);
  2758. H264_QPEL_FUNCS(3, 2, sse2);
  2759. H264_QPEL_FUNCS(3, 3, sse2);
  2760. }
  2761. #if HAVE_YASM
  2762. if (bit_depth == 10) {
  2763. if (CONFIG_H264QPEL) {
  2764. SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
  2765. SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
  2766. SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
  2767. SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
  2768. H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
  2769. H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
  2770. H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
  2771. }
  2772. if (CONFIG_H264CHROMA) {
  2773. c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
  2774. c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
  2775. }
  2776. }
  2777. c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
  2778. c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
  2779. if (mm_flags & AV_CPU_FLAG_ATOM) {
  2780. c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
  2781. } else {
  2782. c->vector_clip_int32 = ff_vector_clip_int32_sse2;
  2783. }
  2784. if (avctx->flags & CODEC_FLAG_BITEXACT) {
  2785. c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
  2786. } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  2787. c->apply_window_int16 = ff_apply_window_int16_sse2;
  2788. }
  2789. c->bswap_buf = ff_bswap32_buf_sse2;
  2790. #endif
  2791. }
static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                               int mm_flags)
{
#if HAVE_SSSE3
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;
    const int bit_depth      = avctx->bits_per_raw_sample;

    if (!high_bit_depth && CONFIG_H264QPEL) {
        H264_QPEL_FUNCS(1, 0, ssse3);
        H264_QPEL_FUNCS(1, 1, ssse3);
        H264_QPEL_FUNCS(1, 2, ssse3);
        H264_QPEL_FUNCS(1, 3, ssse3);
        H264_QPEL_FUNCS(2, 0, ssse3);
        H264_QPEL_FUNCS(2, 1, ssse3);
        H264_QPEL_FUNCS(2, 2, ssse3);
        H264_QPEL_FUNCS(2, 3, ssse3);
        H264_QPEL_FUNCS(3, 0, ssse3);
        H264_QPEL_FUNCS(3, 1, ssse3);
        H264_QPEL_FUNCS(3, 2, ssse3);
        H264_QPEL_FUNCS(3, 3, ssse3);
    }
#if HAVE_YASM
    else if (bit_depth == 10 && CONFIG_H264QPEL) {
        H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
        H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
        H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
    }

    if (!high_bit_depth && CONFIG_H264CHROMA) {
        c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd;
        c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd;
        c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
        c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
    }

    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;

    if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;

    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif
#endif
}
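
/* SSE4: only the int32 clipping routine. */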
static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                              int mm_flags)
{
#if HAVE_YASM
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif
}
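
/* AVX: 10-bit H.264 QPEL (reusing the non-cache64 SSE2 routines) and chroma
 * MC, plus the float vector multiply and butterfly helpers. */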
static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
#if HAVE_AVX && HAVE_YASM
    const int bit_depth = avctx->bits_per_raw_sample;

    if (bit_depth == 10) {
        // AVX implies !cache64.
        // TODO: Port cache(32|64) detection from x264.
        if (CONFIG_H264QPEL) {
            H264_QPEL_FUNCS_10(1, 0, sse2);
            H264_QPEL_FUNCS_10(2, 0, sse2);
            H264_QPEL_FUNCS_10(3, 0, sse2);
        }
        if (CONFIG_H264CHROMA) {
            c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
            c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
        }
    }

    c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
    c->vector_fmul                  = ff_vector_fmul_avx;
    c->vector_fmul_reverse          = ff_vector_fmul_reverse_avx;
    c->vector_fmul_add              = ff_vector_fmul_add_avx;
#endif
}
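
/* Entry point: query the CPU once and install the x86-optimized function
 * pointers for every instruction-set extension the host supports. */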
void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & AV_CPU_FLAG_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & AV_CPU_FLAG_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & AV_CPU_FLAG_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & AV_CPU_FLAG_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & AV_CPU_FLAG_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int idct_algo = avctx->idct_algo;
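        /* IDCT selection: only done for full-resolution, 8-bit decoding;
         * driven by avctx->idct_algo and the detected CPU flags. */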
        if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
            if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
                c->idct_put              = ff_simple_idct_put_mmx;
                c->idct_add              = ff_simple_idct_add_mmx;
                c->idct                  = ff_simple_idct_mmx;
                c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            } else if (idct_algo == FF_IDCT_LIBMPEG2MMX) {
                if (mm_flags & AV_CPU_FLAG_MMX2) {
                    c->idct_put = ff_libmpeg2mmx2_idct_put;
                    c->idct_add = ff_libmpeg2mmx2_idct_add;
                    c->idct     = ff_mmxext_idct;
                } else {
                    c->idct_put = ff_libmpeg2mmx_idct_put;
                    c->idct_add = ff_libmpeg2mmx_idct_add;
                    c->idct     = ff_mmx_idct;
                }
                c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
#endif
            } else if ((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER ||
                        CONFIG_VP6_DECODER) &&
                       idct_algo == FF_IDCT_VP3 && HAVE_YASM) {
                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->idct_put              = ff_vp3_idct_put_sse2;
                    c->idct_add              = ff_vp3_idct_add_sse2;
                    c->idct                  = ff_vp3_idct_sse2;
                    c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
                } else {
                    c->idct_put              = ff_vp3_idct_put_mmx;
                    c->idct_add              = ff_vp3_idct_add_mmx;
                    c->idct                  = ff_vp3_idct_mmx;
                    c->idct_permutation_type = FF_PARTTRANS_IDCT_PERM;
                }
            } else if (idct_algo == FF_IDCT_CAVS) {
                c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
            } else if (idct_algo == FF_IDCT_XVIDMMX) {
                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->idct_put              = ff_idct_xvid_sse2_put;
                    c->idct_add              = ff_idct_xvid_sse2_add;
                    c->idct                  = ff_idct_xvid_sse2;
                    c->idct_permutation_type = FF_SSE2_IDCT_PERM;
                } else if (mm_flags & AV_CPU_FLAG_MMX2) {
                    c->idct_put = ff_idct_xvid_mmx2_put;
                    c->idct_add = ff_idct_xvid_mmx2_add;
                    c->idct     = ff_idct_xvid_mmx2;
                } else {
                    c->idct_put = ff_idct_xvid_mmx_put;
                    c->idct_add = ff_idct_xvid_mmx_add;
                    c->idct     = ff_idct_xvid_mmx;
                }
            }
        }

        dsputil_init_mmx(c, avctx, mm_flags);
    }
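
    /* Per-extension init calls, from least to most capable; later calls
     * overwrite function pointers installed by earlier ones. */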
    if (mm_flags & AV_CPU_FLAG_MMX2)
        dsputil_init_mmx2(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW)
        dsputil_init_3dnow(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_3DNOWEXT && HAVE_AMD3DNOWEXT)
        dsputil_init_3dnow2(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE)
        dsputil_init_sse(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE)
        dsputil_init_sse4(c, avctx, mm_flags);
    if (mm_flags & AV_CPU_FLAG_AVX)
        dsputil_init_avx(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}