/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "x86_cpu.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pdw_80000000[2] attribute_used __attribute__ ((aligned(16))) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_8 attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_1 attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t ff_pb_3 attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL;
static const uint64_t ff_pb_7 attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL;
static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL;
static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t"\
"paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd " \n\t" \
"packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd " \n\t" \
"psllw $1, %%" #regd " \n\t"::)
#endif
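
/* A hedged scalar sketch (not from the original file; the helper name is
   illustrative) of what the PIC variants above do: pcmpeqd fills a register
   with all ones, then per-word shifts carve the wanted constant out of it,
   avoiding a memory load through the GOT. */
static inline uint64_t synth_pw_one_sketch(void)
{
    uint64_t all_ones = ~0ULL;                        /* pcmpeqd: -1 in every lane */
    /* psrlw $15: shift each 16-bit lane right by 15, leaving 0x0001 per word;
       the mask emulates the lane boundaries that psrlw respects in hardware */
    return (all_ones >> 15) & 0x0001000100010001ULL;
}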
// using regr as temporary and for the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
"movq " #rega ", " #regr " \n\t"\
"pand " #regb ", " #regr " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pand " #regfe "," #regb " \n\t"\
"psrlq $1, " #regb " \n\t"\
"paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
"movq " #rega ", " #regr " \n\t"\
"por " #regb ", " #regr " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pand " #regfe "," #regb " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
"movq " #rega ", " #regr " \n\t"\
"movq " #regc ", " #regp " \n\t"\
"pand " #regb ", " #regr " \n\t"\
"pand " #regd ", " #regp " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pxor " #regc ", " #regd " \n\t"\
"pand %%mm6, " #regb " \n\t"\
"pand %%mm6, " #regd " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psrlq $1, " #regd " \n\t"\
"paddb " #regb ", " #regr " \n\t"\
"paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
"movq " #rega ", " #regr " \n\t"\
"movq " #regc ", " #regp " \n\t"\
"por " #regb ", " #regr " \n\t"\
"por " #regd ", " #regp " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pxor " #regc ", " #regd " \n\t"\
"pand %%mm6, " #regb " \n\t"\
"pand %%mm6, " #regd " \n\t"\
"psrlq $1, " #regd " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psubb " #regb ", " #regr " \n\t"\
"psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

#define SBUTTERFLY(a,b,t,n)\
"movq " #a ", " #t " \n\t" /* abcd */\
"punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
"punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ASMALIGN(4)
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
        : "%"REG_a
    );
}
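
/* Hedged plain-C reference for the unrolled loop above (the helper name is
   illustrative, not from the original file): each of 8 rows of 8 bytes is
   widened to 16-bit DCT coefficients via punpck{l,h}bw against zero. */
static inline void get_pixels_c_sketch(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = pixels[j];
        pixels += line_size;
    }
}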
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ASMALIGN(4)
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
        : "%"REG_a
    );
}
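
/* Hedged plain-C reference for diff_pixels_mmx (illustrative helper name):
   the byte-wise difference of two 8x8 blocks, widened to 16-bit words. */
static inline void diff_pixels_c_sketch(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}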
#endif //CONFIG_ENCODERS

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
        :"memory");

    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus using "r"
    __asm __volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
        :"memory");
}
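
/* Hedged plain-C reference (illustrative helper name): packuswb stores each
   16-bit coefficient clamped to the unsigned byte range. */
static inline void put_pixels_clamped_c_sketch(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[8*i + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}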
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
    );
}

#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    long index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((long)line_size)
    );

    return sum;
}
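
/* Hedged plain-C reference for pix_sum16_mmx (illustrative helper name):
   the sum of all 256 pixels of a 16x16 block. */
static inline int pix_sum16_c_sketch(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x];
        pix += line_size;
    }
    return sum;
}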
#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#define H263_LOOP_FILTER \
"pxor %%mm7, %%mm7 \n\t"\
"movq %0, %%mm0 \n\t"\
"movq %0, %%mm1 \n\t"\
"movq %3, %%mm2 \n\t"\
"movq %3, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm3, %%mm1 \n\t"\
"movq %1, %%mm2 \n\t"\
"movq %1, %%mm3 \n\t"\
"movq %2, %%mm4 \n\t"\
"movq %2, %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"psubw %%mm2, %%mm4 \n\t"\
"psubw %%mm3, %%mm5 \n\t"\
"psllw $2, %%mm4 \n\t"\
"psllw $2, %%mm5 \n\t"\
"paddw %%mm0, %%mm4 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pcmpgtw %%mm4, %%mm6 \n\t"\
"pcmpgtw %%mm5, %%mm7 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"pxor %%mm7, %%mm5 \n\t"\
"psubw %%mm6, %%mm4 \n\t"\
"psubw %%mm7, %%mm5 \n\t"\
"psrlw $3, %%mm4 \n\t"\
"psrlw $3, %%mm5 \n\t"\
"packuswb %%mm5, %%mm4 \n\t"\
"packsswb %%mm7, %%mm6 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"movd %4, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"psubusb %%mm4, %%mm2 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"psubusb %%mm4, %%mm3 \n\t"\
"psubb %%mm3, %%mm2 \n\t"\
"movq %1, %%mm3 \n\t"\
"movq %2, %%mm4 \n\t"\
"pxor %%mm6, %%mm3 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"paddusb %%mm2, %%mm3 \n\t"\
"psubusb %%mm2, %%mm4 \n\t"\
"pxor %%mm6, %%mm3 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"paddusb %%mm2, %%mm2 \n\t"\
"packsswb %%mm1, %%mm0 \n\t"\
"pcmpgtb %%mm0, %%mm7 \n\t"\
"pxor %%mm7, %%mm0 \n\t"\
"psubb %%mm7, %%mm0 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"psubusb %%mm2, %%mm0 \n\t"\
"psubb %%mm0, %%mm1 \n\t"\
"pand %5, %%mm1 \n\t"\
"psrlw $2, %%mm1 \n\t"\
"pxor %%mm7, %%mm1 \n\t"\
"psubb %%mm7, %%mm1 \n\t"\
"movq %0, %%mm5 \n\t"\
"movq %3, %%mm6 \n\t"\
"psubb %%mm1, %%mm5 \n\t"\
"paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(
        H263_LOOP_FILTER
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
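
/* Hedged plain-C reference for transpose4x4 (illustrative helper name): */
static inline void transpose4x4_c_sketch(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i*dst_stride + j] = src[j*src_stride + i];
}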
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6
        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((long) stride ),
           "r" ((long)(3*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                        pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
    return tmp;
}
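
/* Hedged plain-C reference for pix_norm1_mmx (illustrative helper name):
   the sum of squared pixel values over a 16x16 block; pmaddwd above
   accumulates two squares per dword lane. */
static inline int pix_norm1_c_sketch(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x] * pix[x];
        pix += line_size;
    }
    return sum;
}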
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
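
/* Both routines above compute the sum of squared errors; the psubusb/por
   pair yields |pix1 - pix2| without a signed byte subtract. Hedged plain-C
   reference (illustrative helper name): */
static inline int sse_c_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int w, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}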
static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "shr $1,%2\n"
        "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
        "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
        "1:\n"
        "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
        "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
        "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
        "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movdqa %%xmm1,%%xmm5\n"
        "movdqa %%xmm3,%%xmm6\n"
        "psubusb %%xmm2,%%xmm1\n"
        "psubusb %%xmm4,%%xmm3\n"
        "psubusb %%xmm5,%%xmm2\n"
        "psubusb %%xmm6,%%xmm4\n"
        "por %%xmm1,%%xmm2\n"
        "por %%xmm3,%%xmm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movdqa %%xmm2,%%xmm1\n"
        "movdqa %%xmm4,%%xmm3\n"
        "punpckhbw %%xmm0,%%xmm2\n"
        "punpckhbw %%xmm0,%%xmm4\n"
        "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%xmm2,%%xmm2\n"
        "pmaddwd %%xmm4,%%xmm4\n"
        "pmaddwd %%xmm1,%%xmm1\n"
        "pmaddwd %%xmm3,%%xmm3\n"
        "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
        "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
        "paddd %%xmm2,%%xmm1\n"
        "paddd %%xmm4,%%xmm3\n"
        "paddd %%xmm1,%%xmm7\n"
        "paddd %%xmm3,%%xmm7\n"
        "decl %2\n"
        "jnz 1b\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movd %%xmm7,%3\n"
        : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
        : "r" ((long)line_size));
    return tmp;
}

static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "add %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}

static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n"\
"movq 8(%0), %%mm3\n"\
"add %2,%0\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n"\
"movq 8(%0), " #out1 "\n"\
"add %2,%0\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
"movq (%0),%%mm2\n"\
"movq (%1)," #out0 "\n"\
"movq 8(%0),%%mm3\n"\
"movq 8(%1)," #out1 "\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb " #out0 ", %%mm2\n"\
"psubb " #out1 ", %%mm3\n"\
"pxor %%mm7, %%mm2\n"\
"pxor %%mm7, %%mm3\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
"movq (%0)," #out0 "\n"\
"movq (%1),%%mm2\n"\
"movq 8(%0)," #out1 "\n"\
"movq 8(%1),%%mm3\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb %%mm2, " #out0 "\n"\
"psubb %%mm3, " #out1 "\n"\
"pxor %%mm7, " #out0 "\n"\
"pxor %%mm7, " #out1 "\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    long i=0;
    uint8_t l, lt;

    asm volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left = src2[w-1];
}
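
/* The pmaxub/pminub sequence above evaluates the median-of-three predictor
   mid_pred(L, T, L + T - LT) branchlessly, using the identity
   median(a,b,c) = max(min(a,b), min(max(a,b), c)). Hedged scalar sketch
   (illustrative helper name): */
static inline int median3_sketch(int a, int b, int c)
{
    int mn = a < b ? a : b;
    int mx = a < b ? b : a;
    int t  = mx < c ? mx : c;   /* min(max(a,b), c) */
    return mn > t ? mn : t;     /* max(min(a,b), t) */
}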
#define LBUTTERFLY2(a1,b1,a2,b2)\
"paddw " #b1 ", " #a1 " \n\t"\
"paddw " #b2 ", " #a2 " \n\t"\
"paddw " #b1 ", " #b1 " \n\t"\
"paddw " #b2 ", " #b2 " \n\t"\
"psubw " #a1 ", " #b1 " \n\t"\
"psubw " #a2 ", " #b2 " \n\t"
#define HADAMARD48\
LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\

#define MMABS(a,z)\
"pxor " #z ", " #z " \n\t"\
"pcmpgtw " #a ", " #z " \n\t"\
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
"pxor " #z ", " #z " \n\t"\
"pcmpgtw " #a ", " #z " \n\t"\
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"\
"paddusw " #a ", " #sum " \n\t"

#define MMABS_MMX2(a,z)\
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"\
"paddusw " #a ", " #sum " \n\t"

#define TRANSPOSE4(a,b,c,d,t)\
SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
"movq "#o"(%1), " #a " \n\t"\
"movq "#o"+16(%1), " #b " \n\t"\
"movq "#o"+32(%1), " #c " \n\t"\
"movq "#o"+48(%1), " #d " \n\t"

#define STORE4(o, a, b, c, d)\
"movq "#a", "#o"(%1) \n\t"\
"movq "#b", "#o"+16(%1) \n\t"\
"movq "#c", "#o"+32(%1) \n\t"\
"movq "#d", "#o"+48(%1) \n\t"\

static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 112(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 120(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"
        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 112(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 120(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "pshufw $0x0E, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "pshufw $0x01, %%mm0, %%mm1 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"
        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
"paddw " #m4 ", " #m3 " \n\t" /* x1 */\
"movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
"pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
"movq "#in7", " #m3 " \n\t" /* d */\
"movq "#in0", %%mm5 \n\t" /* D */\
"paddw " #m3 ", %%mm5 \n\t" /* x4 */\
"psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
"movq "#in1", %%mm5 \n\t" /* C */\
"movq "#in2", %%mm6 \n\t" /* B */\
"paddw " #m6 ", %%mm5 \n\t" /* x3 */\
"paddw " #m5 ", %%mm6 \n\t" /* x2 */\
"paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
"psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
"pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
"paddw " #rnd ", %%mm4 \n\t" /* x2 */\
"paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
"psraw $5, %%mm5 \n\t"\
"packuswb %%mm5, %%mm5 \n\t"\
OP(%%mm5, out, %%mm7, d)
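
/* QPEL_V_LOW and the horizontal lowpass below implement the MPEG-4
   quarter-pel filter with taps (20, -6, 3, -1)/32, applied to the pairwise
   sums x1..x4 of samples at distances 0..3 from the output position, then
   saturated to a byte by packuswb. Hedged scalar sketch of the kernel
   (illustrative helper name; rnd is the rounder constant): */
static inline int qpel_kernel_sketch(int x1, int x2, int x3, int x4, int rnd)
{
    int v = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb saturation */
}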
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1653. : "memory"\
  1654. );\
  1655. }\
  1656. \
  1657. static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1658. int i;\
  1659. int16_t temp[16];\
  1660. /* quick HACK, XXX FIXME MUST be optimized */\
  1661. for(i=0; i<h; i++)\
  1662. {\
  1663. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1664. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1665. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1666. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1667. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1668. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
  1669. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
  1670. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
  1671. temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
  1672. temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
  1673. temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
  1674. temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
  1675. temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
  1676. temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
  1677. temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
  1678. temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
  1679. asm volatile(\
  1680. "movq (%0), %%mm0 \n\t"\
  1681. "movq 8(%0), %%mm1 \n\t"\
  1682. "paddw %2, %%mm0 \n\t"\
  1683. "paddw %2, %%mm1 \n\t"\
  1684. "psraw $5, %%mm0 \n\t"\
  1685. "psraw $5, %%mm1 \n\t"\
  1686. "packuswb %%mm1, %%mm0 \n\t"\
  1687. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1688. "movq 16(%0), %%mm0 \n\t"\
  1689. "movq 24(%0), %%mm1 \n\t"\
  1690. "paddw %2, %%mm0 \n\t"\
  1691. "paddw %2, %%mm1 \n\t"\
  1692. "psraw $5, %%mm0 \n\t"\
  1693. "psraw $5, %%mm1 \n\t"\
  1694. "packuswb %%mm1, %%mm0 \n\t"\
  1695. OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
  1696. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1697. : "memory"\
  1698. );\
  1699. dst+=dstStride;\
  1700. src+=srcStride;\
  1701. }\
  1702. }\
  1703. \
  1704. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1705. uint64_t temp;\
  1706. \
  1707. asm volatile(\
  1708. "pxor %%mm7, %%mm7 \n\t"\
  1709. "1: \n\t"\
  1710. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1711. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1712. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1713. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1714. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1715. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1716. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1717. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1718. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1719. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1720. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1721. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1722. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1723. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1724. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1725. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1726. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1727. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1728. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1729. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1730. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1731. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1732. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1733. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1734. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1735. "paddw %6, %%mm6 \n\t"\
  1736. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1737. "psraw $5, %%mm0 \n\t"\
  1738. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1739. \
  1740. "movd 5(%0), %%mm5 \n\t" /* FGHI */\
  1741. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
  1742. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
  1743. "paddw %%mm5, %%mm1 \n\t" /* a */\
  1744. "paddw %%mm6, %%mm2 \n\t" /* b */\
  1745. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
  1746. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
  1747. "paddw %%mm6, %%mm3 \n\t" /* c */\
  1748. "paddw %%mm5, %%mm4 \n\t" /* d */\
  1749. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1750. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1751. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1752. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1753. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
  1754. "paddw %6, %%mm1 \n\t"\
  1755. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
  1756. "psraw $5, %%mm3 \n\t"\
  1757. "packuswb %%mm3, %%mm0 \n\t"\
  1758. OP_MMX2(%%mm0, (%1), %%mm4, q)\
  1759. \
  1760. "add %3, %0 \n\t"\
  1761. "add %4, %1 \n\t"\
  1762. "decl %2 \n\t"\
  1763. " jnz 1b \n\t"\
  1764. : "+a"(src), "+c"(dst), "+m"(h)\
  1765. : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1766. : "memory"\
  1767. );\
  1768. }\
  1769. \
  1770. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1771. int i;\
  1772. int16_t temp[8];\
  1773. /* quick HACK, XXX FIXME MUST be optimized */\
  1774. for(i=0; i<h; i++)\
  1775. {\
  1776. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1777. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1778. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1779. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1780. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1781. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
  1782. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
  1783. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
  1784. asm volatile(\
  1785. "movq (%0), %%mm0 \n\t"\
  1786. "movq 8(%0), %%mm1 \n\t"\
  1787. "paddw %2, %%mm0 \n\t"\
  1788. "paddw %2, %%mm1 \n\t"\
  1789. "psraw $5, %%mm0 \n\t"\
  1790. "psraw $5, %%mm1 \n\t"\
  1791. "packuswb %%mm1, %%mm0 \n\t"\
  1792. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1793. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1794. :"memory"\
  1795. );\
  1796. dst+=dstStride;\
  1797. src+=srcStride;\
  1798. }\
  1799. }
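/* QPEL_BASE above generates the horizontal lowpass filters: a pshufw-based
 * MMX2 path and a 3DNow! fallback that filters into a scalar temp array.
 * QPEL_OP below adds the vertical lowpass filters and builds the sixteen
 * qpelN_mcXY entry points per block size from the two. */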
  1800. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
  1801. \
  1802. static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1803. uint64_t temp[17*4];\
  1804. uint64_t *temp_ptr= temp;\
  1805. int count= 17;\
  1806. \
  1807. /*FIXME unroll */\
  1808. asm volatile(\
  1809. "pxor %%mm7, %%mm7 \n\t"\
  1810. "1: \n\t"\
  1811. "movq (%0), %%mm0 \n\t"\
  1812. "movq (%0), %%mm1 \n\t"\
  1813. "movq 8(%0), %%mm2 \n\t"\
  1814. "movq 8(%0), %%mm3 \n\t"\
  1815. "punpcklbw %%mm7, %%mm0 \n\t"\
  1816. "punpckhbw %%mm7, %%mm1 \n\t"\
  1817. "punpcklbw %%mm7, %%mm2 \n\t"\
  1818. "punpckhbw %%mm7, %%mm3 \n\t"\
  1819. "movq %%mm0, (%1) \n\t"\
  1820. "movq %%mm1, 17*8(%1) \n\t"\
  1821. "movq %%mm2, 2*17*8(%1) \n\t"\
  1822. "movq %%mm3, 3*17*8(%1) \n\t"\
  1823. "add $8, %1 \n\t"\
  1824. "add %3, %0 \n\t"\
  1825. "decl %2 \n\t"\
  1826. " jnz 1b \n\t"\
  1827. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1828. : "r" ((long)srcStride)\
  1829. : "memory"\
  1830. );\
  1831. \
  1832. temp_ptr= temp;\
  1833. count=4;\
  1834. \
  1835. /*FIXME reorder for speed */\
  1836. asm volatile(\
  1837. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1838. "1: \n\t"\
  1839. "movq (%0), %%mm0 \n\t"\
  1840. "movq 8(%0), %%mm1 \n\t"\
  1841. "movq 16(%0), %%mm2 \n\t"\
  1842. "movq 24(%0), %%mm3 \n\t"\
  1843. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1844. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1845. "add %4, %1 \n\t"\
  1846. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1847. \
  1848. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1849. "add %4, %1 \n\t"\
  1850. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1851. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
  1852. "add %4, %1 \n\t"\
  1853. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
  1854. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
  1855. "add %4, %1 \n\t"\
  1856. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
  1857. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
  1858. "add %4, %1 \n\t"\
  1859. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
  1860. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
  1861. "add %4, %1 \n\t"\
  1862. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
  1863. \
  1864. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
  1865. "add %4, %1 \n\t" \
  1866. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
  1867. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
  1868. \
  1869. "add $136, %0 \n\t"\
  1870. "add %6, %1 \n\t"\
  1871. "decl %2 \n\t"\
  1872. " jnz 1b \n\t"\
  1873. \
  1874. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1875. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
  1876. :"memory"\
  1877. );\
  1878. }\
  1879. \
  1880. static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1881. uint64_t temp[9*2];\
  1882. uint64_t *temp_ptr= temp;\
  1883. int count= 9;\
  1884. \
  1885. /*FIXME unroll */\
  1886. asm volatile(\
  1887. "pxor %%mm7, %%mm7 \n\t"\
  1888. "1: \n\t"\
  1889. "movq (%0), %%mm0 \n\t"\
  1890. "movq (%0), %%mm1 \n\t"\
  1891. "punpcklbw %%mm7, %%mm0 \n\t"\
  1892. "punpckhbw %%mm7, %%mm1 \n\t"\
  1893. "movq %%mm0, (%1) \n\t"\
  1894. "movq %%mm1, 9*8(%1) \n\t"\
  1895. "add $8, %1 \n\t"\
  1896. "add %3, %0 \n\t"\
  1897. "decl %2 \n\t"\
  1898. " jnz 1b \n\t"\
  1899. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1900. : "r" ((long)srcStride)\
  1901. : "memory"\
  1902. );\
  1903. \
  1904. temp_ptr= temp;\
  1905. count=2;\
  1906. \
  1907. /*FIXME reorder for speed */\
  1908. asm volatile(\
  1909. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1910. "1: \n\t"\
  1911. "movq (%0), %%mm0 \n\t"\
  1912. "movq 8(%0), %%mm1 \n\t"\
  1913. "movq 16(%0), %%mm2 \n\t"\
  1914. "movq 24(%0), %%mm3 \n\t"\
  1915. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1916. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1917. "add %4, %1 \n\t"\
  1918. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1919. \
  1920. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1921. "add %4, %1 \n\t"\
  1922. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1923. \
  1924. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
  1925. "add %4, %1 \n\t"\
  1926. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
  1927. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
  1928. \
  1929. "add $72, %0 \n\t"\
  1930. "add %6, %1 \n\t"\
  1931. "decl %2 \n\t"\
  1932. " jnz 1b \n\t"\
  1933. \
  1934. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1935. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
  1936. : "memory"\
  1937. );\
  1938. }\
  1939. \
  1940. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1941. OPNAME ## pixels8_mmx(dst, src, stride, 8);\
  1942. }\
  1943. \
  1944. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1945. uint64_t temp[8];\
  1946. uint8_t * const half= (uint8_t*)temp;\
  1947. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1948. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1949. }\
  1950. \
  1951. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1952. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
  1953. }\
  1954. \
  1955. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1956. uint64_t temp[8];\
  1957. uint8_t * const half= (uint8_t*)temp;\
  1958. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1959. OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
  1960. }\
  1961. \
  1962. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1963. uint64_t temp[8];\
  1964. uint8_t * const half= (uint8_t*)temp;\
  1965. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1966. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1967. }\
  1968. \
  1969. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1970. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1971. }\
  1972. \
  1973. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1974. uint64_t temp[8];\
  1975. uint8_t * const half= (uint8_t*)temp;\
  1976. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1977. OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
  1978. }\
  1979. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1980. uint64_t half[8 + 9];\
  1981. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1982. uint8_t * const halfHV= ((uint8_t*)half);\
  1983. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1984. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1985. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1986. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1987. }\
  1988. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1989. uint64_t half[8 + 9];\
  1990. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1991. uint8_t * const halfHV= ((uint8_t*)half);\
  1992. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1993. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1994. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1995. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1996. }\
  1997. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1998. uint64_t half[8 + 9];\
  1999. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2000. uint8_t * const halfHV= ((uint8_t*)half);\
  2001. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2002. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  2003. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2004. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2005. }\
  2006. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2007. uint64_t half[8 + 9];\
  2008. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2009. uint8_t * const halfHV= ((uint8_t*)half);\
  2010. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2011. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  2012. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2013. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2014. }\
  2015. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2016. uint64_t half[8 + 9];\
  2017. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2018. uint8_t * const halfHV= ((uint8_t*)half);\
  2019. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2020. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2021. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  2022. }\
  2023. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2024. uint64_t half[8 + 9];\
  2025. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2026. uint8_t * const halfHV= ((uint8_t*)half);\
  2027. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2028. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2029. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2030. }\
  2031. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2032. uint64_t half[8 + 9];\
  2033. uint8_t * const halfH= ((uint8_t*)half);\
  2034. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2035. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  2036. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2037. }\
  2038. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2039. uint64_t half[8 + 9];\
  2040. uint8_t * const halfH= ((uint8_t*)half);\
  2041. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2042. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  2043. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2044. }\
  2045. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2046. uint64_t half[9];\
  2047. uint8_t * const halfH= ((uint8_t*)half);\
  2048. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2049. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2050. }\
  2051. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  2052. OPNAME ## pixels16_mmx(dst, src, stride, 16);\
  2053. }\
  2054. \
  2055. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2056. uint64_t temp[32];\
  2057. uint8_t * const half= (uint8_t*)temp;\
  2058. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  2059. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  2060. }\
  2061. \
  2062. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2063. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
  2064. }\
  2065. \
  2066. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2067. uint64_t temp[32];\
  2068. uint8_t * const half= (uint8_t*)temp;\
  2069. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  2070. OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
  2071. }\
  2072. \
  2073. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2074. uint64_t temp[32];\
  2075. uint8_t * const half= (uint8_t*)temp;\
  2076. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  2077. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  2078. }\
  2079. \
  2080. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2081. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
  2082. }\
  2083. \
  2084. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2085. uint64_t temp[32];\
  2086. uint8_t * const half= (uint8_t*)temp;\
  2087. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  2088. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
  2089. }\
  2090. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2091. uint64_t half[16*2 + 17*2];\
  2092. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2093. uint8_t * const halfHV= ((uint8_t*)half);\
  2094. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2095. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2096. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2097. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2098. }\
  2099. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2100. uint64_t half[16*2 + 17*2];\
  2101. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2102. uint8_t * const halfHV= ((uint8_t*)half);\
  2103. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2104. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2105. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2106. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2107. }\
  2108. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2109. uint64_t half[16*2 + 17*2];\
  2110. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2111. uint8_t * const halfHV= ((uint8_t*)half);\
  2112. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2113. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2114. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2115. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2116. }\
  2117. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2118. uint64_t half[16*2 + 17*2];\
  2119. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2120. uint8_t * const halfHV= ((uint8_t*)half);\
  2121. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2122. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2123. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2124. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2125. }\
  2126. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2127. uint64_t half[16*2 + 17*2];\
  2128. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2129. uint8_t * const halfHV= ((uint8_t*)half);\
  2130. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2131. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2132. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2133. }\
  2134. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2135. uint64_t half[16*2 + 17*2];\
  2136. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2137. uint8_t * const halfHV= ((uint8_t*)half);\
  2138. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2139. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2140. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2141. }\
  2142. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2143. uint64_t half[17*2];\
  2144. uint8_t * const halfH= ((uint8_t*)half);\
  2145. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2146. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2147. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2148. }\
  2149. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2150. uint64_t half[17*2];\
  2151. uint8_t * const halfH= ((uint8_t*)half);\
  2152. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2153. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2154. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2155. }\
  2156. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2157. uint64_t half[17*2];\
  2158. uint8_t * const halfH= ((uint8_t*)half);\
  2159. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2160. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2161. }
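/* Naming: qpelN_mcXY interpolates at quarter-pel position (X/4, Y/4); X is
 * the horizontal and Y the vertical offset in quarter pels, so e.g. mc20 is
 * the horizontal half-pel case and mc22 the diagonal half-pel one. */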
  2162. #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
  2163. #define AVG_3DNOW_OP(a,b,temp, size) \
  2164. "mov" #size " " #b ", " #temp " \n\t"\
  2165. "pavgusb " #temp ", " #a " \n\t"\
  2166. "mov" #size " " #a ", " #b " \n\t"
  2167. #define AVG_MMX2_OP(a,b,temp, size) \
  2168. "mov" #size " " #b ", " #temp " \n\t"\
  2169. "pavgb " #temp ", " #a " \n\t"\
  2170. "mov" #size " " #a ", " #b " \n\t"
  2171. QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
  2172. QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
  2173. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  2174. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
  2175. QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
  2176. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  2177. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
  2178. QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
  2179. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
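/* The ROUNDER constant selects between proper rounding and the no_rnd
 * variant some MPEG-4 prediction modes require:
 *   put:        (20a - 6b + 3c - d + 16) >> 5
 *   put_no_rnd: (20a - 6b + 3c - d + 15) >> 5 */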
  2180. /***********************************/
  2181. /* bilinear qpel: not compliant with any spec, used only with -lavdopts fast */
  2182. #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
  2183. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2184. OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
  2185. }
  2186. #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
  2187. static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2188. OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
  2189. }
  2190. #define QPEL_2TAP(OPNAME, SIZE, MMX)\
  2191. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
  2192. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
  2193. QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
  2194. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
  2195. OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
  2196. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
  2197. OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
  2198. static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
  2199. OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
  2200. static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2201. OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
  2202. }\
  2203. static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2204. OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
  2205. }\
  2206. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
  2207. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
  2208. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
  2209. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
  2210. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
  2211. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
  2212. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
  2213. QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)
  2214. QPEL_2TAP(put_, 16, mmx2)
  2215. QPEL_2TAP(avg_, 16, mmx2)
  2216. QPEL_2TAP(put_, 8, mmx2)
  2217. QPEL_2TAP(avg_, 8, mmx2)
  2218. QPEL_2TAP(put_, 16, 3dnow)
  2219. QPEL_2TAP(avg_, 16, 3dnow)
  2220. QPEL_2TAP(put_, 8, 3dnow)
  2221. QPEL_2TAP(avg_, 8, 3dnow)
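/* The 2-tap variants trade the 8-tap MPEG-4 filter for cheap averages:
 * half-pel positions reuse the hpel primitives directly, and the remaining
 * quarter-pel positions are built from the 3-point _l3_ blend of a base
 * pixel and two offset pixels, which is why the result matches no spec. */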
  2222. #if 0
  2223. static void just_return() { return; }
  2224. #endif
  2225. #define SET_QPEL_FUNC(postfix1, postfix2) \
  2226. c->put_ ## postfix1 = put_ ## postfix2;\
  2227. c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
  2228. c->avg_ ## postfix1 = avg_ ## postfix2;
  2229. static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
  2230. int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
  2231. const int w = 8;
  2232. const int ix = ox>>(16+shift);
  2233. const int iy = oy>>(16+shift);
  2234. const int oxs = ox>>4;
  2235. const int oys = oy>>4;
  2236. const int dxxs = dxx>>4;
  2237. const int dxys = dxy>>4;
  2238. const int dyxs = dyx>>4;
  2239. const int dyys = dyy>>4;
  2240. const uint16_t r4[4] = {r,r,r,r};
  2241. const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
  2242. const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
  2243. const uint64_t shift2 = 2*shift;
  2244. uint8_t edge_buf[(h+1)*stride];
  2245. int x, y;
  2246. const int dxw = (dxx-(1<<(16+shift)))*(w-1);
  2247. const int dyh = (dyy-(1<<(16+shift)))*(h-1);
  2248. const int dxh = dxy*(h-1);
  2249. const int dyw = dyx*(w-1);
  2250. if( // non-constant fullpel offset (3% of blocks)
  2251. (ox^(ox+dxw) | ox^(ox+dxh) | ox^(ox+dxw+dxh) |
  2252. oy^(oy+dyw) | oy^(oy+dyh) | oy^(oy+dyw+dyh)) >> (16+shift)
  2253. // uses more than 16 bits of subpel mv (only at huge resolution)
  2254. || (dxx|dxy|dyx|dyy)&15 )
  2255. {
  2256. //FIXME could still use mmx for some of the rows
  2257. ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
  2258. return;
  2259. }
  2260. src += ix + iy*stride;
  2261. if( (unsigned)ix >= width-w ||
  2262. (unsigned)iy >= height-h )
  2263. {
  2264. ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
  2265. src = edge_buf;
  2266. }
  2267. asm volatile(
  2268. "movd %0, %%mm6 \n\t"
  2269. "pxor %%mm7, %%mm7 \n\t"
  2270. "punpcklwd %%mm6, %%mm6 \n\t"
  2271. "punpcklwd %%mm6, %%mm6 \n\t"
  2272. :: "r"(1<<shift)
  2273. );
  2274. for(x=0; x<w; x+=4){
  2275. uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
  2276. oxs - dxys + dxxs*(x+1),
  2277. oxs - dxys + dxxs*(x+2),
  2278. oxs - dxys + dxxs*(x+3) };
  2279. uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
  2280. oys - dyys + dyxs*(x+1),
  2281. oys - dyys + dyxs*(x+2),
  2282. oys - dyys + dyxs*(x+3) };
  2283. for(y=0; y<h; y++){
  2284. asm volatile(
  2285. "movq %0, %%mm4 \n\t"
  2286. "movq %1, %%mm5 \n\t"
  2287. "paddw %2, %%mm4 \n\t"
  2288. "paddw %3, %%mm5 \n\t"
  2289. "movq %%mm4, %0 \n\t"
  2290. "movq %%mm5, %1 \n\t"
  2291. "psrlw $12, %%mm4 \n\t"
  2292. "psrlw $12, %%mm5 \n\t"
  2293. : "+m"(*dx4), "+m"(*dy4)
  2294. : "m"(*dxy4), "m"(*dyy4)
  2295. );
  2296. asm volatile(
  2297. "movq %%mm6, %%mm2 \n\t"
  2298. "movq %%mm6, %%mm1 \n\t"
  2299. "psubw %%mm4, %%mm2 \n\t"
  2300. "psubw %%mm5, %%mm1 \n\t"
  2301. "movq %%mm2, %%mm0 \n\t"
  2302. "movq %%mm4, %%mm3 \n\t"
  2303. "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
  2304. "pmullw %%mm5, %%mm3 \n\t" // dx*dy
  2305. "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
  2306. "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
  2307. "movd %4, %%mm5 \n\t"
  2308. "movd %3, %%mm4 \n\t"
  2309. "punpcklbw %%mm7, %%mm5 \n\t"
  2310. "punpcklbw %%mm7, %%mm4 \n\t"
  2311. "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
  2312. "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
  2313. "movd %2, %%mm5 \n\t"
  2314. "movd %1, %%mm4 \n\t"
  2315. "punpcklbw %%mm7, %%mm5 \n\t"
  2316. "punpcklbw %%mm7, %%mm4 \n\t"
  2317. "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
  2318. "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
  2319. "paddw %5, %%mm1 \n\t"
  2320. "paddw %%mm3, %%mm2 \n\t"
  2321. "paddw %%mm1, %%mm0 \n\t"
  2322. "paddw %%mm2, %%mm0 \n\t"
  2323. "psrlw %6, %%mm0 \n\t"
  2324. "packuswb %%mm0, %%mm0 \n\t"
  2325. "movd %%mm0, %0 \n\t"
  2326. : "=m"(dst[x+y*stride])
  2327. : "m"(src[0]), "m"(src[1]),
  2328. "m"(src[stride]), "m"(src[stride+1]),
  2329. "m"(*r4), "m"(shift2)
  2330. );
  2331. src += stride;
  2332. }
  2333. src += 4-h*stride;
  2334. }
  2335. }
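/* For reference, the bilinear blend the inner loop above computes per
 * pixel (sketch only, hence #if 0): s = 1<<shift is the subpel scale,
 * dx/dy are the subpel fractions and r the rounding constant, matching
 * the final psrlw by 2*shift in the asm block. */
#if 0
static inline uint8_t gmc_bilin_ref(const uint8_t *src, int stride,
                                    int dx, int dy, int s, int r, int shift)
{
    return ( src[0]         *(s-dx)*(s-dy)
           + src[1]         *  dx  *(s-dy)
           + src[stride]    *(s-dx)*  dy
           + src[stride + 1]*  dx  *  dy
           + r ) >> (2*shift);
}
#endif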
  2336. #ifdef CONFIG_ENCODERS
  2337. static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
  2338. long i=0;
  2339. assert(FFABS(scale) < 256);
  2340. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2341. asm volatile(
  2342. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2343. "psrlw $15, %%mm6 \n\t" // 1w
  2344. "pxor %%mm7, %%mm7 \n\t"
  2345. "movd %4, %%mm5 \n\t"
  2346. "punpcklwd %%mm5, %%mm5 \n\t"
  2347. "punpcklwd %%mm5, %%mm5 \n\t"
  2348. "1: \n\t"
  2349. "movq (%1, %0), %%mm0 \n\t"
  2350. "movq 8(%1, %0), %%mm1 \n\t"
  2351. "pmulhw %%mm5, %%mm0 \n\t"
  2352. "pmulhw %%mm5, %%mm1 \n\t"
  2353. "paddw %%mm6, %%mm0 \n\t"
  2354. "paddw %%mm6, %%mm1 \n\t"
  2355. "psraw $1, %%mm0 \n\t"
  2356. "psraw $1, %%mm1 \n\t"
  2357. "paddw (%2, %0), %%mm0 \n\t"
  2358. "paddw 8(%2, %0), %%mm1 \n\t"
  2359. "psraw $6, %%mm0 \n\t"
  2360. "psraw $6, %%mm1 \n\t"
  2361. "pmullw (%3, %0), %%mm0 \n\t"
  2362. "pmullw 8(%3, %0), %%mm1 \n\t"
  2363. "pmaddwd %%mm0, %%mm0 \n\t"
  2364. "pmaddwd %%mm1, %%mm1 \n\t"
  2365. "paddd %%mm1, %%mm0 \n\t"
  2366. "psrld $4, %%mm0 \n\t"
  2367. "paddd %%mm0, %%mm7 \n\t"
  2368. "add $16, %0 \n\t"
  2369. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2370. " jb 1b \n\t"
  2371. "movq %%mm7, %%mm6 \n\t"
  2372. "psrlq $32, %%mm7 \n\t"
  2373. "paddd %%mm6, %%mm7 \n\t"
  2374. "psrld $2, %%mm7 \n\t"
  2375. "movd %%mm7, %0 \n\t"
  2376. : "+r" (i)
  2377. : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
  2378. );
  2379. return i;
  2380. }
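/* A scalar sketch of roughly what this function computes, using the
 * caller's unshifted scale (illustrative only; BASIS_SHIFT and RECON_SHIFT
 * come from the encoder headers, and the MMX path works in narrower
 * 16-bit intermediates): */
#if 0
static int try_8x8basis_ref(int16_t rem[64], int16_t weight[64],
                            int16_t basis[64], int scale)
{
    int i;
    unsigned sum = 0;
    for(i=0; i<64; i++){
        int b = rem[i] + ((basis[i]*scale
                + (1<<(BASIS_SHIFT - RECON_SHIFT - 1))) >> (BASIS_SHIFT - RECON_SHIFT));
        int w = weight[i];
        b >>= RECON_SHIFT;
        sum += (w*b) * (w*b) >> 4;
    }
    return sum >> 2;
}
#endif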
  2381. static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
  2382. long i=0;
  2383. if(FFABS(scale) < 256){
  2384. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2385. asm volatile(
  2386. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2387. "psrlw $15, %%mm6 \n\t" // 1w
  2388. "movd %3, %%mm5 \n\t"
  2389. "punpcklwd %%mm5, %%mm5 \n\t"
  2390. "punpcklwd %%mm5, %%mm5 \n\t"
  2391. "1: \n\t"
  2392. "movq (%1, %0), %%mm0 \n\t"
  2393. "movq 8(%1, %0), %%mm1 \n\t"
  2394. "pmulhw %%mm5, %%mm0 \n\t"
  2395. "pmulhw %%mm5, %%mm1 \n\t"
  2396. "paddw %%mm6, %%mm0 \n\t"
  2397. "paddw %%mm6, %%mm1 \n\t"
  2398. "psraw $1, %%mm0 \n\t"
  2399. "psraw $1, %%mm1 \n\t"
  2400. "paddw (%2, %0), %%mm0 \n\t"
  2401. "paddw 8(%2, %0), %%mm1 \n\t"
  2402. "movq %%mm0, (%2, %0) \n\t"
  2403. "movq %%mm1, 8(%2, %0) \n\t"
  2404. "add $16, %0 \n\t"
  2405. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2406. " jb 1b \n\t"
  2407. : "+r" (i)
  2408. : "r"(basis), "r"(rem), "g"(scale)
  2409. );
  2410. }else{
  2411. for(i=0; i<8*8; i++){
  2412. rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
  2413. }
  2414. }
  2415. }
  2416. #endif /* CONFIG_ENCODERS */
  2417. #define PREFETCH(name, op) \
  2418. static void name(void *mem, int stride, int h){\
  2419. const uint8_t *p= mem;\
  2420. do{\
  2421. asm volatile(#op" %0" :: "m"(*p));\
  2422. p+= stride;\
  2423. }while(--h);\
  2424. }
  2425. PREFETCH(prefetch_mmx2, prefetcht0)
  2426. PREFETCH(prefetch_3dnow, prefetch)
  2427. #undef PREFETCH
  2428. #include "h264dsp_mmx.c"
  2429. /* AVS specific */
  2430. void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
  2431. void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  2432. put_pixels8_mmx(dst, src, stride, 8);
  2433. }
  2434. void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  2435. avg_pixels8_mmx(dst, src, stride, 8);
  2436. }
  2437. void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  2438. put_pixels16_mmx(dst, src, stride, 16);
  2439. }
  2440. void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
  2441. avg_pixels16_mmx(dst, src, stride, 16);
  2442. }
  2443. /* external functions, from idct_mmx.c */
  2444. void ff_mmx_idct(DCTELEM *block);
  2445. void ff_mmxext_idct(DCTELEM *block);
  2446. void ff_vp3_idct_sse2(int16_t *input_data);
  2447. void ff_vp3_idct_mmx(int16_t *data);
  2448. void ff_vp3_dsp_init_mmx(void);
  2449. /* XXX: these functions should be removed as soon as all IDCTs are
  2450. converted */
  2451. #ifdef CONFIG_GPL
  2452. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2453. {
  2454. ff_mmx_idct (block);
  2455. put_pixels_clamped_mmx(block, dest, line_size);
  2456. }
  2457. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2458. {
  2459. ff_mmx_idct (block);
  2460. add_pixels_clamped_mmx(block, dest, line_size);
  2461. }
  2462. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2463. {
  2464. ff_mmxext_idct (block);
  2465. put_pixels_clamped_mmx(block, dest, line_size);
  2466. }
  2467. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2468. {
  2469. ff_mmxext_idct (block);
  2470. add_pixels_clamped_mmx(block, dest, line_size);
  2471. }
  2472. #endif
  2473. static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
  2474. {
  2475. ff_vp3_idct_sse2(block);
  2476. put_signed_pixels_clamped_mmx(block, dest, line_size);
  2477. }
  2478. static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
  2479. {
  2480. ff_vp3_idct_sse2(block);
  2481. add_pixels_clamped_mmx(block, dest, line_size);
  2482. }
  2483. static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
  2484. {
  2485. ff_vp3_idct_mmx(block);
  2486. put_signed_pixels_clamped_mmx(block, dest, line_size);
  2487. }
  2488. static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
  2489. {
  2490. ff_vp3_idct_mmx(block);
  2491. add_pixels_clamped_mmx(block, dest, line_size);
  2492. }
  2493. static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
  2494. {
  2495. ff_idct_xvid_mmx (block);
  2496. put_pixels_clamped_mmx(block, dest, line_size);
  2497. }
  2498. static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
  2499. {
  2500. ff_idct_xvid_mmx (block);
  2501. add_pixels_clamped_mmx(block, dest, line_size);
  2502. }
  2503. static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
  2504. {
  2505. ff_idct_xvid_mmx2 (block);
  2506. put_pixels_clamped_mmx(block, dest, line_size);
  2507. }
  2508. static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
  2509. {
  2510. ff_idct_xvid_mmx2 (block);
  2511. add_pixels_clamped_mmx(block, dest, line_size);
  2512. }
  2513. static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
  2514. {
  2515. int i;
  2516. asm volatile("pxor %%mm7, %%mm7":);
  2517. for(i=0; i<blocksize; i+=2) {
  2518. asm volatile(
  2519. "movq %0, %%mm0 \n\t"
  2520. "movq %1, %%mm1 \n\t"
  2521. "movq %%mm0, %%mm2 \n\t"
  2522. "movq %%mm1, %%mm3 \n\t"
  2523. "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
  2524. "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
  2525. "pslld $31, %%mm2 \n\t" // keep only the sign bit
  2526. "pxor %%mm2, %%mm1 \n\t"
  2527. "movq %%mm3, %%mm4 \n\t"
  2528. "pand %%mm1, %%mm3 \n\t"
  2529. "pandn %%mm1, %%mm4 \n\t"
  2530. "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  2531. "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  2532. "movq %%mm3, %1 \n\t"
  2533. "movq %%mm0, %0 \n\t"
  2534. :"+m"(mag[i]), "+m"(ang[i])
  2535. ::"memory"
  2536. );
  2537. }
  2538. asm volatile("femms");
  2539. }
  2540. static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
  2541. {
  2542. int i;
  2543. asm volatile(
  2544. "movaps %0, %%xmm5 \n\t"
  2545. ::"m"(ff_pdw_80000000[0])
  2546. );
  2547. for(i=0; i<blocksize; i+=4) {
  2548. asm volatile(
  2549. "movaps %0, %%xmm0 \n\t"
  2550. "movaps %1, %%xmm1 \n\t"
  2551. "xorps %%xmm2, %%xmm2 \n\t"
  2552. "xorps %%xmm3, %%xmm3 \n\t"
  2553. "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
  2554. "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
  2555. "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
  2556. "xorps %%xmm2, %%xmm1 \n\t"
  2557. "movaps %%xmm3, %%xmm4 \n\t"
  2558. "andps %%xmm1, %%xmm3 \n\t"
  2559. "andnps %%xmm1, %%xmm4 \n\t"
  2560. "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
  2561. "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
  2562. "movaps %%xmm3, %1 \n\t"
  2563. "movaps %%xmm0, %0 \n\t"
  2564. :"+m"(mag[i]), "+m"(ang[i])
  2565. ::"memory"
  2566. );
  2567. }
  2568. }
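/* Scalar sketch of the coupling both versions implement (illustrative
 * only, hence #if 0; modulo sign-of-zero edge cases, the branches map
 * onto the sign masks the SIMD code builds): */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for(i=0; i<blocksize; i++) {
        float m = mag[i], a = ang[i];
        if(m > 0.0) {
            if(a > 0.0) { ang[i] = m - a; }
            else        { ang[i] = m; mag[i] = m + a; }
        } else {
            if(a > 0.0) { ang[i] = m + a; }
            else        { ang[i] = m; mag[i] = m - a; }
        }
    }
}
#endif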
  2569. static void vector_fmul_3dnow(float *dst, const float *src, int len){
  2570. long i = (len-4)*4;
  2571. asm volatile(
  2572. "1: \n\t"
  2573. "movq (%1,%0), %%mm0 \n\t"
  2574. "movq 8(%1,%0), %%mm1 \n\t"
  2575. "pfmul (%2,%0), %%mm0 \n\t"
  2576. "pfmul 8(%2,%0), %%mm1 \n\t"
  2577. "movq %%mm0, (%1,%0) \n\t"
  2578. "movq %%mm1, 8(%1,%0) \n\t"
  2579. "sub $16, %0 \n\t"
  2580. "jge 1b \n\t"
  2581. "femms \n\t"
  2582. :"+r"(i)
  2583. :"r"(dst), "r"(src)
  2584. :"memory"
  2585. );
  2586. }
  2587. static void vector_fmul_sse(float *dst, const float *src, int len){
  2588. long i = (len-8)*4;
  2589. asm volatile(
  2590. "1: \n\t"
  2591. "movaps (%1,%0), %%xmm0 \n\t"
  2592. "movaps 16(%1,%0), %%xmm1 \n\t"
  2593. "mulps (%2,%0), %%xmm0 \n\t"
  2594. "mulps 16(%2,%0), %%xmm1 \n\t"
  2595. "movaps %%xmm0, (%1,%0) \n\t"
  2596. "movaps %%xmm1, 16(%1,%0) \n\t"
  2597. "sub $32, %0 \n\t"
  2598. "jge 1b \n\t"
  2599. :"+r"(i)
  2600. :"r"(dst), "r"(src)
  2601. :"memory"
  2602. );
  2603. }
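/* Both vector_fmul versions above compute dst[i] *= src[i] for i in
 * [0,len), walking backwards from the end of the arrays; len is assumed
 * to be a multiple of the vector width (4 floats for 3DNow!, 8 for SSE). */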
  2604. static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
  2605. long i = len*4-16;
  2606. asm volatile(
  2607. "1: \n\t"
  2608. "pswapd 8(%1), %%mm0 \n\t"
  2609. "pswapd (%1), %%mm1 \n\t"
  2610. "pfmul (%3,%0), %%mm0 \n\t"
  2611. "pfmul 8(%3,%0), %%mm1 \n\t"
  2612. "movq %%mm0, (%2,%0) \n\t"
  2613. "movq %%mm1, 8(%2,%0) \n\t"
  2614. "add $16, %1 \n\t"
  2615. "sub $16, %0 \n\t"
  2616. "jge 1b \n\t"
  2617. :"+r"(i), "+r"(src1)
  2618. :"r"(dst), "r"(src0)
  2619. );
  2620. asm volatile("femms");
  2621. }
  2622. static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
  2623. long i = len*4-32;
  2624. asm volatile(
  2625. "1: \n\t"
  2626. "movaps 16(%1), %%xmm0 \n\t"
  2627. "movaps (%1), %%xmm1 \n\t"
  2628. "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
  2629. "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
  2630. "mulps (%3,%0), %%xmm0 \n\t"
  2631. "mulps 16(%3,%0), %%xmm1 \n\t"
  2632. "movaps %%xmm0, (%2,%0) \n\t"
  2633. "movaps %%xmm1, 16(%2,%0) \n\t"
  2634. "add $32, %1 \n\t"
  2635. "sub $32, %0 \n\t"
  2636. "jge 1b \n\t"
  2637. :"+r"(i), "+r"(src1)
  2638. :"r"(dst), "r"(src0)
  2639. );
  2640. }
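/* Scalar reference for both reverse versions (sketch):
 * dst[i] = src0[i] * src1[len - 1 - i]. */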
  2641. static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
  2642. const float *src2, int src3, int len, int step){
  2643. long i = (len-4)*4;
  2644. if(step == 2 && src3 == 0){
  2645. dst += (len-4)*2;
  2646. asm volatile(
  2647. "1: \n\t"
  2648. "movq (%2,%0), %%mm0 \n\t"
  2649. "movq 8(%2,%0), %%mm1 \n\t"
  2650. "pfmul (%3,%0), %%mm0 \n\t"
  2651. "pfmul 8(%3,%0), %%mm1 \n\t"
  2652. "pfadd (%4,%0), %%mm0 \n\t"
  2653. "pfadd 8(%4,%0), %%mm1 \n\t"
  2654. "movd %%mm0, (%1) \n\t"
  2655. "movd %%mm1, 16(%1) \n\t"
  2656. "psrlq $32, %%mm0 \n\t"
  2657. "psrlq $32, %%mm1 \n\t"
  2658. "movd %%mm0, 8(%1) \n\t"
  2659. "movd %%mm1, 24(%1) \n\t"
  2660. "sub $32, %1 \n\t"
  2661. "sub $16, %0 \n\t"
  2662. "jge 1b \n\t"
  2663. :"+r"(i), "+r"(dst)
  2664. :"r"(src0), "r"(src1), "r"(src2)
  2665. :"memory"
  2666. );
  2667. }
  2668. else if(step == 1 && src3 == 0){
  2669. asm volatile(
  2670. "1: \n\t"
  2671. "movq (%2,%0), %%mm0 \n\t"
  2672. "movq 8(%2,%0), %%mm1 \n\t"
  2673. "pfmul (%3,%0), %%mm0 \n\t"
  2674. "pfmul 8(%3,%0), %%mm1 \n\t"
  2675. "pfadd (%4,%0), %%mm0 \n\t"
  2676. "pfadd 8(%4,%0), %%mm1 \n\t"
  2677. "movq %%mm0, (%1,%0) \n\t"
  2678. "movq %%mm1, 8(%1,%0) \n\t"
  2679. "sub $16, %0 \n\t"
  2680. "jge 1b \n\t"
  2681. :"+r"(i)
  2682. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  2683. :"memory"
  2684. );
  2685. }
  2686. else
  2687. ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
  2688. asm volatile("femms");
  2689. }
  2690. static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
  2691. const float *src2, int src3, int len, int step){
  2692. long i = (len-8)*4;
  2693. if(step == 2 && src3 == 0){
  2694. dst += (len-8)*2;
  2695. asm volatile(
  2696. "1: \n\t"
  2697. "movaps (%2,%0), %%xmm0 \n\t"
  2698. "movaps 16(%2,%0), %%xmm1 \n\t"
  2699. "mulps (%3,%0), %%xmm0 \n\t"
  2700. "mulps 16(%3,%0), %%xmm1 \n\t"
  2701. "addps (%4,%0), %%xmm0 \n\t"
  2702. "addps 16(%4,%0), %%xmm1 \n\t"
  2703. "movss %%xmm0, (%1) \n\t"
  2704. "movss %%xmm1, 32(%1) \n\t"
  2705. "movhlps %%xmm0, %%xmm2 \n\t"
  2706. "movhlps %%xmm1, %%xmm3 \n\t"
  2707. "movss %%xmm2, 16(%1) \n\t"
  2708. "movss %%xmm3, 48(%1) \n\t"
  2709. "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
  2710. "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
  2711. "movss %%xmm0, 8(%1) \n\t"
  2712. "movss %%xmm1, 40(%1) \n\t"
  2713. "movhlps %%xmm0, %%xmm2 \n\t"
  2714. "movhlps %%xmm1, %%xmm3 \n\t"
  2715. "movss %%xmm2, 24(%1) \n\t"
  2716. "movss %%xmm3, 56(%1) \n\t"
  2717. "sub $64, %1 \n\t"
  2718. "sub $32, %0 \n\t"
  2719. "jge 1b \n\t"
  2720. :"+r"(i), "+r"(dst)
  2721. :"r"(src0), "r"(src1), "r"(src2)
  2722. :"memory"
  2723. );
  2724. }
  2725. else if(step == 1 && src3 == 0){
  2726. asm volatile(
  2727. "1: \n\t"
  2728. "movaps (%2,%0), %%xmm0 \n\t"
  2729. "movaps 16(%2,%0), %%xmm1 \n\t"
  2730. "mulps (%3,%0), %%xmm0 \n\t"
  2731. "mulps 16(%3,%0), %%xmm1 \n\t"
  2732. "addps (%4,%0), %%xmm0 \n\t"
  2733. "addps 16(%4,%0), %%xmm1 \n\t"
  2734. "movaps %%xmm0, (%1,%0) \n\t"
  2735. "movaps %%xmm1, 16(%1,%0) \n\t"
  2736. "sub $32, %0 \n\t"
  2737. "jge 1b \n\t"
  2738. :"+r"(i)
  2739. :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
  2740. :"memory"
  2741. );
  2742. }
  2743. else
  2744. ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
  2745. }
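/* Scalar reference (sketch): dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
 * the SIMD paths above only handle src3 == 0 with step 1 or 2, everything
 * else falls back to ff_vector_fmul_add_add_c. */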
  2746. static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
  2747. // not bit-exact: pf2id uses different rounding than C and SSE
  2748. int i;
  2749. for(i=0; i<len; i+=4) {
  2750. asm volatile(
  2751. "pf2id %1, %%mm0 \n\t"
  2752. "pf2id %2, %%mm1 \n\t"
  2753. "packssdw %%mm1, %%mm0 \n\t"
  2754. "movq %%mm0, %0 \n\t"
  2755. :"=m"(dst[i])
  2756. :"m"(src[i]), "m"(src[i+2])
  2757. );
  2758. }
  2759. asm volatile("femms");
  2760. }
  2761. static void float_to_int16_sse(int16_t *dst, const float *src, int len){
  2762. int i;
  2763. for(i=0; i<len; i+=4) {
  2764. asm volatile(
  2765. "cvtps2pi %1, %%mm0 \n\t"
  2766. "cvtps2pi %2, %%mm1 \n\t"
  2767. "packssdw %%mm1, %%mm0 \n\t"
  2768. "movq %%mm0, %0 \n\t"
  2769. :"=m"(dst[i])
  2770. :"m"(src[i]), "m"(src[i+2])
  2771. );
  2772. }
  2773. asm volatile("emms");
  2774. }
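/* Both converters compute dst[i] = saturate_int16(src[i]) via packssdw;
 * they differ in rounding: pf2id truncates toward zero while cvtps2pi uses
 * the current MXCSR rounding mode (round-to-nearest-even by default),
 * hence the "not bit-exact" note above. */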
  2775. #ifdef CONFIG_SNOW_DECODER
  2776. extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
  2777. extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
  2778. extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
  2779. extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
  2780. extern void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
  2781. int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
  2782. extern void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
  2783. int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
  2784. #endif
  2785. void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
  2786. {
  2787. mm_flags = mm_support();
  2788. if (avctx->dsp_mask) {
  2789. if (avctx->dsp_mask & FF_MM_FORCE)
  2790. mm_flags |= (avctx->dsp_mask & 0xffff);
  2791. else
  2792. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  2793. }
  2794. #if 0
  2795. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  2796. if (mm_flags & MM_MMX)
  2797. av_log(avctx, AV_LOG_INFO, " mmx");
  2798. if (mm_flags & MM_MMXEXT)
  2799. av_log(avctx, AV_LOG_INFO, " mmxext");
  2800. if (mm_flags & MM_3DNOW)
  2801. av_log(avctx, AV_LOG_INFO, " 3dnow");
  2802. if (mm_flags & MM_SSE)
  2803. av_log(avctx, AV_LOG_INFO, " sse");
  2804. if (mm_flags & MM_SSE2)
  2805. av_log(avctx, AV_LOG_INFO, " sse2");
  2806. av_log(avctx, AV_LOG_INFO, "\n");
  2807. #endif
  2808. if (mm_flags & MM_MMX) {
  2809. const int idct_algo= avctx->idct_algo;
  2810. #ifdef CONFIG_ENCODERS
  2811. const int dct_algo = avctx->dct_algo;
  2812. if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
  2813. if(mm_flags & MM_SSE2){
  2814. c->fdct = ff_fdct_sse2;
  2815. }else if(mm_flags & MM_MMXEXT){
  2816. c->fdct = ff_fdct_mmx2;
  2817. }else{
  2818. c->fdct = ff_fdct_mmx;
  2819. }
  2820. }
  2821. #endif //CONFIG_ENCODERS
  2822. if(avctx->lowres==0){
  2823. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  2824. c->idct_put= ff_simple_idct_put_mmx;
  2825. c->idct_add= ff_simple_idct_add_mmx;
  2826. c->idct = ff_simple_idct_mmx;
  2827. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  2828. #ifdef CONFIG_GPL
  2829. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  2830. if(mm_flags & MM_MMXEXT){
  2831. c->idct_put= ff_libmpeg2mmx2_idct_put;
  2832. c->idct_add= ff_libmpeg2mmx2_idct_add;
  2833. c->idct = ff_mmxext_idct;
  2834. }else{
  2835. c->idct_put= ff_libmpeg2mmx_idct_put;
  2836. c->idct_add= ff_libmpeg2mmx_idct_add;
  2837. c->idct = ff_mmx_idct;
  2838. }
  2839. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  2840. #endif
  2841. }else if(idct_algo==FF_IDCT_VP3 &&
  2842. avctx->codec->id!=CODEC_ID_THEORA &&
  2843. !(avctx->flags & CODEC_FLAG_BITEXACT)){
  2844. if(mm_flags & MM_SSE2){
  2845. c->idct_put= ff_vp3_idct_put_sse2;
  2846. c->idct_add= ff_vp3_idct_add_sse2;
  2847. c->idct = ff_vp3_idct_sse2;
  2848. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2849. }else{
  2850. ff_vp3_dsp_init_mmx();
  2851. c->idct_put= ff_vp3_idct_put_mmx;
  2852. c->idct_add= ff_vp3_idct_add_mmx;
  2853. c->idct = ff_vp3_idct_mmx;
  2854. c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
  2855. }
  2856. }else if(idct_algo==FF_IDCT_CAVS){
  2857. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2858. }else if(idct_algo==FF_IDCT_XVIDMMX){
  2859. if(mm_flags & MM_MMXEXT){
  2860. c->idct_put= ff_idct_xvid_mmx2_put;
  2861. c->idct_add= ff_idct_xvid_mmx2_add;
  2862. c->idct = ff_idct_xvid_mmx2;
  2863. }else{
  2864. c->idct_put= ff_idct_xvid_mmx_put;
  2865. c->idct_add= ff_idct_xvid_mmx_add;
  2866. c->idct = ff_idct_xvid_mmx;
  2867. }
  2868. }
  2869. }
  2870. #ifdef CONFIG_ENCODERS
  2871. c->get_pixels = get_pixels_mmx;
  2872. c->diff_pixels = diff_pixels_mmx;
  2873. #endif //CONFIG_ENCODERS
  2874. c->put_pixels_clamped = put_pixels_clamped_mmx;
  2875. c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
  2876. c->add_pixels_clamped = add_pixels_clamped_mmx;
  2877. c->clear_blocks = clear_blocks_mmx;
  2878. #ifdef CONFIG_ENCODERS
  2879. c->pix_sum = pix_sum16_mmx;
  2880. #endif //CONFIG_ENCODERS
        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
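
        /* vsad16 and the 8x8 basis search are not bit-exact relative to the
         * C reference, so they are skipped when bitexact output is
         * requested. */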
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;

        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;
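
        /* MMX2 (a.k.a. MMXEXT, the integer subset of SSE) adds pavgb, psadbw
         * and prefetch instructions, so averaging, SAD and interpolation get
         * faster versions here. */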
        if (mm_flags & MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
//FIXME 3dnow too
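/* dspfunc(PFX, IDX, NUM) fills one row of a *_pixels_tab with all 16
 * quarter-pel motion compensation functions; the _mcXY suffix encodes the
 * (x,y) quarter-pel offset of the prediction. */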
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);

            dspfunc(put_2tap_qpel, 0, 16);
            dspfunc(put_2tap_qpel, 1, 8);
            dspfunc(avg_2tap_qpel, 0, 16);
            dspfunc(avg_2tap_qpel, 1, 8);
#undef dspfunc
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;

            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
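
            /* H.264 weighted (uni- and bi-directional) prediction, one entry
             * per partition size from 16x16 down to 4x2. */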
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
#ifdef CONFIG_CAVS_DECODER
            ff_cavsdsp_init_mmx2(c, avctx);
#endif

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
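            /* 3DNow! provides pavgusb (unsigned byte average) and prefetch,
             * covering the same put/avg cases as the MMX2 code above. */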
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);

            dspfunc(put_2tap_qpel, 0, 16);
            dspfunc(put_2tap_qpel, 1, 8);
            dspfunc(avg_2tap_qpel, 0, 16);
            dspfunc(avg_2tap_qpel, 1, 8);
#undef dspfunc

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
        }
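
        /* Snow's 9/7 lifting wavelet compose and block-add inner loops; the
         * SSE2 versions are preferred when available. */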
#ifdef CONFIG_SNOW_DECODER
        if(mm_flags & MM_SSE2){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
            c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif
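
        /* Float vector primitives used by the audio code.  Later assignments
         * override earlier ones, so SSE replaces the 3DNow! versions where
         * both exist; vector_fmul_add_add is the exception and is set to the
         * 3DNow! version last because it is faster than the SSE one. */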
        if(mm_flags & MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT))
                c->float_to_int16 = float_to_int16_3dnow;
        }
        if(mm_flags & MM_3DNOWEXT)
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
        if(mm_flags & MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->vector_fmul = vector_fmul_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
        }
        if(mm_flags & MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
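
    /* Disabled benchmarking aid: pointing everything at just_return allows
     * measuring the cost of the surrounding code without the DSP functions
     * themselves. */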
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}