/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"
//#undef NDEBUG
//#include <assert.h>
extern const uint8_t ff_h263_loop_filter_strength[32];
int mm_flags; /* multimedia extension flags */
/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
#define MOVQ_WONE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd ::)
#define MOVQ_BFE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t"\
"paddb %%" #regd ", %%" #regd " \n\t" ::)
#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd " \n\t" \
"packuswb %%" #regd ", %%" #regd " \n\t" ::)
#define MOVQ_WTWO(regd) \
__asm __volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd " \n\t" \
"psllw $1, %%" #regd " \n\t"::)
#endif
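/* What the PIC variants above synthesize, step by step (an illustration,
 * not code from the original file; values are per 64-bit MMX register):
 *   pcmpeqd  reg,reg  -> 0xFFFFFFFFFFFFFFFF  (all ones)
 *   psrlw    $15,reg  -> 0x0001000100010001  (wone)
 *   packuswb reg,reg  -> 0x0101010101010101  (bone)
 *   psllw    $1,reg   -> 0x0002000200020002  (wtwo, from wone)
 * Building each constant in-register avoids a GOT-relative memory load. */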
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
"movq " #rega ", " #regr " \n\t"\
"pand " #regb ", " #regr " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pand " #regfe "," #regb " \n\t"\
"psrlq $1, " #regb " \n\t"\
"paddb " #regb ", " #regr " \n\t"
#define PAVGB_MMX(rega, regb, regr, regfe) \
"movq " #rega ", " #regr " \n\t"\
"por " #regb ", " #regr " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pand " #regfe "," #regb " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
"movq " #rega ", " #regr " \n\t"\
"movq " #regc ", " #regp " \n\t"\
"pand " #regb ", " #regr " \n\t"\
"pand " #regd ", " #regp " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pxor " #regc ", " #regd " \n\t"\
"pand %%mm6, " #regb " \n\t"\
"pand %%mm6, " #regd " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psrlq $1, " #regd " \n\t"\
"paddb " #regb ", " #regr " \n\t"\
"paddb " #regd ", " #regp " \n\t"
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
"movq " #rega ", " #regr " \n\t"\
"movq " #regc ", " #regp " \n\t"\
"por " #regb ", " #regr " \n\t"\
"por " #regd ", " #regp " \n\t"\
"pxor " #rega ", " #regb " \n\t"\
"pxor " #regc ", " #regd " \n\t"\
"pand %%mm6, " #regb " \n\t"\
"pand %%mm6, " #regd " \n\t"\
"psrlq $1, " #regd " \n\t"\
"psrlq $1, " #regb " \n\t"\
"psubb " #regb ", " #regr " \n\t"\
"psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#include "dsputil_mmx_rnd.h"
#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
#include "dsputil_mmx_rnd.h"
#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
/* for Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"
#include "dsputil_mmx_avg.h"
#undef DEF
#undef PAVGB
/***********************************/
/* MMX2 specific */
#define DEF(x) x ## _mmx2
/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#include "dsputil_mmx_avg.h"
#undef DEF
#undef PAVGB
/***********************************/
/* standard MMX */
#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
asm volatile(
"mov $-128, %%"REG_a" \n\t"
"pxor %%mm7, %%mm7 \n\t"
".balign 16 \n\t"
"1: \n\t"
"movq (%0), %%mm0 \n\t"
"movq (%0, %2), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"movq %%mm0, (%1, %%"REG_a")\n\t"
"movq %%mm1, 8(%1, %%"REG_a")\n\t"
"movq %%mm2, 16(%1, %%"REG_a")\n\t"
"movq %%mm3, 24(%1, %%"REG_a")\n\t"
"add %3, %0 \n\t"
"add $32, %%"REG_a" \n\t"
"js 1b \n\t"
: "+r" (pixels)
: "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
: "%"REG_a
);
}
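/* Plain-C equivalent of get_pixels_mmx, as a reference sketch (the
 * canonical scalar version lives in dsputil.c):
 *
 *   for (i = 0; i < 8; i++, pixels += line_size)
 *       for (j = 0; j < 8; j++)
 *           block[i*8 + j] = pixels[j];
 *
 * The asm handles two rows per iteration, widening bytes to 16-bit
 * DCTELEMs with punpcklbw/punpckhbw against the zeroed mm7. */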
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov $-128, %%"REG_a" \n\t"
".balign 16 \n\t"
"1: \n\t"
"movq (%0), %%mm0 \n\t"
"movq (%1), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"psubw %%mm2, %%mm0 \n\t"
"psubw %%mm3, %%mm1 \n\t"
"movq %%mm0, (%2, %%"REG_a")\n\t"
"movq %%mm1, 8(%2, %%"REG_a")\n\t"
"add %3, %0 \n\t"
"add %3, %1 \n\t"
"add $16, %%"REG_a" \n\t"
"jnz 1b \n\t"
: "+r" (s1), "+r" (s2)
: "r" (block+64), "r" ((long)stride)
: "%"REG_a
);
}
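/* Likewise, diff_pixels_mmx is the vector form of (sketch):
 *   block[i*8 + j] = s1[j] - s2[j];  with s1, s2 advanced by stride per row,
 * producing one 8-pixel row of 16-bit differences per loop iteration. */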
#endif //CONFIG_ENCODERS
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
const DCTELEM *p;
uint8_t *pix;
/* read the pixels */
p = block;
pix = pixels;
/* unrolled loop */
__asm __volatile(
"movq %3, %%mm0\n\t"
"movq 8%3, %%mm1\n\t"
"movq 16%3, %%mm2\n\t"
"movq 24%3, %%mm3\n\t"
"movq 32%3, %%mm4\n\t"
"movq 40%3, %%mm5\n\t"
"movq 48%3, %%mm6\n\t"
"movq 56%3, %%mm7\n\t"
"packuswb %%mm1, %%mm0\n\t"
"packuswb %%mm3, %%mm2\n\t"
"packuswb %%mm5, %%mm4\n\t"
"packuswb %%mm7, %%mm6\n\t"
"movq %%mm0, (%0)\n\t"
"movq %%mm2, (%0, %1)\n\t"
"movq %%mm4, (%0, %1, 2)\n\t"
"movq %%mm6, (%0, %2)\n\t"
::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
:"memory");
pix += line_size*4;
p += 32;
// if this were an exact copy of the code above, the compiler
// would generate some very strange code, thus we use "r" here
__asm __volatile(
"movq (%3), %%mm0\n\t"
"movq 8(%3), %%mm1\n\t"
"movq 16(%3), %%mm2\n\t"
"movq 24(%3), %%mm3\n\t"
"movq 32(%3), %%mm4\n\t"
"movq 40(%3), %%mm5\n\t"
"movq 48(%3), %%mm6\n\t"
"movq 56(%3), %%mm7\n\t"
"packuswb %%mm1, %%mm0\n\t"
"packuswb %%mm3, %%mm2\n\t"
"packuswb %%mm5, %%mm4\n\t"
"packuswb %%mm7, %%mm6\n\t"
"movq %%mm0, (%0)\n\t"
"movq %%mm2, (%0, %1)\n\t"
"movq %%mm4, (%0, %1, 2)\n\t"
"movq %%mm6, (%0, %2)\n\t"
::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
:"memory");
}
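/* Scalar sketch of put_pixels_clamped (packuswb performs the unsigned
 * saturation to 0..255 for free while packing words to bytes):
 *
 *   for (i = 0; i < 8; i++, pixels += line_size)
 *       for (j = 0; j < 8; j++)
 *           pixels[j] = clamp(block[i*8 + j], 0, 255);
 *
 * clamp() is shorthand for illustration, not an actual helper here. */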
static const unsigned char __align8 vector128[8] =
{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
int i;
movq_m2r(*vector128, mm1);
for (i = 0; i < 8; i++) {
movq_m2r(*(block), mm0);
packsswb_m2r(*(block + 4), mm0);
block += 8;
paddb_r2r(mm1, mm0);
movq_r2m(mm0, *pixels);
pixels += line_size;
}
}
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
const DCTELEM *p;
uint8_t *pix;
int i;
/* read the pixels */
p = block;
pix = pixels;
MOVQ_ZERO(mm7);
i = 4;
do {
__asm __volatile(
"movq (%2), %%mm0\n\t"
"movq 8(%2), %%mm1\n\t"
"movq 16(%2), %%mm2\n\t"
"movq 24(%2), %%mm3\n\t"
"movq %0, %%mm4\n\t"
"movq %1, %%mm6\n\t"
"movq %%mm4, %%mm5\n\t"
"punpcklbw %%mm7, %%mm4\n\t"
"punpckhbw %%mm7, %%mm5\n\t"
"paddsw %%mm4, %%mm0\n\t"
"paddsw %%mm5, %%mm1\n\t"
"movq %%mm6, %%mm5\n\t"
"punpcklbw %%mm7, %%mm6\n\t"
"punpckhbw %%mm7, %%mm5\n\t"
"paddsw %%mm6, %%mm2\n\t"
"paddsw %%mm5, %%mm3\n\t"
"packuswb %%mm1, %%mm0\n\t"
"packuswb %%mm3, %%mm2\n\t"
"movq %%mm0, %0\n\t"
"movq %%mm2, %1\n\t"
:"+m"(*pix), "+m"(*(pix+line_size))
:"r"(p)
:"memory");
pix += line_size*2;
p += 16;
} while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
".balign 8 \n\t"
"1: \n\t"
"movd (%1), %%mm0 \n\t"
"movd (%1, %3), %%mm1 \n\t"
"movd %%mm0, (%2) \n\t"
"movd %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movd (%1), %%mm0 \n\t"
"movd (%1, %3), %%mm1 \n\t"
"movd %%mm0, (%2) \n\t"
"movd %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
: "+g"(h), "+r" (pixels), "+r" (block)
: "r"((long)line_size)
: "%"REG_a, "memory"
);
}
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
".balign 8 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
: "+g"(h), "+r" (pixels), "+r" (block)
: "r"((long)line_size)
: "%"REG_a, "memory"
);
}
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
".balign 8 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 8(%1), %%mm4 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq 8(%1, %3), %%mm5 \n\t"
"movq %%mm0, (%2) \n\t"
"movq %%mm4, 8(%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq 8(%1), %%mm4 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq 8(%1, %3), %%mm5 \n\t"
"movq %%mm0, (%2) \n\t"
"movq %%mm4, 8(%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
: "+g"(h), "+r" (pixels), "+r" (block)
: "r"((long)line_size)
: "%"REG_a, "memory"
);
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
__asm __volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov $-128*6, %%"REG_a" \n\t"
"1: \n\t"
"movq %%mm7, (%0, %%"REG_a") \n\t"
"movq %%mm7, 8(%0, %%"REG_a") \n\t"
"movq %%mm7, 16(%0, %%"REG_a") \n\t"
"movq %%mm7, 24(%0, %%"REG_a") \n\t"
"add $32, %%"REG_a" \n\t"
" js 1b \n\t"
: : "r" (((uint8_t *)blocks)+128*6)
: "%"REG_a
);
}
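/* clear_blocks_mmx is equivalent to memset(blocks, 0, 6*64*sizeof(DCTELEM));
 * it walks a negative byte index from -768 up to 0, storing the zeroed mm7
 * 32 bytes per iteration. */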
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
const int h=16;
int sum;
long index= -line_size*h;
__asm __volatile(
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm6, %%mm6 \n\t"
"1: \n\t"
"movq (%2, %1), %%mm0 \n\t"
"movq (%2, %1), %%mm1 \n\t"
"movq 8(%2, %1), %%mm2 \n\t"
"movq 8(%2, %1), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm0, %%mm1 \n\t"
"paddw %%mm2, %%mm3 \n\t"
"paddw %%mm1, %%mm3 \n\t"
"paddw %%mm3, %%mm6 \n\t"
"add %3, %1 \n\t"
" js 1b \n\t"
"movq %%mm6, %%mm5 \n\t"
"psrlq $32, %%mm6 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"movq %%mm6, %%mm5 \n\t"
"psrlq $16, %%mm6 \n\t"
"paddw %%mm5, %%mm6 \n\t"
"movd %%mm6, %0 \n\t"
"andl $0xFFFF, %0 \n\t"
: "=&r" (sum), "+r" (index)
: "r" (pix - index), "r" ((long)line_size)
);
return sum;
}
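/* Scalar reference for pix_sum16 (a sketch): the sum of all 16x16 pixels,
 *
 *   for (sum = 0, i = 0; i < 16; i++, pix += line_size)
 *       for (j = 0; j < 16; j++)
 *           sum += pix[j];
 *
 * mm6 keeps four 16-bit partial sums which the trailing psrlq/paddw fold
 * into one; 16*16*255 fits in 16 bits, hence the final andl. */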
#endif //CONFIG_ENCODERS
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
long i=0;
asm volatile(
"1: \n\t"
"movq (%1, %0), %%mm0 \n\t"
"movq (%2, %0), %%mm1 \n\t"
"paddb %%mm0, %%mm1 \n\t"
"movq %%mm1, (%2, %0) \n\t"
"movq 8(%1, %0), %%mm0 \n\t"
"movq 8(%2, %0), %%mm1 \n\t"
"paddb %%mm0, %%mm1 \n\t"
"movq %%mm1, 8(%2, %0) \n\t"
"add $16, %0 \n\t"
"cmp %3, %0 \n\t"
" jb 1b \n\t"
: "+r" (i)
: "r"(src), "r"(dst), "r"((long)w-15)
);
for(; i<w; i++)
dst[i+0] += src[i+0];
}
#define H263_LOOP_FILTER \
"pxor %%mm7, %%mm7 \n\t"\
"movq %0, %%mm0 \n\t"\
"movq %0, %%mm1 \n\t"\
"movq %3, %%mm2 \n\t"\
"movq %3, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm3, %%mm1 \n\t"\
"movq %1, %%mm2 \n\t"\
"movq %1, %%mm3 \n\t"\
"movq %2, %%mm4 \n\t"\
"movq %2, %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"psubw %%mm2, %%mm4 \n\t"\
"psubw %%mm3, %%mm5 \n\t"\
"psllw $2, %%mm4 \n\t"\
"psllw $2, %%mm5 \n\t"\
"paddw %%mm0, %%mm4 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pcmpgtw %%mm4, %%mm6 \n\t"\
"pcmpgtw %%mm5, %%mm7 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"pxor %%mm7, %%mm5 \n\t"\
"psubw %%mm6, %%mm4 \n\t"\
"psubw %%mm7, %%mm5 \n\t"\
"psrlw $3, %%mm4 \n\t"\
"psrlw $3, %%mm5 \n\t"\
"packuswb %%mm5, %%mm4 \n\t"\
"packsswb %%mm7, %%mm6 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"movd %4, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"punpcklbw %%mm2, %%mm2 \n\t"\
"psubusb %%mm4, %%mm2 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"psubusb %%mm4, %%mm3 \n\t"\
"psubb %%mm3, %%mm2 \n\t"\
"movq %1, %%mm3 \n\t"\
"movq %2, %%mm4 \n\t"\
"pxor %%mm6, %%mm3 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"paddusb %%mm2, %%mm3 \n\t"\
"psubusb %%mm2, %%mm4 \n\t"\
"pxor %%mm6, %%mm3 \n\t"\
"pxor %%mm6, %%mm4 \n\t"\
"paddusb %%mm2, %%mm2 \n\t"\
"packsswb %%mm1, %%mm0 \n\t"\
"pcmpgtb %%mm0, %%mm7 \n\t"\
"pxor %%mm7, %%mm0 \n\t"\
"psubb %%mm7, %%mm0 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"psubusb %%mm2, %%mm0 \n\t"\
"psubb %%mm0, %%mm1 \n\t"\
"pand %5, %%mm1 \n\t"\
"psrlw $2, %%mm1 \n\t"\
"pxor %%mm7, %%mm1 \n\t"\
"psubb %%mm7, %%mm1 \n\t"\
"movq %0, %%mm5 \n\t"\
"movq %3, %%mm6 \n\t"\
"psubb %%mm1, %%mm5 \n\t"\
"paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
const int strength= ff_h263_loop_filter_strength[qscale];
asm volatile(
H263_LOOP_FILTER
"movq %%mm3, %1 \n\t"
"movq %%mm4, %2 \n\t"
"movq %%mm5, %0 \n\t"
"movq %%mm6, %3 \n\t"
: "+m" (*(uint64_t*)(src - 2*stride)),
"+m" (*(uint64_t*)(src - 1*stride)),
"+m" (*(uint64_t*)(src + 0*stride)),
"+m" (*(uint64_t*)(src + 1*stride))
: "g" (2*strength), "m"(ff_pb_FC)
);
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
"movd %4, %%mm0 \n\t"
"movd %5, %%mm1 \n\t"
"movd %6, %%mm2 \n\t"
"movd %7, %%mm3 \n\t"
"punpcklbw %%mm1, %%mm0 \n\t"
"punpcklbw %%mm3, %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklwd %%mm2, %%mm0 \n\t"
"punpckhwd %%mm2, %%mm1 \n\t"
"movd %%mm0, %0 \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
"movd %%mm0, %1 \n\t"
"movd %%mm1, %2 \n\t"
"punpckhdq %%mm1, %%mm1 \n\t"
"movd %%mm1, %3 \n\t"
: "=m" (*(uint32_t*)(dst + 0*dst_stride)),
"=m" (*(uint32_t*)(dst + 1*dst_stride)),
"=m" (*(uint32_t*)(dst + 2*dst_stride)),
"=m" (*(uint32_t*)(dst + 3*dst_stride))
: "m" (*(uint32_t*)(src + 0*src_stride)),
"m" (*(uint32_t*)(src + 1*src_stride)),
"m" (*(uint32_t*)(src + 2*src_stride)),
"m" (*(uint32_t*)(src + 3*src_stride))
);
}
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
const int strength= ff_h263_loop_filter_strength[qscale];
uint64_t temp[4] __attribute__ ((aligned(8)));
uint8_t *btemp= (uint8_t*)temp;
src -= 2;
transpose4x4(btemp , src , 8, stride);
transpose4x4(btemp+4, src + 4*stride, 8, stride);
asm volatile(
H263_LOOP_FILTER // 5 3 4 6
: "+m" (temp[0]),
"+m" (temp[1]),
"+m" (temp[2]),
"+m" (temp[3])
: "g" (2*strength), "m"(ff_pb_FC)
);
asm volatile(
"movq %%mm5, %%mm1 \n\t"
"movq %%mm4, %%mm0 \n\t"
"punpcklbw %%mm3, %%mm5 \n\t"
"punpcklbw %%mm6, %%mm4 \n\t"
"punpckhbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm6, %%mm0 \n\t"
"movq %%mm5, %%mm3 \n\t"
"movq %%mm1, %%mm6 \n\t"
"punpcklwd %%mm4, %%mm5 \n\t"
"punpcklwd %%mm0, %%mm1 \n\t"
"punpckhwd %%mm4, %%mm3 \n\t"
"punpckhwd %%mm0, %%mm6 \n\t"
"movd %%mm5, (%0) \n\t"
"punpckhdq %%mm5, %%mm5 \n\t"
"movd %%mm5, (%0,%2) \n\t"
"movd %%mm3, (%0,%2,2) \n\t"
"punpckhdq %%mm3, %%mm3 \n\t"
"movd %%mm3, (%0,%3) \n\t"
"movd %%mm1, (%1) \n\t"
"punpckhdq %%mm1, %%mm1 \n\t"
"movd %%mm1, (%1,%2) \n\t"
"movd %%mm6, (%1,%2,2) \n\t"
"punpckhdq %%mm6, %%mm6 \n\t"
"movd %%mm6, (%1,%3) \n\t"
:: "r" (src),
"r" (src + 4*stride),
"r" ((long) stride ),
"r" ((long)(3*stride))
);
}
// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
"movq "#y", "#t" \n\t"\
"movq "#x", "#o" \n\t"\
"psubusb "#x", "#t" \n\t"\
"psubusb "#y", "#o" \n\t"\
"por "#t", "#o" \n\t"\
"psubusb "#a", "#o" \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
"pshufw $0, "#alpha1", %%mm4 \n\t"\
"pshufw $0, "#beta1 ", %%mm5 \n\t"\
"packuswb %%mm4, %%mm4 \n\t"\
"packuswb %%mm5, %%mm5 \n\t"\
DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
"por %%mm4, %%mm7 \n\t"\
DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
"por %%mm4, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pcmpeqb %%mm6, %%mm7 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
/* a = q0^p0^((p1-q1)>>2) */\
"movq %%mm0, %%mm4 \n\t"\
"psubb %%mm3, %%mm4 \n\t"\
"psrlw $2, %%mm4 \n\t"\
"pxor %%mm1, %%mm4 \n\t"\
"pxor %%mm2, %%mm4 \n\t"\
/* b = p0^(q1>>2) */\
"psrlw $2, %%mm3 \n\t"\
"pand "#pb_3f", %%mm3 \n\t"\
"movq %%mm1, %%mm5 \n\t"\
"pxor %%mm3, %%mm5 \n\t"\
/* c = q0^(p1>>2) */\
"psrlw $2, %%mm0 \n\t"\
"pand "#pb_3f", %%mm0 \n\t"\
"movq %%mm2, %%mm6 \n\t"\
"pxor %%mm0, %%mm6 \n\t"\
/* d = (c^b) & ~(b^a) & 1 */\
"pxor %%mm5, %%mm6 \n\t"\
"pxor %%mm4, %%mm5 \n\t"\
"pandn %%mm6, %%mm5 \n\t"\
"pand "#pb_01", %%mm5 \n\t"\
/* delta = (avg(q0, p1>>2) + (d&a))
* - (avg(p0, q1>>2) + (d&~a)) */\
"pavgb %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"pand %%mm4, %%mm6 \n\t"\
"paddusb %%mm6, %%mm0 \n\t"\
"pavgb %%mm1, %%mm3 \n\t"\
"pandn %%mm5, %%mm4 \n\t"\
"paddusb %%mm4, %%mm3 \n\t"\
/* p0 += clip(delta, -tc0, tc0)
* q0 -= clip(delta, -tc0, tc0) */\
"movq %%mm0, %%mm4 \n\t"\
"psubusb %%mm3, %%mm0 \n\t"\
"psubusb %%mm4, %%mm3 \n\t"\
"pminub %%mm7, %%mm0 \n\t"\
"pminub %%mm7, %%mm3 \n\t"\
"paddusb %%mm0, %%mm1 \n\t"\
"paddusb %%mm3, %%mm2 \n\t"\
"psubusb %%mm3, %%mm1 \n\t"\
"psubusb %%mm0, %%mm2 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
"movq %%mm1, "#tmp" \n\t"\
"pavgb %%mm2, "#tmp" \n\t"\
"pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
"pxor "q2addr", "#tmp" \n\t"\
"pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
"psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
"movq "#p1", "#tmp" \n\t"\
"psubusb "#tc0", "#tmp" \n\t"\
"paddusb "#p1", "#tc0" \n\t"\
"pmaxub "#tmp", "#q2" \n\t"\
"pminub "#tc0", "#q2" \n\t"\
"movq "#q2", "q1addr" \n\t"
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
uint64_t tmp0;
uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
// with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };
asm volatile(
"movq (%1,%3), %%mm0 \n\t" //p1
"movq (%1,%3,2), %%mm1 \n\t" //p0
"movq (%2), %%mm2 \n\t" //q0
"movq (%2,%3), %%mm3 \n\t" //q1
H264_DEBLOCK_MASK(%6, %7)
"pand %5, %%mm7 \n\t"
"movq %%mm7, %0 \n\t"
/* filter p1 */
"movq (%1), %%mm3 \n\t" //p2
DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
"pandn %%mm7, %%mm6 \n\t"
"pcmpeqb %%mm7, %%mm6 \n\t"
"pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
"pshufw $80, %4, %%mm4 \n\t"
"pand %%mm7, %%mm4 \n\t" // mask & tc0
"movq %8, %%mm7 \n\t"
"pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
"pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
"paddb %%mm4, %%mm7 \n\t" // tc++
H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)
/* filter q1 */
"movq (%2,%3,2), %%mm4 \n\t" //q2
DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
"pandn %0, %%mm6 \n\t"
"pcmpeqb %0, %%mm6 \n\t"
"pand %0, %%mm6 \n\t"
"pshufw $80, %4, %%mm5 \n\t"
"pand %%mm6, %%mm5 \n\t"
"pand %8, %%mm6 \n\t"
"paddb %%mm6, %%mm7 \n\t"
"movq (%2,%3), %%mm3 \n\t"
H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
/* filter p0, q0 */
H264_DEBLOCK_P0_Q0(%8, %9)
"movq %%mm1, (%1,%3,2) \n\t"
"movq %%mm2, (%2) \n\t"
: "=m"(tmp0)
: "r"(pix-3*stride), "r"(pix), "r"((long)stride),
"m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
"m"(mm_bone), "m"(ff_pb_3F)
);
}
static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
if((tc0[0] & tc0[1]) >= 0)
h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
if((tc0[2] & tc0[3]) >= 0)
h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
//FIXME: could cut some load/stores by merging transpose with filter
// also, it only needs to transpose 6x8
uint8_t trans[8*8];
int i;
for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
if((tc0[0] & tc0[1]) < 0)
continue;
transpose4x4(trans, pix-4, 8, stride);
transpose4x4(trans +4*8, pix, 8, stride);
transpose4x4(trans+4, pix-4+4*stride, 8, stride);
transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
transpose4x4(pix-2, trans +2*8, stride, 8);
transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
}
}
static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
asm volatile(
"movq (%0), %%mm0 \n\t" //p1
"movq (%0,%2), %%mm1 \n\t" //p0
"movq (%1), %%mm2 \n\t" //q0
"movq (%1,%2), %%mm3 \n\t" //q1
H264_DEBLOCK_MASK(%4, %5)
"movd %3, %%mm6 \n\t"
"punpcklbw %%mm6, %%mm6 \n\t"
"pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
H264_DEBLOCK_P0_Q0(%6, %7)
"movq %%mm1, (%0,%2) \n\t"
"movq %%mm2, (%1) \n\t"
:: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
"r"(*(uint32_t*)tc0),
"m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
);
}
static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}
static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
//FIXME: could cut some load/stores by merging transpose with filter
uint8_t trans[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
transpose4x4(pix-2, trans, stride, 8);
transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
"movq "#p0", %%mm4 \n\t"\
"pxor "#q1", %%mm4 \n\t"\
"pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
"pavgb "#q1", "#p0" \n\t"\
"psubusb %%mm4, "#p0" \n\t"\
"pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
asm volatile(
"movq (%0), %%mm0 \n\t"
"movq (%0,%2), %%mm1 \n\t"
"movq (%1), %%mm2 \n\t"
"movq (%1,%2), %%mm3 \n\t"
H264_DEBLOCK_MASK(%3, %4)
"movq %%mm1, %%mm5 \n\t"
"movq %%mm2, %%mm6 \n\t"
H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
"psubb %%mm5, %%mm1 \n\t"
"psubb %%mm6, %%mm2 \n\t"
"pand %%mm7, %%mm1 \n\t"
"pand %%mm7, %%mm2 \n\t"
"paddb %%mm5, %%mm1 \n\t"
"paddb %%mm6, %%mm2 \n\t"
"movq %%mm1, (%0,%2) \n\t"
"movq %%mm2, (%1) \n\t"
:: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
"m"(alpha1), "m"(beta1), "m"(mm_bone)
);
}
static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}
static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
//FIXME: could cut some load/stores by merging transpose with filter
uint8_t trans[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
transpose4x4(pix-2, trans, stride, 8);
transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
int tmp;
asm volatile (
"movl $16,%%ecx\n"
"pxor %%mm0,%%mm0\n"
"pxor %%mm7,%%mm7\n"
"1:\n"
"movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
"movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
"movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
"punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
"punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
"movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
"punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
"punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
  886. "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
  887. "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
  888. "pmaddwd %%mm3,%%mm3\n"
  889. "pmaddwd %%mm4,%%mm4\n"
  890. "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
  891. pix2^2+pix3^2+pix6^2+pix7^2) */
  892. "paddd %%mm3,%%mm4\n"
  893. "paddd %%mm2,%%mm7\n"
  894. "add %2, %0\n"
  895. "paddd %%mm4,%%mm7\n"
  896. "dec %%ecx\n"
  897. "jnz 1b\n"
  898. "movq %%mm7,%%mm1\n"
  899. "psrlq $32, %%mm7\n" /* shift hi dword to lo */
  900. "paddd %%mm7,%%mm1\n"
  901. "movd %%mm1,%1\n"
  902. : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
  903. return tmp;
  904. }
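/* Scalar reference for pix_norm1 (a sketch): the sum of squares of a
 * 16x16 block,
 *
 *   for (tmp = 0, i = 0; i < 16; i++, pix += line_size)
 *       for (j = 0; j < 16; j++)
 *           tmp += pix[j] * pix[j];
 *
 * pmaddwd squares four words and adds them pairwise into two dword lanes,
 * which the trailing psrlq $32 + paddd fold into one result. */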
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
asm volatile (
"movl %4,%%ecx\n"
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
"1:\n"
"movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
"movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
"movq %%mm1,%%mm5\n"
"psubusb %%mm2,%%mm1\n"
"psubusb %%mm5,%%mm2\n"
"por %%mm1,%%mm2\n"
"movq %%mm2,%%mm1\n"
"punpckhbw %%mm0,%%mm2\n"
"punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
"pmaddwd %%mm2,%%mm2\n"
"pmaddwd %%mm1,%%mm1\n"
"add %3,%0\n"
"add %3,%1\n"
"paddd %%mm2,%%mm1\n"
"paddd %%mm1,%%mm7\n"
"decl %%ecx\n"
"jnz 1b\n"
"movq %%mm7,%%mm1\n"
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
"paddd %%mm7,%%mm1\n"
"movd %%mm1,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
asm volatile (
"movl %4,%%ecx\n"
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
"1:\n"
"movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
"movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
"movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
"movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
/* todo: mm1-mm2, mm3-mm4 */
/* algo: subtract mm1 from mm2 with saturation and vice versa */
/* OR the results to get absolute difference */
"movq %%mm1,%%mm5\n"
"movq %%mm3,%%mm6\n"
"psubusb %%mm2,%%mm1\n"
"psubusb %%mm4,%%mm3\n"
"psubusb %%mm5,%%mm2\n"
"psubusb %%mm6,%%mm4\n"
"por %%mm1,%%mm2\n"
"por %%mm3,%%mm4\n"
/* now convert to 16-bit vectors so we can square them */
"movq %%mm2,%%mm1\n"
"movq %%mm4,%%mm3\n"
"punpckhbw %%mm0,%%mm2\n"
"punpckhbw %%mm0,%%mm4\n"
"punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
"punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
"pmaddwd %%mm2,%%mm2\n"
"pmaddwd %%mm4,%%mm4\n"
"pmaddwd %%mm1,%%mm1\n"
"pmaddwd %%mm3,%%mm3\n"
"add %3,%0\n"
"add %3,%1\n"
"paddd %%mm2,%%mm1\n"
"paddd %%mm4,%%mm3\n"
"paddd %%mm1,%%mm7\n"
"paddd %%mm3,%%mm7\n"
"decl %%ecx\n"
"jnz 1b\n"
"movq %%mm7,%%mm1\n"
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
"paddd %%mm7,%%mm1\n"
"movd %%mm1,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp;
}
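/* sse8_mmx and sse16_mmx compute the usual sum of squared errors; a
 * scalar sketch (width 8 or 16 respectively):
 *
 *   for (tmp = 0; h--; pix1 += line_size, pix2 += line_size)
 *       for (j = 0; j < width; j++) {
 *           int d = pix1[j] - pix2[j];
 *           tmp += d * d;
 *       }
 *
 * The saturating-subtract-both-ways-then-OR idiom yields |a-b| without
 * any sign handling before pmaddwd. */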
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
int tmp;
asm volatile (
"movl %3,%%ecx\n"
"pxor %%mm7,%%mm7\n"
"pxor %%mm6,%%mm6\n"
"movq (%0),%%mm0\n"
"movq %%mm0, %%mm1\n"
"psllq $8, %%mm0\n"
"psrlq $8, %%mm1\n"
"psrlq $8, %%mm0\n"
"movq %%mm0, %%mm2\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm0\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm2\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm0\n"
"psubw %%mm3, %%mm2\n"
"add %2,%0\n"
"movq (%0),%%mm4\n"
"movq %%mm4, %%mm1\n"
"psllq $8, %%mm4\n"
"psrlq $8, %%mm1\n"
"psrlq $8, %%mm4\n"
"movq %%mm4, %%mm5\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm4\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm5\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm4\n"
"psubw %%mm3, %%mm5\n"
"psubw %%mm4, %%mm0\n"
"psubw %%mm5, %%mm2\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm0, %%mm3\n\t"
"pcmpgtw %%mm2, %%mm1\n\t"
"pxor %%mm3, %%mm0\n"
"pxor %%mm1, %%mm2\n"
"psubw %%mm3, %%mm0\n"
"psubw %%mm1, %%mm2\n"
"paddw %%mm0, %%mm2\n"
"paddw %%mm2, %%mm6\n"
"add %2,%0\n"
"1:\n"
"movq (%0),%%mm0\n"
"movq %%mm0, %%mm1\n"
"psllq $8, %%mm0\n"
"psrlq $8, %%mm1\n"
"psrlq $8, %%mm0\n"
"movq %%mm0, %%mm2\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm0\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm2\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm0\n"
"psubw %%mm3, %%mm2\n"
"psubw %%mm0, %%mm4\n"
"psubw %%mm2, %%mm5\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm4, %%mm3\n\t"
"pcmpgtw %%mm5, %%mm1\n\t"
"pxor %%mm3, %%mm4\n"
"pxor %%mm1, %%mm5\n"
"psubw %%mm3, %%mm4\n"
"psubw %%mm1, %%mm5\n"
"paddw %%mm4, %%mm5\n"
"paddw %%mm5, %%mm6\n"
"add %2,%0\n"
"movq (%0),%%mm4\n"
"movq %%mm4, %%mm1\n"
"psllq $8, %%mm4\n"
"psrlq $8, %%mm1\n"
"psrlq $8, %%mm4\n"
"movq %%mm4, %%mm5\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm4\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm5\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm4\n"
"psubw %%mm3, %%mm5\n"
"psubw %%mm4, %%mm0\n"
"psubw %%mm5, %%mm2\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm0, %%mm3\n\t"
"pcmpgtw %%mm2, %%mm1\n\t"
"pxor %%mm3, %%mm0\n"
"pxor %%mm1, %%mm2\n"
"psubw %%mm3, %%mm0\n"
"psubw %%mm1, %%mm2\n"
"paddw %%mm0, %%mm2\n"
"paddw %%mm2, %%mm6\n"
"add %2,%0\n"
"subl $2, %%ecx\n"
" jnz 1b\n"
"movq %%mm6, %%mm0\n"
"punpcklwd %%mm7,%%mm0\n"
"punpckhwd %%mm7,%%mm6\n"
"paddd %%mm0, %%mm6\n"
"movq %%mm6,%%mm0\n"
"psrlq $32, %%mm6\n"
"paddd %%mm6,%%mm0\n"
"movd %%mm0,%1\n"
: "+r" (pix1), "=r"(tmp)
: "r" ((long)line_size) , "g" (h-2)
: "%ecx");
return tmp;
}
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
int tmp;
uint8_t * pix= pix1;
asm volatile (
"movl %3,%%ecx\n"
"pxor %%mm7,%%mm7\n"
"pxor %%mm6,%%mm6\n"
"movq (%0),%%mm0\n"
"movq 1(%0),%%mm1\n"
"movq %%mm0, %%mm2\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm0\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm2\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm0\n"
"psubw %%mm3, %%mm2\n"
"add %2,%0\n"
"movq (%0),%%mm4\n"
"movq 1(%0),%%mm1\n"
"movq %%mm4, %%mm5\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm4\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm5\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm4\n"
"psubw %%mm3, %%mm5\n"
"psubw %%mm4, %%mm0\n"
"psubw %%mm5, %%mm2\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm0, %%mm3\n\t"
"pcmpgtw %%mm2, %%mm1\n\t"
"pxor %%mm3, %%mm0\n"
"pxor %%mm1, %%mm2\n"
"psubw %%mm3, %%mm0\n"
"psubw %%mm1, %%mm2\n"
"paddw %%mm0, %%mm2\n"
"paddw %%mm2, %%mm6\n"
"add %2,%0\n"
"1:\n"
"movq (%0),%%mm0\n"
"movq 1(%0),%%mm1\n"
"movq %%mm0, %%mm2\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm0\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm2\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm0\n"
"psubw %%mm3, %%mm2\n"
"psubw %%mm0, %%mm4\n"
"psubw %%mm2, %%mm5\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm4, %%mm3\n\t"
"pcmpgtw %%mm5, %%mm1\n\t"
"pxor %%mm3, %%mm4\n"
"pxor %%mm1, %%mm5\n"
"psubw %%mm3, %%mm4\n"
"psubw %%mm1, %%mm5\n"
"paddw %%mm4, %%mm5\n"
"paddw %%mm5, %%mm6\n"
"add %2,%0\n"
"movq (%0),%%mm4\n"
"movq 1(%0),%%mm1\n"
"movq %%mm4, %%mm5\n"
"movq %%mm1, %%mm3\n"
"punpcklbw %%mm7,%%mm4\n"
"punpcklbw %%mm7,%%mm1\n"
"punpckhbw %%mm7,%%mm5\n"
"punpckhbw %%mm7,%%mm3\n"
"psubw %%mm1, %%mm4\n"
"psubw %%mm3, %%mm5\n"
"psubw %%mm4, %%mm0\n"
"psubw %%mm5, %%mm2\n"
"pxor %%mm3, %%mm3\n"
"pxor %%mm1, %%mm1\n"
"pcmpgtw %%mm0, %%mm3\n\t"
"pcmpgtw %%mm2, %%mm1\n\t"
"pxor %%mm3, %%mm0\n"
"pxor %%mm1, %%mm2\n"
"psubw %%mm3, %%mm0\n"
"psubw %%mm1, %%mm2\n"
"paddw %%mm0, %%mm2\n"
"paddw %%mm2, %%mm6\n"
"add %2,%0\n"
"subl $2, %%ecx\n"
" jnz 1b\n"
"movq %%mm6, %%mm0\n"
"punpcklwd %%mm7,%%mm0\n"
"punpckhwd %%mm7,%%mm6\n"
"paddd %%mm0, %%mm6\n"
"movq %%mm6,%%mm0\n"
"psrlq $32, %%mm6\n"
"paddd %%mm6,%%mm0\n"
"movd %%mm0,%1\n"
: "+r" (pix1), "=r"(tmp)
: "r" ((long)line_size) , "g" (h-2)
: "%ecx");
return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int score1= sse16_mmx(c, pix1, pix2, line_size, h);
int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
else return score1 + ABS(score2)*8;
}
static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int score1= sse8_mmx(c, pix1, pix2, line_size, h);
int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
else return score1 + ABS(score2)*8;
}
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
int tmp;
assert( (((int)pix) & 7) == 0);
assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n"\
"movq 8(%0), %%mm3\n"\
"add %2,%0\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
asm volatile (
"movl %3,%%ecx\n"
"pxor %%mm6,%%mm6\n"
"pxor %%mm7,%%mm7\n"
"movq (%0),%%mm0\n"
"movq 8(%0),%%mm1\n"
"add %2,%0\n"
"subl $2, %%ecx\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movq %%mm6,%%mm0\n"
"psrlq $32, %%mm6\n"
"paddw %%mm6,%%mm0\n"
"movq %%mm0,%%mm6\n"
"psrlq $16, %%mm0\n"
"paddw %%mm6,%%mm0\n"
"movd %%mm0,%1\n"
: "+r" (pix), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp & 0xFFFF;
}
#undef SUM
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
int tmp;
assert( (((int)pix) & 7) == 0);
assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n"\
"movq 8(%0), " #out1 "\n"\
"add %2,%0\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
asm volatile (
"movl %3,%%ecx\n"
"pxor %%mm6,%%mm6\n"
"pxor %%mm7,%%mm7\n"
"movq (%0),%%mm0\n"
"movq 8(%0),%%mm1\n"
"add %2,%0\n"
"subl $2, %%ecx\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movd %%mm6,%1\n"
: "+r" (pix), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp;
}
#undef SUM
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
assert( (((int)pix1) & 7) == 0);
assert( (((int)pix2) & 7) == 0);
assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0),%%mm2\n"\
"movq (%1)," #out0 "\n"\
"movq 8(%0),%%mm3\n"\
"movq 8(%1)," #out1 "\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb " #out0 ", %%mm2\n"\
"psubb " #out1 ", %%mm3\n"\
"pxor %%mm7, %%mm2\n"\
"pxor %%mm7, %%mm3\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
asm volatile (
"movl %4,%%ecx\n"
"pxor %%mm6,%%mm6\n"
"pcmpeqw %%mm7,%%mm7\n"
"psllw $15, %%mm7\n"
"packsswb %%mm7, %%mm7\n"
"movq (%0),%%mm0\n"
"movq (%1),%%mm2\n"
"movq 8(%0),%%mm1\n"
"movq 8(%1),%%mm3\n"
"add %3,%0\n"
"add %3,%1\n"
"subl $2, %%ecx\n"
"psubb %%mm2, %%mm0\n"
"psubb %%mm3, %%mm1\n"
"pxor %%mm7, %%mm0\n"
"pxor %%mm7, %%mm1\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movq %%mm6,%%mm0\n"
"psrlq $32, %%mm6\n"
"paddw %%mm6,%%mm0\n"
"movq %%mm0,%%mm6\n"
"psrlq $16, %%mm0\n"
"paddw %%mm6,%%mm0\n"
"movd %%mm0,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
assert( (((int)pix1) & 7) == 0);
assert( (((int)pix2) & 7) == 0);
assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0)," #out0 "\n"\
"movq (%1),%%mm2\n"\
"movq 8(%0)," #out1 "\n"\
"movq 8(%1),%%mm3\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb %%mm2, " #out0 "\n"\
"psubb %%mm3, " #out1 "\n"\
"pxor %%mm7, " #out0 "\n"\
"pxor %%mm7, " #out1 "\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
asm volatile (
"movl %4,%%ecx\n"
"pxor %%mm6,%%mm6\n"
"pcmpeqw %%mm7,%%mm7\n"
"psllw $15, %%mm7\n"
"packsswb %%mm7, %%mm7\n"
"movq (%0),%%mm0\n"
"movq (%1),%%mm2\n"
"movq 8(%0),%%mm1\n"
"movq 8(%1),%%mm3\n"
"add %3,%0\n"
"add %3,%1\n"
"subl $2, %%ecx\n"
"psubb %%mm2, %%mm0\n"
"psubb %%mm3, %%mm1\n"
"pxor %%mm7, %%mm0\n"
"pxor %%mm7, %%mm1\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movd %%mm6,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" ((long)line_size) , "m" (h)
: "%ecx");
return tmp;
}
#undef SUM
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
long i=0;
asm volatile(
"1: \n\t"
"movq (%2, %0), %%mm0 \n\t"
"movq (%1, %0), %%mm1 \n\t"
"psubb %%mm0, %%mm1 \n\t"
"movq %%mm1, (%3, %0) \n\t"
"movq 8(%2, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
"psubb %%mm0, %%mm1 \n\t"
"movq %%mm1, 8(%3, %0) \n\t"
"add $16, %0 \n\t"
"cmp %4, %0 \n\t"
" jb 1b \n\t"
: "+r" (i)
: "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
);
for(; i<w; i++)
dst[i+0] = src1[i+0]-src2[i+0];
}
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
long i=0;
uint8_t l, lt;
asm volatile(
"1: \n\t"
"movq -1(%1, %0), %%mm0 \n\t" // LT
"movq (%1, %0), %%mm1 \n\t" // T
"movq -1(%2, %0), %%mm2 \n\t" // L
"movq (%2, %0), %%mm3 \n\t" // X
"movq %%mm2, %%mm4 \n\t" // L
"psubb %%mm0, %%mm2 \n\t"
"paddb %%mm1, %%mm2 \n\t" // L + T - LT
"movq %%mm4, %%mm5 \n\t" // L
"pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
"pminub %%mm5, %%mm1 \n\t" // min(T, L)
"pminub %%mm2, %%mm4 \n\t"
"pmaxub %%mm1, %%mm4 \n\t"
"psubb %%mm4, %%mm3 \n\t" // dst - pred
"movq %%mm3, (%3, %0) \n\t"
"add $8, %0 \n\t"
"cmp %4, %0 \n\t"
" jb 1b \n\t"
: "+r" (i)
: "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
);
l= *left;
lt= *left_top;
dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
*left_top= src1[w-1];
*left = src2[w-1];
}
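/* Scalar sketch of the HuffYUV median prediction computed above:
 *
 *   pred   = mid_pred(L, T, L + T - LT);   the middle value of the three
 *   dst[i] = src2[i] - pred;
 *
 * with L = src2[i-1], T = src1[i], LT = src1[i-1]. The asm realizes
 * mid_pred via pmaxub/pminub; L + T - LT may wrap modulo 256, which
 * matches the &0xFF in the dst[0] fixup handled in C above. */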
  1475. #define LBUTTERFLY2(a1,b1,a2,b2)\
  1476. "paddw " #b1 ", " #a1 " \n\t"\
  1477. "paddw " #b2 ", " #a2 " \n\t"\
  1478. "paddw " #b1 ", " #b1 " \n\t"\
  1479. "paddw " #b2 ", " #b2 " \n\t"\
  1480. "psubw " #a1 ", " #b1 " \n\t"\
  1481. "psubw " #a2 ", " #b2 " \n\t"
  1482. #define HADAMARD48\
  1483. LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
  1484. LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
  1485. LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
  1486. LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
  1487. LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
  1488. LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
  1489. #define MMABS(a,z)\
  1490. "pxor " #z ", " #z " \n\t"\
  1491. "pcmpgtw " #a ", " #z " \n\t"\
  1492. "pxor " #z ", " #a " \n\t"\
  1493. "psubw " #z ", " #a " \n\t"
  1494. #define MMABS_SUM(a,z, sum)\
  1495. "pxor " #z ", " #z " \n\t"\
  1496. "pcmpgtw " #a ", " #z " \n\t"\
  1497. "pxor " #z ", " #a " \n\t"\
  1498. "psubw " #z ", " #a " \n\t"\
  1499. "paddusw " #a ", " #sum " \n\t"
  1500. #define MMABS_MMX2(a,z)\
  1501. "pxor " #z ", " #z " \n\t"\
  1502. "psubw " #a ", " #z " \n\t"\
  1503. "pmaxsw " #z ", " #a " \n\t"
  1504. #define MMABS_SUM_MMX2(a,z, sum)\
  1505. "pxor " #z ", " #z " \n\t"\
  1506. "psubw " #a ", " #z " \n\t"\
  1507. "pmaxsw " #z ", " #a " \n\t"\
  1508. "paddusw " #a ", " #sum " \n\t"
  1509. #define SBUTTERFLY(a,b,t,n)\
  1510. "movq " #a ", " #t " \n\t" /* abcd */\
  1511. "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
  1512. "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
  1513. #define TRANSPOSE4(a,b,c,d,t)\
  1514. SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
  1515. SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
  1516. SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
  1517. SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
  1518. #define LOAD4(o, a, b, c, d)\
  1519. "movq "#o"(%1), " #a " \n\t"\
  1520. "movq "#o"+16(%1), " #b " \n\t"\
  1521. "movq "#o"+32(%1), " #c " \n\t"\
  1522. "movq "#o"+48(%1), " #d " \n\t"
  1523. #define STORE4(o, a, b, c, d)\
  1524. "movq "#a", "#o"(%1) \n\t"\
  1525. "movq "#b", "#o"+16(%1) \n\t"\
  1526. "movq "#c", "#o"+32(%1) \n\t"\
  1527. "movq "#d", "#o"+48(%1) \n\t"\
  1528. static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
  1529. uint64_t temp[16] __align8;
  1530. int sum=0;
  1531. assert(h==8);
  1532. diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
  1533. asm volatile(
  1534. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1535. LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
  1536. HADAMARD48
  1537. "movq %%mm7, 112(%1) \n\t"
  1538. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1539. STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
  1540. "movq 112(%1), %%mm7 \n\t"
  1541. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1542. STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
  1543. LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
  1544. LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1545. HADAMARD48
  1546. "movq %%mm7, 120(%1) \n\t"
  1547. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1548. STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
  1549. "movq 120(%1), %%mm7 \n\t"
  1550. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1551. "movq %%mm7, %%mm5 \n\t"//FIXME remove
  1552. "movq %%mm6, %%mm7 \n\t"
  1553. "movq %%mm0, %%mm6 \n\t"
  1554. // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
  1555. LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
  1556. // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1557. HADAMARD48
  1558. "movq %%mm7, 64(%1) \n\t"
  1559. MMABS(%%mm0, %%mm7)
  1560. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1561. MMABS_SUM(%%mm2, %%mm7, %%mm0)
  1562. MMABS_SUM(%%mm3, %%mm7, %%mm0)
  1563. MMABS_SUM(%%mm4, %%mm7, %%mm0)
  1564. MMABS_SUM(%%mm5, %%mm7, %%mm0)
  1565. MMABS_SUM(%%mm6, %%mm7, %%mm0)
  1566. "movq 64(%1), %%mm1 \n\t"
  1567. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1568. "movq %%mm0, 64(%1) \n\t"
  1569. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1570. LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
  1571. HADAMARD48
  1572. "movq %%mm7, (%1) \n\t"
  1573. MMABS(%%mm0, %%mm7)
  1574. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1575. MMABS_SUM(%%mm2, %%mm7, %%mm0)
  1576. MMABS_SUM(%%mm3, %%mm7, %%mm0)
  1577. MMABS_SUM(%%mm4, %%mm7, %%mm0)
  1578. MMABS_SUM(%%mm5, %%mm7, %%mm0)
  1579. MMABS_SUM(%%mm6, %%mm7, %%mm0)
  1580. "movq (%1), %%mm1 \n\t"
  1581. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1582. "movq 64(%1), %%mm1 \n\t"
  1583. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1584. "movq %%mm0, %%mm1 \n\t"
  1585. "psrlq $32, %%mm0 \n\t"
  1586. "paddusw %%mm1, %%mm0 \n\t"
  1587. "movq %%mm0, %%mm1 \n\t"
  1588. "psrlq $16, %%mm0 \n\t"
  1589. "paddusw %%mm1, %%mm0 \n\t"
  1590. "movd %%mm0, %0 \n\t"
  1591. : "=r" (sum)
  1592. : "r"(temp)
  1593. );
  1594. return sum&0xFFFF;
  1595. }
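/* Illustrative scalar sketch (hypothetical helper, not the routine above):
   hadamard8_diff is a SATD metric - an 8x8 Walsh-Hadamard transform of
   src1-src2 with the absolute coefficients summed. The MMX version
   accumulates four word sums with saturating paddusw and keeps only the low
   word of the final movd, hence the sum&0xFFFF. */
#if 0
static int hadamard8_diff_scalar(uint8_t *src1, uint8_t *src2, int stride)
{
    int tmp[8][8], col[8], i, j, k, sum = 0;

    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            tmp[i][j] = src1[i*stride + j] - src2[i*stride + j];

    for (i = 0; i < 8; i++)             /* horizontal 8-point WHT, in place */
        for (k = 1; k < 8; k <<= 1)
            for (j = 0; j < 8; j++)
                if (!(j & k)) {
                    int a = tmp[i][j], b = tmp[i][j + k];
                    tmp[i][j]     = a + b;
                    tmp[i][j + k] = a - b;
                }

    for (j = 0; j < 8; j++) {           /* vertical WHT + |.| accumulation */
        for (i = 0; i < 8; i++) col[i] = tmp[i][j];
        for (k = 1; k < 8; k <<= 1)
            for (i = 0; i < 8; i++)
                if (!(i & k)) {
                    int a = col[i], b = col[i + k];
                    col[i]     = a + b;
                    col[i + k] = a - b;
                }
        for (i = 0; i < 8; i++) sum += col[i] < 0 ? -col[i] : col[i];
    }
    return sum;
}
#endif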
  1596. static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
  1597. uint64_t temp[16] __align8;
  1598. int sum=0;
  1599. assert(h==8);
  1600. diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
  1601. asm volatile(
  1602. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1603. LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
  1604. HADAMARD48
  1605. "movq %%mm7, 112(%1) \n\t"
  1606. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1607. STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
  1608. "movq 112(%1), %%mm7 \n\t"
  1609. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1610. STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
  1611. LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
  1612. LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1613. HADAMARD48
  1614. "movq %%mm7, 120(%1) \n\t"
  1615. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1616. STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
  1617. "movq 120(%1), %%mm7 \n\t"
  1618. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1619. "movq %%mm7, %%mm5 \n\t"//FIXME remove
  1620. "movq %%mm6, %%mm7 \n\t"
  1621. "movq %%mm0, %%mm6 \n\t"
  1622. // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
  1623. LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
  1624. // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1625. HADAMARD48
  1626. "movq %%mm7, 64(%1) \n\t"
  1627. MMABS_MMX2(%%mm0, %%mm7)
  1628. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1629. MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
  1630. MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
  1631. MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
  1632. MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
  1633. MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
  1634. "movq 64(%1), %%mm1 \n\t"
  1635. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1636. "movq %%mm0, 64(%1) \n\t"
  1637. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1638. LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
  1639. HADAMARD48
  1640. "movq %%mm7, (%1) \n\t"
  1641. MMABS_MMX2(%%mm0, %%mm7)
  1642. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1643. MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
  1644. MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
  1645. MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
  1646. MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
  1647. MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
  1648. "movq (%1), %%mm1 \n\t"
  1649. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1650. "movq 64(%1), %%mm1 \n\t"
  1651. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1652. "movq %%mm0, %%mm1 \n\t"
  1653. "psrlq $32, %%mm0 \n\t"
  1654. "paddusw %%mm1, %%mm0 \n\t"
  1655. "movq %%mm0, %%mm1 \n\t"
  1656. "psrlq $16, %%mm0 \n\t"
  1657. "paddusw %%mm1, %%mm0 \n\t"
  1658. "movd %%mm0, %0 \n\t"
  1659. : "=r" (sum)
  1660. : "r"(temp)
  1661. );
  1662. return sum&0xFFFF;
  1663. }
  1664. WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
  1665. WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
  1666. #endif //CONFIG_ENCODERS
  1667. #define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
  1668. #define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
  1669. #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
  1670. "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
  1671. "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
  1672. "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
  1673. "movq "#in7", " #m3 " \n\t" /* d */\
  1674. "movq "#in0", %%mm5 \n\t" /* D */\
  1675. "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
  1676. "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
  1677. "movq "#in1", %%mm5 \n\t" /* C */\
  1678. "movq "#in2", %%mm6 \n\t" /* B */\
  1679. "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
  1680. "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
  1681. "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
  1682. "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
  1683. "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
  1684. "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
  1685. "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
  1686. "psraw $5, %%mm5 \n\t"\
  1687. "packuswb %%mm5, %%mm5 \n\t"\
  1688. OP(%%mm5, out, %%mm7, d)
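/* Illustrative note: QPEL_V_LOW evaluates one output row of the MPEG-4
   half-pel 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1)/32. Taps with equal
   weight are pre-added into pair sums x1..x4, so the result is
   (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5, packed with unsigned saturation.
   A scalar sketch (hypothetical helper; p[0..7] are the eight source taps): */
#if 0
static inline uint8_t mpeg4_qpel_tap(const uint8_t p[8], int rnd)
{
    int x1 = p[3] + p[4], x2 = p[2] + p[5], x3 = p[1] + p[6], x4 = p[0] + p[7];
    int v  = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb clamp */
}
#endif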
  1689. #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
  1690. static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1691. uint64_t temp;\
  1692. \
  1693. asm volatile(\
  1694. "pxor %%mm7, %%mm7 \n\t"\
  1695. "1: \n\t"\
  1696. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1697. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1698. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1699. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1700. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1701. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1702. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1703. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1704. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1705. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1706. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1707. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1708. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1709. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1710. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1711. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1712. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1713. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1714. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1715. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1716. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1717. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1718. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1719. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1720. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1721. "paddw %6, %%mm6 \n\t"\
  1722. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1723. "psraw $5, %%mm0 \n\t"\
  1724. "movq %%mm0, %5 \n\t"\
  1725. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1726. \
  1727. "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
  1728. "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
  1729. "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
  1730. "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
  1731. "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
  1732. "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
  1733. "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
  1734. "paddw %%mm0, %%mm2 \n\t" /* b */\
  1735. "paddw %%mm5, %%mm3 \n\t" /* c */\
  1736. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1737. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1738. "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
  1739. "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
  1740. "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
  1741. "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
  1742. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1743. "paddw %%mm2, %%mm1 \n\t" /* a */\
  1744. "paddw %%mm6, %%mm4 \n\t" /* d */\
  1745. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1746. "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
  1747. "paddw %6, %%mm1 \n\t"\
  1748. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
  1749. "psraw $5, %%mm3 \n\t"\
  1750. "movq %5, %%mm1 \n\t"\
  1751. "packuswb %%mm3, %%mm1 \n\t"\
  1752. OP_MMX2(%%mm1, (%1),%%mm4, q)\
  1753. /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
  1754. \
  1755. "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
  1756. "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
  1757. "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
  1758. "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
  1759. "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
  1760. "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
  1761. "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
  1762. "paddw %%mm1, %%mm5 \n\t" /* b */\
  1763. "paddw %%mm4, %%mm0 \n\t" /* c */\
  1764. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1765. "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
  1766. "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
  1767. "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
  1768. "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
  1769. "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
  1770. "paddw %%mm3, %%mm2 \n\t" /* d */\
  1771. "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
  1772. "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
  1773. "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
  1774. "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
  1775. "paddw %%mm2, %%mm6 \n\t" /* a */\
  1776. "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
  1777. "paddw %6, %%mm0 \n\t"\
  1778. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1779. "psraw $5, %%mm0 \n\t"\
  1780. /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
  1781. \
  1782. "paddw %%mm5, %%mm3 \n\t" /* a */\
  1783. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
  1784. "paddw %%mm4, %%mm6 \n\t" /* b */\
  1785. "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
  1786. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
  1787. "paddw %%mm1, %%mm4 \n\t" /* c */\
  1788. "paddw %%mm2, %%mm5 \n\t" /* d */\
  1789. "paddw %%mm6, %%mm6 \n\t" /* 2b */\
  1790. "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
  1791. "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
  1792. "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
  1793. "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
  1794. "paddw %6, %%mm4 \n\t"\
  1795. "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
  1796. "psraw $5, %%mm4 \n\t"\
  1797. "packuswb %%mm4, %%mm0 \n\t"\
  1798. OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
  1799. \
  1800. "add %3, %0 \n\t"\
  1801. "add %4, %1 \n\t"\
  1802. "decl %2 \n\t"\
  1803. " jnz 1b \n\t"\
  1804. : "+a"(src), "+c"(dst), "+m"(h)\
  1805. : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1806. : "memory"\
  1807. );\
  1808. }\
  1809. \
  1810. static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1811. int i;\
  1812. int16_t temp[16];\
  1813. /* quick HACK, XXX FIXME MUST be optimized */\
  1814. for(i=0; i<h; i++)\
  1815. {\
  1816. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1817. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1818. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1819. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1820. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1821. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
  1822. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
  1823. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
  1824. temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
  1825. temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
  1826. temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
  1827. temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
  1828. temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
  1829. temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
  1830. temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
  1831. temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
  1832. asm volatile(\
  1833. "movq (%0), %%mm0 \n\t"\
  1834. "movq 8(%0), %%mm1 \n\t"\
  1835. "paddw %2, %%mm0 \n\t"\
  1836. "paddw %2, %%mm1 \n\t"\
  1837. "psraw $5, %%mm0 \n\t"\
  1838. "psraw $5, %%mm1 \n\t"\
  1839. "packuswb %%mm1, %%mm0 \n\t"\
  1840. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1841. "movq 16(%0), %%mm0 \n\t"\
  1842. "movq 24(%0), %%mm1 \n\t"\
  1843. "paddw %2, %%mm0 \n\t"\
  1844. "paddw %2, %%mm1 \n\t"\
  1845. "psraw $5, %%mm0 \n\t"\
  1846. "psraw $5, %%mm1 \n\t"\
  1847. "packuswb %%mm1, %%mm0 \n\t"\
  1848. OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
  1849. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1850. : "memory"\
  1851. );\
  1852. dst+=dstStride;\
  1853. src+=srcStride;\
  1854. }\
  1855. }\
  1856. \
  1857. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1858. uint64_t temp;\
  1859. \
  1860. asm volatile(\
  1861. "pxor %%mm7, %%mm7 \n\t"\
  1862. "1: \n\t"\
  1863. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1864. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1865. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1866. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1867. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1868. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1869. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1870. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1871. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1872. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1873. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1874. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1875. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1876. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1877. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1878. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1879. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1880. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1881. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1882. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1883. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1884. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1885. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1886. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1887. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1888. "paddw %6, %%mm6 \n\t"\
  1889. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1890. "psraw $5, %%mm0 \n\t"\
  1891. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1892. \
  1893. "movd 5(%0), %%mm5 \n\t" /* FGHI */\
  1894. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
  1895. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
  1896. "paddw %%mm5, %%mm1 \n\t" /* a */\
  1897. "paddw %%mm6, %%mm2 \n\t" /* b */\
  1898. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
  1899. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
  1900. "paddw %%mm6, %%mm3 \n\t" /* c */\
  1901. "paddw %%mm5, %%mm4 \n\t" /* d */\
  1902. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1903. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1904. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1905. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1906. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
  1907. "paddw %6, %%mm1 \n\t"\
  1908. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
  1909. "psraw $5, %%mm3 \n\t"\
  1910. "packuswb %%mm3, %%mm0 \n\t"\
  1911. OP_MMX2(%%mm0, (%1), %%mm4, q)\
  1912. \
  1913. "add %3, %0 \n\t"\
  1914. "add %4, %1 \n\t"\
  1915. "decl %2 \n\t"\
  1916. " jnz 1b \n\t"\
  1917. : "+a"(src), "+c"(dst), "+m"(h)\
  1918. : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1919. : "memory"\
  1920. );\
  1921. }\
  1922. \
  1923. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1924. int i;\
  1925. int16_t temp[8];\
  1926. /* quick HACK, XXX FIXME MUST be optimized */\
  1927. for(i=0; i<h; i++)\
  1928. {\
  1929. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1930. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1931. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1932. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1933. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1934. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
  1935. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
  1936. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
  1937. asm volatile(\
  1938. "movq (%0), %%mm0 \n\t"\
  1939. "movq 8(%0), %%mm1 \n\t"\
  1940. "paddw %2, %%mm0 \n\t"\
  1941. "paddw %2, %%mm1 \n\t"\
  1942. "psraw $5, %%mm0 \n\t"\
  1943. "psraw $5, %%mm1 \n\t"\
  1944. "packuswb %%mm1, %%mm0 \n\t"\
  1945. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1946. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1947. :"memory"\
  1948. );\
  1949. dst+=dstStride;\
  1950. src+=srcStride;\
  1951. }\
  1952. }
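/* Illustrative note: the mpeg4_qpel*_h_lowpass_3dnow variants above compute
   the filter taps in plain C (the "quick HACK" loops) and use MMX only for
   the final round/shift/pack. The repeated src[16] / src[15] terms in the
   last few rows fold indices that would run past the block back inside it,
   which is how the 8-tap support is handled at the right edge. */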
  1953. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
  1954. \
  1955. static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1956. uint64_t temp[17*4];\
  1957. uint64_t *temp_ptr= temp;\
  1958. int count= 17;\
  1959. \
  1960. /*FIXME unroll */\
  1961. asm volatile(\
  1962. "pxor %%mm7, %%mm7 \n\t"\
  1963. "1: \n\t"\
  1964. "movq (%0), %%mm0 \n\t"\
  1965. "movq (%0), %%mm1 \n\t"\
  1966. "movq 8(%0), %%mm2 \n\t"\
  1967. "movq 8(%0), %%mm3 \n\t"\
  1968. "punpcklbw %%mm7, %%mm0 \n\t"\
  1969. "punpckhbw %%mm7, %%mm1 \n\t"\
  1970. "punpcklbw %%mm7, %%mm2 \n\t"\
  1971. "punpckhbw %%mm7, %%mm3 \n\t"\
  1972. "movq %%mm0, (%1) \n\t"\
  1973. "movq %%mm1, 17*8(%1) \n\t"\
  1974. "movq %%mm2, 2*17*8(%1) \n\t"\
  1975. "movq %%mm3, 3*17*8(%1) \n\t"\
  1976. "add $8, %1 \n\t"\
  1977. "add %3, %0 \n\t"\
  1978. "decl %2 \n\t"\
  1979. " jnz 1b \n\t"\
  1980. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1981. : "r" ((long)srcStride)\
  1982. : "memory"\
  1983. );\
  1984. \
  1985. temp_ptr= temp;\
  1986. count=4;\
  1987. \
  1988. /*FIXME reorder for speed */\
  1989. asm volatile(\
  1990. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1991. "1: \n\t"\
  1992. "movq (%0), %%mm0 \n\t"\
  1993. "movq 8(%0), %%mm1 \n\t"\
  1994. "movq 16(%0), %%mm2 \n\t"\
  1995. "movq 24(%0), %%mm3 \n\t"\
  1996. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1997. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1998. "add %4, %1 \n\t"\
  1999. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  2000. \
  2001. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  2002. "add %4, %1 \n\t"\
  2003. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  2004. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
  2005. "add %4, %1 \n\t"\
  2006. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
  2007. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
  2008. "add %4, %1 \n\t"\
  2009. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
  2010. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
  2011. "add %4, %1 \n\t"\
  2012. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
  2013. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
  2014. "add %4, %1 \n\t"\
  2015. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
  2016. \
  2017. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
  2018. "add %4, %1 \n\t" \
  2019. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
  2020. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
  2021. \
  2022. "add $136, %0 \n\t"\
  2023. "add %6, %1 \n\t"\
  2024. "decl %2 \n\t"\
  2025. " jnz 1b \n\t"\
  2026. \
  2027. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  2028. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
  2029. :"memory"\
  2030. );\
  2031. }\
  2032. \
  2033. static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2034. uint64_t temp[9*2];\
  2035. uint64_t *temp_ptr= temp;\
  2036. int count= 9;\
  2037. \
  2038. /*FIXME unroll */\
  2039. asm volatile(\
  2040. "pxor %%mm7, %%mm7 \n\t"\
  2041. "1: \n\t"\
  2042. "movq (%0), %%mm0 \n\t"\
  2043. "movq (%0), %%mm1 \n\t"\
  2044. "punpcklbw %%mm7, %%mm0 \n\t"\
  2045. "punpckhbw %%mm7, %%mm1 \n\t"\
  2046. "movq %%mm0, (%1) \n\t"\
  2047. "movq %%mm1, 9*8(%1) \n\t"\
  2048. "add $8, %1 \n\t"\
  2049. "add %3, %0 \n\t"\
  2050. "decl %2 \n\t"\
  2051. " jnz 1b \n\t"\
  2052. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  2053. : "r" ((long)srcStride)\
  2054. : "memory"\
  2055. );\
  2056. \
  2057. temp_ptr= temp;\
  2058. count=2;\
  2059. \
  2060. /*FIXME reorder for speed */\
  2061. asm volatile(\
  2062. /*"pxor %%mm7, %%mm7 \n\t"*/\
  2063. "1: \n\t"\
  2064. "movq (%0), %%mm0 \n\t"\
  2065. "movq 8(%0), %%mm1 \n\t"\
  2066. "movq 16(%0), %%mm2 \n\t"\
  2067. "movq 24(%0), %%mm3 \n\t"\
  2068. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  2069. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  2070. "add %4, %1 \n\t"\
  2071. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  2072. \
  2073. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  2074. "add %4, %1 \n\t"\
  2075. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  2076. \
  2077. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
  2078. "add %4, %1 \n\t"\
  2079. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
  2080. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
  2081. \
  2082. "add $72, %0 \n\t"\
  2083. "add %6, %1 \n\t"\
  2084. "decl %2 \n\t"\
  2085. " jnz 1b \n\t"\
  2086. \
  2087. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  2088. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
  2089. : "memory"\
  2090. );\
  2091. }\
  2092. \
  2093. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  2094. OPNAME ## pixels8_mmx(dst, src, stride, 8);\
  2095. }\
  2096. \
  2097. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2098. uint64_t temp[8];\
  2099. uint8_t * const half= (uint8_t*)temp;\
  2100. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  2101. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  2102. }\
  2103. \
  2104. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2105. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
  2106. }\
  2107. \
  2108. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2109. uint64_t temp[8];\
  2110. uint8_t * const half= (uint8_t*)temp;\
  2111. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  2112. OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
  2113. }\
  2114. \
  2115. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2116. uint64_t temp[8];\
  2117. uint8_t * const half= (uint8_t*)temp;\
  2118. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  2119. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  2120. }\
  2121. \
  2122. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2123. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
  2124. }\
  2125. \
  2126. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2127. uint64_t temp[8];\
  2128. uint8_t * const half= (uint8_t*)temp;\
  2129. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  2130. OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
  2131. }\
  2132. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2133. uint64_t half[8 + 9];\
  2134. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2135. uint8_t * const halfHV= ((uint8_t*)half);\
  2136. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2137. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  2138. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2139. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  2140. }\
  2141. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2142. uint64_t half[8 + 9];\
  2143. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2144. uint8_t * const halfHV= ((uint8_t*)half);\
  2145. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2146. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  2147. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2148. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  2149. }\
  2150. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2151. uint64_t half[8 + 9];\
  2152. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2153. uint8_t * const halfHV= ((uint8_t*)half);\
  2154. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2155. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  2156. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2157. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2158. }\
  2159. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2160. uint64_t half[8 + 9];\
  2161. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2162. uint8_t * const halfHV= ((uint8_t*)half);\
  2163. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2164. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  2165. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2166. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2167. }\
  2168. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2169. uint64_t half[8 + 9];\
  2170. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2171. uint8_t * const halfHV= ((uint8_t*)half);\
  2172. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2173. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2174. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  2175. }\
  2176. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2177. uint64_t half[8 + 9];\
  2178. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  2179. uint8_t * const halfHV= ((uint8_t*)half);\
  2180. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2181. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  2182. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  2183. }\
  2184. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2185. uint64_t half[8 + 9];\
  2186. uint8_t * const halfH= ((uint8_t*)half);\
  2187. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2188. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  2189. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2190. }\
  2191. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2192. uint64_t half[8 + 9];\
  2193. uint8_t * const halfH= ((uint8_t*)half);\
  2194. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2195. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  2196. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2197. }\
  2198. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2199. uint64_t half[9];\
  2200. uint8_t * const halfH= ((uint8_t*)half);\
  2201. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  2202. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  2203. }\
  2204. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  2205. OPNAME ## pixels16_mmx(dst, src, stride, 16);\
  2206. }\
  2207. \
  2208. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2209. uint64_t temp[32];\
  2210. uint8_t * const half= (uint8_t*)temp;\
  2211. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  2212. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  2213. }\
  2214. \
  2215. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2216. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
  2217. }\
  2218. \
  2219. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2220. uint64_t temp[32];\
  2221. uint8_t * const half= (uint8_t*)temp;\
  2222. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  2223. OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
  2224. }\
  2225. \
  2226. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2227. uint64_t temp[32];\
  2228. uint8_t * const half= (uint8_t*)temp;\
  2229. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  2230. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  2231. }\
  2232. \
  2233. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2234. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
  2235. }\
  2236. \
  2237. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2238. uint64_t temp[32];\
  2239. uint8_t * const half= (uint8_t*)temp;\
  2240. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  2241. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
  2242. }\
  2243. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2244. uint64_t half[16*2 + 17*2];\
  2245. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2246. uint8_t * const halfHV= ((uint8_t*)half);\
  2247. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2248. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2249. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2250. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2251. }\
  2252. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2253. uint64_t half[16*2 + 17*2];\
  2254. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2255. uint8_t * const halfHV= ((uint8_t*)half);\
  2256. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2257. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2258. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2259. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2260. }\
  2261. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2262. uint64_t half[16*2 + 17*2];\
  2263. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2264. uint8_t * const halfHV= ((uint8_t*)half);\
  2265. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2266. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2267. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2268. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2269. }\
  2270. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2271. uint64_t half[16*2 + 17*2];\
  2272. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2273. uint8_t * const halfHV= ((uint8_t*)half);\
  2274. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2275. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2276. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2277. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2278. }\
  2279. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2280. uint64_t half[16*2 + 17*2];\
  2281. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2282. uint8_t * const halfHV= ((uint8_t*)half);\
  2283. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2284. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2285. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2286. }\
  2287. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2288. uint64_t half[16*2 + 17*2];\
  2289. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2290. uint8_t * const halfHV= ((uint8_t*)half);\
  2291. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2292. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2293. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2294. }\
  2295. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2296. uint64_t half[17*2];\
  2297. uint8_t * const halfH= ((uint8_t*)half);\
  2298. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2299. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2300. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2301. }\
  2302. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2303. uint64_t half[17*2];\
  2304. uint8_t * const halfH= ((uint8_t*)half);\
  2305. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2306. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2307. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2308. }\
  2309. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2310. uint64_t half[17*2];\
  2311. uint8_t * const halfH= ((uint8_t*)half);\
  2312. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2313. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2314. }
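/* Illustrative note: the qpelN_mcXY templates above encode the quarter-pel
   position in the name - X is the horizontal and Y the vertical
   quarter-sample offset, each 0..3. mc00 is the integer-pel copy, mc20/mc02
   the pure half-pel filters, and the remaining positions are built by
   averaging (the pixelsN_l2 calls) two intermediates, e.g. mc10 averages the
   integer-pel block with the horizontal half-pel block. */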
  2315. #define QPEL_H264V(A,B,C,D,E,F,OP)\
  2316. "movd (%0), "#F" \n\t"\
  2317. "movq "#C", %%mm6 \n\t"\
  2318. "paddw "#D", %%mm6 \n\t"\
  2319. "psllw $2, %%mm6 \n\t"\
  2320. "psubw "#B", %%mm6 \n\t"\
  2321. "psubw "#E", %%mm6 \n\t"\
  2322. "pmullw %4, %%mm6 \n\t"\
  2323. "add %2, %0 \n\t"\
  2324. "punpcklbw %%mm7, "#F" \n\t"\
  2325. "paddw %5, "#A" \n\t"\
  2326. "paddw "#F", "#A" \n\t"\
  2327. "paddw "#A", %%mm6 \n\t"\
  2328. "psraw $5, %%mm6 \n\t"\
  2329. "packuswb %%mm6, %%mm6 \n\t"\
  2330. OP(%%mm6, (%1), A, d)\
  2331. "add %3, %1 \n\t"
  2332. #define QPEL_H264HV(A,B,C,D,E,F,OF)\
  2333. "movd (%0), "#F" \n\t"\
  2334. "movq "#C", %%mm6 \n\t"\
  2335. "paddw "#D", %%mm6 \n\t"\
  2336. "psllw $2, %%mm6 \n\t"\
  2337. "psubw "#B", %%mm6 \n\t"\
  2338. "psubw "#E", %%mm6 \n\t"\
  2339. "pmullw %3, %%mm6 \n\t"\
  2340. "add %2, %0 \n\t"\
  2341. "punpcklbw %%mm7, "#F" \n\t"\
  2342. "paddw "#F", "#A" \n\t"\
  2343. "paddw "#A", %%mm6 \n\t"\
  2344. "movq %%mm6, "#OF"(%1) \n\t"
  2345. #define QPEL_H264(OPNAME, OP, MMX)\
  2346. static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2347. int h=4;\
  2348. \
  2349. asm volatile(\
  2350. "pxor %%mm7, %%mm7 \n\t"\
  2351. "movq %5, %%mm4 \n\t"\
  2352. "movq %6, %%mm5 \n\t"\
  2353. "1: \n\t"\
  2354. "movd -1(%0), %%mm1 \n\t"\
  2355. "movd (%0), %%mm2 \n\t"\
  2356. "movd 1(%0), %%mm3 \n\t"\
  2357. "movd 2(%0), %%mm0 \n\t"\
  2358. "punpcklbw %%mm7, %%mm1 \n\t"\
  2359. "punpcklbw %%mm7, %%mm2 \n\t"\
  2360. "punpcklbw %%mm7, %%mm3 \n\t"\
  2361. "punpcklbw %%mm7, %%mm0 \n\t"\
  2362. "paddw %%mm0, %%mm1 \n\t"\
  2363. "paddw %%mm3, %%mm2 \n\t"\
  2364. "movd -2(%0), %%mm0 \n\t"\
  2365. "movd 3(%0), %%mm3 \n\t"\
  2366. "punpcklbw %%mm7, %%mm0 \n\t"\
  2367. "punpcklbw %%mm7, %%mm3 \n\t"\
  2368. "paddw %%mm3, %%mm0 \n\t"\
  2369. "psllw $2, %%mm2 \n\t"\
  2370. "psubw %%mm1, %%mm2 \n\t"\
  2371. "pmullw %%mm4, %%mm2 \n\t"\
  2372. "paddw %%mm5, %%mm0 \n\t"\
  2373. "paddw %%mm2, %%mm0 \n\t"\
  2374. "psraw $5, %%mm0 \n\t"\
  2375. "packuswb %%mm0, %%mm0 \n\t"\
  2376. OP(%%mm0, (%1),%%mm6, d)\
  2377. "add %3, %0 \n\t"\
  2378. "add %4, %1 \n\t"\
  2379. "decl %2 \n\t"\
  2380. " jnz 1b \n\t"\
  2381. : "+a"(src), "+c"(dst), "+m"(h)\
  2382. : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2383. : "memory"\
  2384. );\
  2385. }\
  2386. static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2387. src -= 2*srcStride;\
  2388. asm volatile(\
  2389. "pxor %%mm7, %%mm7 \n\t"\
  2390. "movd (%0), %%mm0 \n\t"\
  2391. "add %2, %0 \n\t"\
  2392. "movd (%0), %%mm1 \n\t"\
  2393. "add %2, %0 \n\t"\
  2394. "movd (%0), %%mm2 \n\t"\
  2395. "add %2, %0 \n\t"\
  2396. "movd (%0), %%mm3 \n\t"\
  2397. "add %2, %0 \n\t"\
  2398. "movd (%0), %%mm4 \n\t"\
  2399. "add %2, %0 \n\t"\
  2400. "punpcklbw %%mm7, %%mm0 \n\t"\
  2401. "punpcklbw %%mm7, %%mm1 \n\t"\
  2402. "punpcklbw %%mm7, %%mm2 \n\t"\
  2403. "punpcklbw %%mm7, %%mm3 \n\t"\
  2404. "punpcklbw %%mm7, %%mm4 \n\t"\
  2405. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2406. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2407. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  2408. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  2409. \
  2410. : "+a"(src), "+c"(dst)\
  2411. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2412. : "memory"\
  2413. );\
  2414. }\
  2415. static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2416. int h=4;\
  2417. int w=3;\
  2418. src -= 2*srcStride+2;\
  2419. while(w--){\
  2420. asm volatile(\
  2421. "pxor %%mm7, %%mm7 \n\t"\
  2422. "movd (%0), %%mm0 \n\t"\
  2423. "add %2, %0 \n\t"\
  2424. "movd (%0), %%mm1 \n\t"\
  2425. "add %2, %0 \n\t"\
  2426. "movd (%0), %%mm2 \n\t"\
  2427. "add %2, %0 \n\t"\
  2428. "movd (%0), %%mm3 \n\t"\
  2429. "add %2, %0 \n\t"\
  2430. "movd (%0), %%mm4 \n\t"\
  2431. "add %2, %0 \n\t"\
  2432. "punpcklbw %%mm7, %%mm0 \n\t"\
  2433. "punpcklbw %%mm7, %%mm1 \n\t"\
  2434. "punpcklbw %%mm7, %%mm2 \n\t"\
  2435. "punpcklbw %%mm7, %%mm3 \n\t"\
  2436. "punpcklbw %%mm7, %%mm4 \n\t"\
  2437. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
  2438. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
  2439. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
  2440. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
  2441. \
  2442. : "+a"(src)\
  2443. : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
  2444. : "memory"\
  2445. );\
  2446. tmp += 4;\
  2447. src += 4 - 9*srcStride;\
  2448. }\
  2449. tmp -= 3*4;\
  2450. asm volatile(\
  2451. "movq %4, %%mm6 \n\t"\
  2452. "1: \n\t"\
  2453. "movq (%0), %%mm0 \n\t"\
  2454. "paddw 10(%0), %%mm0 \n\t"\
  2455. "movq 2(%0), %%mm1 \n\t"\
  2456. "paddw 8(%0), %%mm1 \n\t"\
  2457. "movq 4(%0), %%mm2 \n\t"\
  2458. "paddw 6(%0), %%mm2 \n\t"\
  2459. "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
  2460. "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
  2461. "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
  2462. "paddsw %%mm2, %%mm0 \n\t"\
  2463. "psraw $2, %%mm0 \n\t"/*((a-b)/4-b)/4 */\
  2464. "paddw %%mm6, %%mm2 \n\t"\
  2465. "paddw %%mm2, %%mm0 \n\t"\
  2466. "psraw $6, %%mm0 \n\t"\
  2467. "packuswb %%mm0, %%mm0 \n\t"\
  2468. OP(%%mm0, (%1),%%mm7, d)\
  2469. "add $24, %0 \n\t"\
  2470. "add %3, %1 \n\t"\
  2471. "decl %2 \n\t"\
  2472. " jnz 1b \n\t"\
  2473. : "+a"(tmp), "+c"(dst), "+m"(h)\
  2474. : "S"((long)dstStride), "m"(ff_pw_32)\
  2475. : "memory"\
  2476. );\
  2477. }\
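/* Illustrative note: the loop above is the second (horizontal) pass over \
   the 16-bit intermediates; the six taps fold into the pair sums a, b, c \
   (the abccba pattern). To stay within 16-bit range, (a - 5b + 20c)/16 is \
   evaluated as ((a-b)/4 - b + c)/4 + c using arithmetic shifts, and the \
   final "+32, >>6" completes the overall (a - 5b + 20c + 512) >> 10 \
   rounding of the two-stage filter. */\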
  2478. \
  2479. static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2480. int h=8;\
  2481. asm volatile(\
  2482. "pxor %%mm7, %%mm7 \n\t"\
  2483. "movq %5, %%mm6 \n\t"\
  2484. "1: \n\t"\
  2485. "movq (%0), %%mm0 \n\t"\
  2486. "movq 1(%0), %%mm2 \n\t"\
  2487. "movq %%mm0, %%mm1 \n\t"\
  2488. "movq %%mm2, %%mm3 \n\t"\
  2489. "punpcklbw %%mm7, %%mm0 \n\t"\
  2490. "punpckhbw %%mm7, %%mm1 \n\t"\
  2491. "punpcklbw %%mm7, %%mm2 \n\t"\
  2492. "punpckhbw %%mm7, %%mm3 \n\t"\
  2493. "paddw %%mm2, %%mm0 \n\t"\
  2494. "paddw %%mm3, %%mm1 \n\t"\
  2495. "psllw $2, %%mm0 \n\t"\
  2496. "psllw $2, %%mm1 \n\t"\
  2497. "movq -1(%0), %%mm2 \n\t"\
  2498. "movq 2(%0), %%mm4 \n\t"\
  2499. "movq %%mm2, %%mm3 \n\t"\
  2500. "movq %%mm4, %%mm5 \n\t"\
  2501. "punpcklbw %%mm7, %%mm2 \n\t"\
  2502. "punpckhbw %%mm7, %%mm3 \n\t"\
  2503. "punpcklbw %%mm7, %%mm4 \n\t"\
  2504. "punpckhbw %%mm7, %%mm5 \n\t"\
  2505. "paddw %%mm4, %%mm2 \n\t"\
  2506. "paddw %%mm3, %%mm5 \n\t"\
  2507. "psubw %%mm2, %%mm0 \n\t"\
  2508. "psubw %%mm5, %%mm1 \n\t"\
  2509. "pmullw %%mm6, %%mm0 \n\t"\
  2510. "pmullw %%mm6, %%mm1 \n\t"\
  2511. "movd -2(%0), %%mm2 \n\t"\
  2512. "movd 7(%0), %%mm5 \n\t"\
  2513. "punpcklbw %%mm7, %%mm2 \n\t"\
  2514. "punpcklbw %%mm7, %%mm5 \n\t"\
  2515. "paddw %%mm3, %%mm2 \n\t"\
  2516. "paddw %%mm5, %%mm4 \n\t"\
  2517. "movq %6, %%mm5 \n\t"\
  2518. "paddw %%mm5, %%mm2 \n\t"\
  2519. "paddw %%mm5, %%mm4 \n\t"\
  2520. "paddw %%mm2, %%mm0 \n\t"\
  2521. "paddw %%mm4, %%mm1 \n\t"\
  2522. "psraw $5, %%mm0 \n\t"\
  2523. "psraw $5, %%mm1 \n\t"\
  2524. "packuswb %%mm1, %%mm0 \n\t"\
  2525. OP(%%mm0, (%1),%%mm5, q)\
  2526. "add %3, %0 \n\t"\
  2527. "add %4, %1 \n\t"\
  2528. "decl %2 \n\t"\
  2529. " jnz 1b \n\t"\
  2530. : "+a"(src), "+c"(dst), "+m"(h)\
  2531. : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2532. : "memory"\
  2533. );\
  2534. }\
  2535. \
  2536. static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2537. int h= 2;\
  2538. src -= 2*srcStride;\
  2539. \
  2540. while(h--){\
  2541. asm volatile(\
  2542. "pxor %%mm7, %%mm7 \n\t"\
  2543. "movd (%0), %%mm0 \n\t"\
  2544. "add %2, %0 \n\t"\
  2545. "movd (%0), %%mm1 \n\t"\
  2546. "add %2, %0 \n\t"\
  2547. "movd (%0), %%mm2 \n\t"\
  2548. "add %2, %0 \n\t"\
  2549. "movd (%0), %%mm3 \n\t"\
  2550. "add %2, %0 \n\t"\
  2551. "movd (%0), %%mm4 \n\t"\
  2552. "add %2, %0 \n\t"\
  2553. "punpcklbw %%mm7, %%mm0 \n\t"\
  2554. "punpcklbw %%mm7, %%mm1 \n\t"\
  2555. "punpcklbw %%mm7, %%mm2 \n\t"\
  2556. "punpcklbw %%mm7, %%mm3 \n\t"\
  2557. "punpcklbw %%mm7, %%mm4 \n\t"\
  2558. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2559. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2560. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  2561. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  2562. QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
  2563. QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
  2564. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2565. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2566. \
  2567. : "+a"(src), "+c"(dst)\
  2568. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2569. : "memory"\
  2570. );\
  2571. src += 4-13*srcStride;\
  2572. dst += 4-8*dstStride;\
  2573. }\
  2574. }\
  2575. static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2576. int h=8;\
  2577. int w=4;\
  2578. src -= 2*srcStride+2;\
  2579. while(w--){\
  2580. asm volatile(\
  2581. "pxor %%mm7, %%mm7 \n\t"\
  2582. "movd (%0), %%mm0 \n\t"\
  2583. "add %2, %0 \n\t"\
  2584. "movd (%0), %%mm1 \n\t"\
  2585. "add %2, %0 \n\t"\
  2586. "movd (%0), %%mm2 \n\t"\
  2587. "add %2, %0 \n\t"\
  2588. "movd (%0), %%mm3 \n\t"\
  2589. "add %2, %0 \n\t"\
  2590. "movd (%0), %%mm4 \n\t"\
  2591. "add %2, %0 \n\t"\
  2592. "punpcklbw %%mm7, %%mm0 \n\t"\
  2593. "punpcklbw %%mm7, %%mm1 \n\t"\
  2594. "punpcklbw %%mm7, %%mm2 \n\t"\
  2595. "punpcklbw %%mm7, %%mm3 \n\t"\
  2596. "punpcklbw %%mm7, %%mm4 \n\t"\
  2597. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\
  2598. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\
  2599. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\
  2600. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\
  2601. QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\
  2602. QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\
  2603. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\
  2604. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\
  2605. \
  2606. : "+a"(src)\
  2607. : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
  2608. : "memory"\
  2609. );\
  2610. tmp += 4;\
  2611. src += 4 - 13*srcStride;\
  2612. }\
  2613. tmp -= 4*4;\
  2614. asm volatile(\
  2615. "movq %4, %%mm6 \n\t"\
  2616. "1: \n\t"\
  2617. "movq (%0), %%mm0 \n\t"\
  2618. "movq 8(%0), %%mm3 \n\t"\
  2619. "movq 2(%0), %%mm1 \n\t"\
  2620. "movq 10(%0), %%mm4 \n\t"\
  2621. "paddw %%mm4, %%mm0 \n\t"\
  2622. "paddw %%mm3, %%mm1 \n\t"\
  2623. "paddw 18(%0), %%mm3 \n\t"\
  2624. "paddw 16(%0), %%mm4 \n\t"\
  2625. "movq 4(%0), %%mm2 \n\t"\
  2626. "movq 12(%0), %%mm5 \n\t"\
  2627. "paddw 6(%0), %%mm2 \n\t"\
  2628. "paddw 14(%0), %%mm5 \n\t"\
  2629. "psubw %%mm1, %%mm0 \n\t"\
  2630. "psubw %%mm4, %%mm3 \n\t"\
  2631. "psraw $2, %%mm0 \n\t"\
  2632. "psraw $2, %%mm3 \n\t"\
  2633. "psubw %%mm1, %%mm0 \n\t"\
  2634. "psubw %%mm4, %%mm3 \n\t"\
  2635. "paddsw %%mm2, %%mm0 \n\t"\
  2636. "paddsw %%mm5, %%mm3 \n\t"\
  2637. "psraw $2, %%mm0 \n\t"\
  2638. "psraw $2, %%mm3 \n\t"\
  2639. "paddw %%mm6, %%mm2 \n\t"\
  2640. "paddw %%mm6, %%mm5 \n\t"\
  2641. "paddw %%mm2, %%mm0 \n\t"\
  2642. "paddw %%mm5, %%mm3 \n\t"\
  2643. "psraw $6, %%mm0 \n\t"\
  2644. "psraw $6, %%mm3 \n\t"\
  2645. "packuswb %%mm3, %%mm0 \n\t"\
  2646. OP(%%mm0, (%1),%%mm7, q)\
  2647. "add $32, %0 \n\t"\
  2648. "add %3, %1 \n\t"\
  2649. "decl %2 \n\t"\
  2650. " jnz 1b \n\t"\
  2651. : "+a"(tmp), "+c"(dst), "+m"(h)\
  2652. : "S"((long)dstStride), "m"(ff_pw_32)\
  2653. : "memory"\
  2654. );\
  2655. }\
  2656. static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2657. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2658. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2659. src += 8*srcStride;\
  2660. dst += 8*dstStride;\
  2661. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2662. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2663. }\
  2664. \
  2665. static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2666. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2667. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2668. src += 8*srcStride;\
  2669. dst += 8*dstStride;\
  2670. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2671. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2672. }\
  2673. \
  2674. static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2675. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
  2676. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
  2677. src += 8*srcStride;\
  2678. dst += 8*dstStride;\
  2679. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
  2680. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
  2681. }\
  2682. #define H264_MC(OPNAME, SIZE, MMX) \
  2683. static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  2684. OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
  2685. }\
  2686. \
  2687. static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2688. uint64_t temp[SIZE*SIZE/8];\
  2689. uint8_t * const half= (uint8_t*)temp;\
  2690. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
  2691. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
  2692. }\
  2693. \
  2694. static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2695. OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
  2696. }\
  2697. \
  2698. static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2699. uint64_t temp[SIZE*SIZE/8];\
  2700. uint8_t * const half= (uint8_t*)temp;\
  2701. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
  2702. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\
  2703. }\
  2704. \
  2705. static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2706. uint64_t temp[SIZE*SIZE/8];\
  2707. uint8_t * const half= (uint8_t*)temp;\
  2708. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
  2709. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
  2710. }\
  2711. \
  2712. static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2713. OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
  2714. }\
  2715. \
  2716. static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2717. uint64_t temp[SIZE*SIZE/8];\
  2718. uint8_t * const half= (uint8_t*)temp;\
  2719. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
  2720. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
  2721. }\
  2722. \
  2723. static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2724. uint64_t temp[SIZE*SIZE/4];\
  2725. uint8_t * const halfH= (uint8_t*)temp;\
  2726. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2727. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2728. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2729. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2730. }\
  2731. \
  2732. static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2733. uint64_t temp[SIZE*SIZE/4];\
  2734. uint8_t * const halfH= (uint8_t*)temp;\
  2735. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2736. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2737. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2738. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2739. }\
  2740. \
  2741. static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2742. uint64_t temp[SIZE*SIZE/4];\
  2743. uint8_t * const halfH= (uint8_t*)temp;\
  2744. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2745. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2746. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2747. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2748. }\
  2749. \
  2750. static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2751. uint64_t temp[SIZE*SIZE/4];\
  2752. uint8_t * const halfH= (uint8_t*)temp;\
  2753. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2754. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2755. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2756. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2757. }\
  2758. \
  2759. static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2760. uint64_t temp[SIZE*(SIZE+8)/4];\
  2761. int16_t * const tmp= (int16_t*)temp;\
  2762. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
  2763. }\
  2764. \
  2765. static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2766. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2767. uint8_t * const halfH= (uint8_t*)temp;\
  2768. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2769. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2770. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2771. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2772. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
  2773. }\
  2774. \
  2775. static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2776. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2777. uint8_t * const halfH= (uint8_t*)temp;\
  2778. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2779. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2780. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2781. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2782. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
  2783. }\
  2784. \
  2785. static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2786. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2787. uint8_t * const halfV= (uint8_t*)temp;\
  2788. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2789. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2790. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2791. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2792. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
  2793. }\
  2794. \
  2795. static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2796. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2797. uint8_t * const halfV= (uint8_t*)temp;\
  2798. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2799. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2800. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2801. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2802. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
  2803. }\
  2804. #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
  2805. #define AVG_3DNOW_OP(a,b,temp, size) \
  2806. "mov" #size " " #b ", " #temp " \n\t"\
  2807. "pavgusb " #temp ", " #a " \n\t"\
  2808. "mov" #size " " #a ", " #b " \n\t"
  2809. #define AVG_MMX2_OP(a,b,temp, size) \
  2810. "mov" #size " " #b ", " #temp " \n\t"\
  2811. "pavgb " #temp ", " #a " \n\t"\
  2812. "mov" #size " " #a ", " #b " \n\t"
  2813. QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
  2814. QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
  2815. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  2816. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
  2817. QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
  2818. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  2819. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
  2820. QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
  2821. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
  2822. QPEL_H264(put_ , PUT_OP, 3dnow)
  2823. QPEL_H264(avg_ , AVG_3DNOW_OP, 3dnow)
  2824. QPEL_H264(put_ , PUT_OP, mmx2)
  2825. QPEL_H264(avg_ , AVG_MMX2_OP, mmx2)
  2826. H264_MC(put_, 4, 3dnow)
  2827. H264_MC(put_, 8, 3dnow)
  2828. H264_MC(put_, 16,3dnow)
  2829. H264_MC(avg_, 4, 3dnow)
  2830. H264_MC(avg_, 8, 3dnow)
  2831. H264_MC(avg_, 16,3dnow)
  2832. H264_MC(put_, 4, mmx2)
  2833. H264_MC(put_, 8, mmx2)
  2834. H264_MC(put_, 16,mmx2)
  2835. H264_MC(avg_, 4, mmx2)
  2836. H264_MC(avg_, 8, mmx2)
  2837. H264_MC(avg_, 16,mmx2)
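/* The H264_MC instantiations above generate the full grid of H.264 luma MC
   entry points: 16 quarter-pel positions x 3 block sizes (4, 8, 16) x
   2 ops (put, avg) x 2 ISAs (3dnow, mmx2) = 192 functions. */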
  2838. /** These are used by *_h264_chroma_mc8_* */
  2839. static const uint64_t thirtytwo __align8 = 0x0020002000200020ULL;
  2840. static const uint64_t sixtyfour __align8 = 0x0040004000400040ULL;
  2841. #define H264_CHROMA_OP(S,D)
  2842. #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
  2843. #include "dsputil_h264_template_mmx.c"
  2844. #undef H264_CHROMA_OP
  2845. #undef H264_CHROMA_MC8_TMPL
  2846. #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
  2847. #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
  2848. #include "dsputil_h264_template_mmx.c"
  2849. #undef H264_CHROMA_OP
  2850. #undef H264_CHROMA_MC8_TMPL
  2851. #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
  2852. #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
  2853. #include "dsputil_h264_template_mmx.c"
  2854. #undef H264_CHROMA_OP
  2855. #undef H264_CHROMA_MC8_TMPL
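/* dsputil_h264_template_mmx.c is compiled three times, parameterized by
   H264_CHROMA_OP. For the put variant the macro expands to nothing, so the
   interpolated chroma is stored as-is; the avg variants splice in a single
   averaging instruction, e.g. (illustrative expansion, the register names
   are only an example):
     H264_CHROMA_OP(%%mm0, %%mm2) -> "pavgb %%mm0, %%mm2 \n\t"
   which rounds-and-averages the new result with the existing destination. */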
  2856. #if 0
  2857. static void just_return(void) { return; }
  2858. #endif
  2859. #define SET_QPEL_FUNC(postfix1, postfix2) \
  2860. c->put_ ## postfix1 = put_ ## postfix2;\
  2861. c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
  2862. c->avg_ ## postfix1 = avg_ ## postfix2;
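/* Illustrative expansion: SET_QPEL_FUNC(qpel_pixels_tab[0][ 0],
   qpel16_mc00_mmx2) pastes into:
     c->put_qpel_pixels_tab[0][ 0] = put_qpel16_mc00_mmx2;
     c->put_no_rnd_qpel_pixels_tab[0][ 0] = put_no_rnd_qpel16_mc00_mmx2;
     c->avg_qpel_pixels_tab[0][ 0] = avg_qpel16_mc00_mmx2;
   so one invocation wires the put, no-round put and avg variants at once. */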
  2863. static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
  2864. long i=0;
  2865. assert(ABS(scale) < 256);
  2866. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2867. asm volatile(
  2868. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2869. "psrlw $15, %%mm6 \n\t" // 1w
  2870. "pxor %%mm7, %%mm7 \n\t"
  2871. "movd %4, %%mm5 \n\t"
  2872. "punpcklwd %%mm5, %%mm5 \n\t"
  2873. "punpcklwd %%mm5, %%mm5 \n\t"
  2874. "1: \n\t"
  2875. "movq (%1, %0), %%mm0 \n\t"
  2876. "movq 8(%1, %0), %%mm1 \n\t"
  2877. "pmulhw %%mm5, %%mm0 \n\t"
  2878. "pmulhw %%mm5, %%mm1 \n\t"
  2879. "paddw %%mm6, %%mm0 \n\t"
  2880. "paddw %%mm6, %%mm1 \n\t"
  2881. "psraw $1, %%mm0 \n\t"
  2882. "psraw $1, %%mm1 \n\t"
  2883. "paddw (%2, %0), %%mm0 \n\t"
  2884. "paddw 8(%2, %0), %%mm1 \n\t"
  2885. "psraw $6, %%mm0 \n\t"
  2886. "psraw $6, %%mm1 \n\t"
  2887. "pmullw (%3, %0), %%mm0 \n\t"
  2888. "pmullw 8(%3, %0), %%mm1 \n\t"
  2889. "pmaddwd %%mm0, %%mm0 \n\t"
  2890. "pmaddwd %%mm1, %%mm1 \n\t"
  2891. "paddd %%mm1, %%mm0 \n\t"
  2892. "psrld $4, %%mm0 \n\t"
  2893. "paddd %%mm0, %%mm7 \n\t"
  2894. "add $16, %0 \n\t"
  2895. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2896. " jb 1b \n\t"
  2897. "movq %%mm7, %%mm6 \n\t"
  2898. "psrlq $32, %%mm7 \n\t"
  2899. "paddd %%mm6, %%mm7 \n\t"
  2900. "psrld $2, %%mm7 \n\t"
  2901. "movd %%mm7, %0 \n\t"
  2902. : "+r" (i)
  2903. : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
  2904. );
  2905. return i;
  2906. }
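/* A plain-C sketch of what the MMX loop above computes; to the best of my
   reading it mirrors try_8x8basis_c() in dsputil.c. Kept under #if 0 since
   it is illustrative only: the asm folds the basis scaling, the rounding,
   RECON_SHIFT, the per-coefficient weighting and the squared sum into one
   pass over the 64 coefficients. */
#if 0
static int try_8x8basis_c_ref(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    int i;
    unsigned int sum=0;
    for(i=0; i<8*8; i++){
        int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT - 1)))
                         >> (BASIS_SHIFT - RECON_SHIFT));
        int w= weight[i];
        b >>= RECON_SHIFT;                /* the psraw $6 above, RECON_SHIFT == 6 */
        sum += (w*b) * (w*b) >> 4;        /* pmullw + pmaddwd + psrld $4 */
    }
    return sum>>2;                        /* the final psrld $2 */
}
#endif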
  2907. static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
  2908. long i=0;
  2909. if(ABS(scale) < 256){
  2910. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2911. asm volatile(
  2912. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2913. "psrlw $15, %%mm6 \n\t" // 1w
  2914. "movd %3, %%mm5 \n\t"
  2915. "punpcklwd %%mm5, %%mm5 \n\t"
  2916. "punpcklwd %%mm5, %%mm5 \n\t"
  2917. "1: \n\t"
  2918. "movq (%1, %0), %%mm0 \n\t"
  2919. "movq 8(%1, %0), %%mm1 \n\t"
  2920. "pmulhw %%mm5, %%mm0 \n\t"
  2921. "pmulhw %%mm5, %%mm1 \n\t"
  2922. "paddw %%mm6, %%mm0 \n\t"
  2923. "paddw %%mm6, %%mm1 \n\t"
  2924. "psraw $1, %%mm0 \n\t"
  2925. "psraw $1, %%mm1 \n\t"
  2926. "paddw (%2, %0), %%mm0 \n\t"
  2927. "paddw 8(%2, %0), %%mm1 \n\t"
  2928. "movq %%mm0, (%2, %0) \n\t"
  2929. "movq %%mm1, 8(%2, %0) \n\t"
  2930. "add $16, %0 \n\t"
  2931. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2932. " jb 1b \n\t"
  2933. : "+r" (i)
  2934. : "r"(basis), "r"(rem), "g"(scale)
  2935. );
  2936. }else{
  2937. for(i=0; i<8*8; i++){
  2938. rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
  2939. }
  2940. }
  2941. }
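/* Note on the ABS(scale) < 256 guard: assuming BASIS_SHIFT == 16 and
   RECON_SHIFT == 6 (their values in dsputil.h, as I read them), scale is
   pre-shifted left by 16 + 1 - 16 + 6 = 7 bits for pmulhw, and |scale| < 256
   keeps |scale << 7| < 32768, i.e. within pmulhw's signed 16-bit operand
   range; anything larger takes the plain C fallback loop above. */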
  2942. /* external functions, from idct_mmx.c and the other i386 IDCT files */
  2943. void ff_mmx_idct(DCTELEM *block);
  2944. void ff_mmxext_idct(DCTELEM *block);
  2945. void ff_vp3_idct_sse2(int16_t *input_data);
  2946. void ff_vp3_idct_mmx(int16_t *data);
  2947. void ff_vp3_dsp_init_mmx(void);
  2948. void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride);
  2949. /* XXX: these wrapper functions should be removed as soon as all
  2950. IDCTs have been converted */
  2951. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2952. {
  2953. ff_mmx_idct (block);
  2954. put_pixels_clamped_mmx(block, dest, line_size);
  2955. }
  2956. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2957. {
  2958. ff_mmx_idct (block);
  2959. add_pixels_clamped_mmx(block, dest, line_size);
  2960. }
  2961. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2962. {
  2963. ff_mmxext_idct (block);
  2964. put_pixels_clamped_mmx(block, dest, line_size);
  2965. }
  2966. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2967. {
  2968. ff_mmxext_idct (block);
  2969. add_pixels_clamped_mmx(block, dest, line_size);
  2970. }
  2971. static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
  2972. {
  2973. ff_vp3_idct_sse2(block);
  2974. put_signed_pixels_clamped_mmx(block, dest, line_size);
  2975. }
  2976. static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
  2977. {
  2978. ff_vp3_idct_sse2(block);
  2979. add_pixels_clamped_mmx(block, dest, line_size);
  2980. }
  2981. static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
  2982. {
  2983. ff_vp3_idct_mmx(block);
  2984. put_signed_pixels_clamped_mmx(block, dest, line_size);
  2985. }
  2986. static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
  2987. {
  2988. ff_vp3_idct_mmx(block);
  2989. add_pixels_clamped_mmx(block, dest, line_size);
  2990. }
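/* The three clamped-store helpers used by the wrappers above differ only in
   how the IDCT output reaches the picture (illustrative per-pixel formulas,
   with clamping to [0,255]):
     put_pixels_clamped:        dest[x] = clip_uint8(block[x]);
     add_pixels_clamped:        dest[x] = clip_uint8(dest[x] + block[x]);
     put_signed_pixels_clamped: dest[x] = clip_uint8(block[x] + 128);
   the signed variant suits VP3, whose IDCT output is centered on zero. */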
  2991. void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
  2992. {
  2993. mm_flags = mm_support();
  2994. if (avctx->dsp_mask) {
  2995. if (avctx->dsp_mask & FF_MM_FORCE)
  2996. mm_flags |= (avctx->dsp_mask & 0xffff);
  2997. else
  2998. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  2999. }
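/* Hypothetical caller-side sketch of dsp_mask (the field and FF_MM_FORCE are
   real, the chosen values are only examples):
     avctx->dsp_mask = FF_MM_FORCE | MM_MMX;   // force the MMX bit on
     avctx->dsp_mask = MM_SSE2;                // clear the SSE2 bit
   without FF_MM_FORCE the low 16 mask bits are cleared from the detected
   flags; with it they are OR-ed in, overriding CPU detection. */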
  3000. #if 0
  3001. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  3002. if (mm_flags & MM_MMX)
  3003. av_log(avctx, AV_LOG_INFO, " mmx");
  3004. if (mm_flags & MM_MMXEXT)
  3005. av_log(avctx, AV_LOG_INFO, " mmxext");
  3006. if (mm_flags & MM_3DNOW)
  3007. av_log(avctx, AV_LOG_INFO, " 3dnow");
  3008. if (mm_flags & MM_SSE)
  3009. av_log(avctx, AV_LOG_INFO, " sse");
  3010. if (mm_flags & MM_SSE2)
  3011. av_log(avctx, AV_LOG_INFO, " sse2");
  3012. av_log(avctx, AV_LOG_INFO, "\n");
  3013. #endif
  3014. if (mm_flags & MM_MMX) {
  3015. const int idct_algo= avctx->idct_algo;
  3016. #ifdef CONFIG_ENCODERS
  3017. const int dct_algo = avctx->dct_algo;
  3018. if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
  3019. if(mm_flags & MM_SSE2){
  3020. c->fdct = ff_fdct_sse2;
  3021. }else if(mm_flags & MM_MMXEXT){
  3022. c->fdct = ff_fdct_mmx2;
  3023. }else{
  3024. c->fdct = ff_fdct_mmx;
  3025. }
  3026. }
  3027. #endif //CONFIG_ENCODERS
  3028. if(avctx->lowres==0){
  3029. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  3030. c->idct_put= ff_simple_idct_put_mmx;
  3031. c->idct_add= ff_simple_idct_add_mmx;
  3032. c->idct = ff_simple_idct_mmx;
  3033. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  3034. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  3035. if(mm_flags & MM_MMXEXT){
  3036. c->idct_put= ff_libmpeg2mmx2_idct_put;
  3037. c->idct_add= ff_libmpeg2mmx2_idct_add;
  3038. c->idct = ff_mmxext_idct;
  3039. }else{
  3040. c->idct_put= ff_libmpeg2mmx_idct_put;
  3041. c->idct_add= ff_libmpeg2mmx_idct_add;
  3042. c->idct = ff_mmx_idct;
  3043. }
  3044. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  3045. }else if(idct_algo==FF_IDCT_VP3){
  3046. if(mm_flags & MM_SSE2){
  3047. c->idct_put= ff_vp3_idct_put_sse2;
  3048. c->idct_add= ff_vp3_idct_add_sse2;
  3049. c->idct = ff_vp3_idct_sse2;
  3050. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  3051. }else{
  3052. ff_vp3_dsp_init_mmx();
  3053. c->idct_put= ff_vp3_idct_put_mmx;
  3054. c->idct_add= ff_vp3_idct_add_mmx;
  3055. c->idct = ff_vp3_idct_mmx;
  3056. c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
  3057. }
  3058. }
  3059. }
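/* idct_permutation_type tells the generic init code which coefficient order
   the chosen IDCT expects, so the scan tables can be permuted once at init
   time rather than reordering coefficients for every block. */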
  3060. #ifdef CONFIG_ENCODERS
  3061. c->get_pixels = get_pixels_mmx;
  3062. c->diff_pixels = diff_pixels_mmx;
  3063. #endif //CONFIG_ENCODERS
  3064. c->put_pixels_clamped = put_pixels_clamped_mmx;
  3065. c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
  3066. c->add_pixels_clamped = add_pixels_clamped_mmx;
  3067. c->clear_blocks = clear_blocks_mmx;
  3068. #ifdef CONFIG_ENCODERS
  3069. c->pix_sum = pix_sum16_mmx;
  3070. #endif //CONFIG_ENCODERS
  3071. c->put_pixels_tab[0][0] = put_pixels16_mmx;
  3072. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
  3073. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
  3074. c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
  3075. c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
  3076. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
  3077. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
  3078. c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
  3079. c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
  3080. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
  3081. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
  3082. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
  3083. c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
  3084. c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
  3085. c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
  3086. c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
  3087. c->put_pixels_tab[1][0] = put_pixels8_mmx;
  3088. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
  3089. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
  3090. c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
  3091. c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
  3092. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
  3093. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
  3094. c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
  3095. c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
  3096. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
  3097. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
  3098. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
  3099. c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
  3100. c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
  3101. c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
  3102. c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
  3103. c->add_bytes= add_bytes_mmx;
  3104. #ifdef CONFIG_ENCODERS
  3105. c->diff_bytes= diff_bytes_mmx;
  3106. c->hadamard8_diff[0]= hadamard8_diff16_mmx;
  3107. c->hadamard8_diff[1]= hadamard8_diff_mmx;
  3108. c->pix_norm1 = pix_norm1_mmx;
  3109. c->sse[0] = sse16_mmx;
  3110. c->sse[1] = sse8_mmx;
  3111. c->vsad[4]= vsad_intra16_mmx;
  3112. c->nsse[0] = nsse16_mmx;
  3113. c->nsse[1] = nsse8_mmx;
  3114. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  3115. c->vsad[0] = vsad16_mmx;
  3116. c->try_8x8basis= try_8x8basis_mmx;
  3117. }
  3120. c->add_8x8basis= add_8x8basis_mmx;
  3121. #endif //CONFIG_ENCODERS
  3122. c->h263_v_loop_filter= h263_v_loop_filter_mmx;
  3123. c->h263_h_loop_filter= h263_h_loop_filter_mmx;
  3124. c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
  3125. if (mm_flags & MM_MMXEXT) {
  3126. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
  3127. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
  3128. c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
  3129. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
  3130. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
  3131. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
  3132. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
  3133. c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
  3134. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
  3135. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
  3136. #ifdef CONFIG_ENCODERS
  3137. c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
  3138. c->hadamard8_diff[1]= hadamard8_diff_mmx2;
  3139. c->vsad[4]= vsad_intra16_mmx2;
  3140. #endif //CONFIG_ENCODERS
  3141. c->h264_idct_add= ff_h264_idct_add_mmx2;
  3142. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  3143. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
  3144. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
  3145. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
  3146. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
  3147. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
  3148. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
  3149. #ifdef CONFIG_ENCODERS
  3150. c->vsad[0] = vsad16_mmx2;
  3151. #endif //CONFIG_ENCODERS
  3152. }
  3153. #if 1
  3154. SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
  3155. SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
  3156. SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
  3157. SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
  3158. SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
  3159. SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
  3160. SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
  3161. SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
  3162. SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
  3163. SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
  3164. SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
  3165. SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
  3166. SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
  3167. SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
  3168. SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
  3169. SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
  3170. SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
  3171. SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
  3172. SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
  3173. SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
  3174. SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
  3175. SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
  3176. SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
  3177. SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
  3178. SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
  3179. SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
  3180. SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
  3181. SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
  3182. SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
  3183. SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
  3184. SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
  3185. SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
  3186. #endif
  3187. //FIXME 3dnow too
  3188. #define dspfunc(PFX, IDX, NUM) \
  3189. c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
  3190. c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
  3191. c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
  3192. c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
  3193. c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
  3194. c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
  3195. c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
  3196. c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
  3197. c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
  3198. c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
  3199. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
  3200. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
  3201. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
  3202. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
  3203. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
  3204. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2
  3205. dspfunc(put_h264_qpel, 0, 16);
  3206. dspfunc(put_h264_qpel, 1, 8);
  3207. dspfunc(put_h264_qpel, 2, 4);
  3208. dspfunc(avg_h264_qpel, 0, 16);
  3209. dspfunc(avg_h264_qpel, 1, 8);
  3210. dspfunc(avg_h264_qpel, 2, 4);
  3211. #undef dspfunc
  3212. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
  3213. c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
  3214. c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
  3215. c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
  3216. c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
  3217. c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
  3218. c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
  3219. #ifdef CONFIG_ENCODERS
  3220. c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
  3221. #endif //CONFIG_ENCODERS
  3222. } else if (mm_flags & MM_3DNOW) {
  3223. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  3224. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  3225. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  3226. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  3227. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  3228. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  3229. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  3230. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  3231. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  3232. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  3233. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  3234. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  3235. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  3236. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  3237. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  3238. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  3239. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  3240. }
  3241. SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
  3242. SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
  3243. SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
  3244. SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
  3245. SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
  3246. SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
  3247. SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
  3248. SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
  3249. SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
  3250. SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
  3251. SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
  3252. SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
  3253. SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
  3254. SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
  3255. SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
  3256. SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
  3257. SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
  3258. SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
  3259. SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
  3260. SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
  3261. SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
  3262. SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
  3263. SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
  3264. SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
  3265. SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
  3266. SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
  3267. SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
  3268. SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
  3269. SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
  3270. SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
  3271. SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
  3272. SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
  3273. #define dspfunc(PFX, IDX, NUM) \
  3274. c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
  3275. c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
  3276. c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
  3277. c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
  3278. c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
  3279. c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
  3280. c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
  3281. c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
  3282. c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
  3283. c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
  3284. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
  3285. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
  3286. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
  3287. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
  3288. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
  3289. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
  3290. dspfunc(put_h264_qpel, 0, 16);
  3291. dspfunc(put_h264_qpel, 1, 8);
  3292. dspfunc(put_h264_qpel, 2, 4);
  3293. dspfunc(avg_h264_qpel, 0, 16);
  3294. dspfunc(avg_h264_qpel, 1, 8);
  3295. dspfunc(avg_h264_qpel, 2, 4);
  3296. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
  3297. }
  3298. }
  3299. #ifdef CONFIG_ENCODERS
  3300. dsputil_init_pix_mmx(c, avctx);
  3301. #endif //CONFIG_ENCODERS
  3302. #if 0
  3303. // for speed testing
  3304. get_pixels = just_return;
  3305. put_pixels_clamped = just_return;
  3306. add_pixels_clamped = just_return;
  3307. pix_abs16x16 = just_return;
  3308. pix_abs16x16_x2 = just_return;
  3309. pix_abs16x16_y2 = just_return;
  3310. pix_abs16x16_xy2 = just_return;
  3311. put_pixels_tab[0] = just_return;
  3312. put_pixels_tab[1] = just_return;
  3313. put_pixels_tab[2] = just_return;
  3314. put_pixels_tab[3] = just_return;
  3315. put_no_rnd_pixels_tab[0] = just_return;
  3316. put_no_rnd_pixels_tab[1] = just_return;
  3317. put_no_rnd_pixels_tab[2] = just_return;
  3318. put_no_rnd_pixels_tab[3] = just_return;
  3319. avg_pixels_tab[0] = just_return;
  3320. avg_pixels_tab[1] = just_return;
  3321. avg_pixels_tab[2] = just_return;
  3322. avg_pixels_tab[3] = just_return;
  3323. avg_no_rnd_pixels_tab[0] = just_return;
  3324. avg_no_rnd_pixels_tab[1] = just_return;
  3325. avg_no_rnd_pixels_tab[2] = just_return;
  3326. avg_no_rnd_pixels_tab[3] = just_return;
  3327. //av_fdct = just_return;
  3328. //ff_idct = just_return;
  3329. #endif
  3330. }
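/* A hypothetical usage sketch, illustrative only: in the real codebase
   dsputil_init() in dsputil.c calls dsputil_init_mmx() itself. */
#if 0
static void example_use(AVCodecContext *avctx, uint8_t *dst, const uint8_t *src, int stride){
    DSPContext dsp;
    dsputil_init_mmx(&dsp, avctx);
    /* copy one 16x16 block at the full-pel position (index [0][0] == mc00) */
    dsp.put_pixels_tab[0][0](dst, src, stride, 16);
}
#endif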