/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
// using regr as temporary and for the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"
#include "dsputil_mmx_avg.h"
#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */
#define DEF(x) x ## _mmx2
/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#include "dsputil_mmx_avg.h"
#undef DEF
#undef PAVGB

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "movl $-128, %%eax \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%eax)\n\t"
        "movq %%mm1, 8(%1, %%eax)\n\t"
        "movq %%mm2, 16(%1, %%eax)\n\t"
        "movq %%mm3, 24(%1, %%eax)\n\t"
        "addl %3, %0 \n\t"
        "addl $32, %%eax \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" (line_size), "r" (line_size*2)
        : "%eax"
    );
}
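
/* Rough scalar equivalent of get_pixels_mmx, for reference only:
 *     for (i = 0; i < 8; i++, pixels += line_size)
 *         for (j = 0; j < 8; j++)
 *             block[i*8 + j] = pixels[j];
 * the MMX version widens 8 bytes to 8 words per row via punpcklbw/punpckhbw
 * against the zeroed mm7 and handles two rows per loop iteration. */
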
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movl $-128, %%eax \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%eax)\n\t"
        "movq %%mm1, 8(%2, %%eax)\n\t"
        "addl %3, %0 \n\t"
        "addl %3, %1 \n\t"
        "addl $16, %%eax \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" (stride)
        : "%eax"
    );
}
#endif //CONFIG_ENCODERS

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq %3, %%mm0\n\t"
        "movq 8%3, %%mm1\n\t"
        "movq 16%3, %%mm2\n\t"
        "movq 24%3, %%mm3\n\t"
        "movq 32%3, %%mm4\n\t"
        "movq 40%3, %%mm5\n\t"
        "movq 48%3, %%mm6\n\t"
        "movq 56%3, %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code
    // thus using "r"
    __asm __volatile(
        "movq (%3), %%mm0\n\t"
        "movq 8(%3), %%mm1\n\t"
        "movq 16(%3), %%mm2\n\t"
        "movq 24(%3), %%mm3\n\t"
        "movq 32(%3), %%mm4\n\t"
        "movq 40(%3), %%mm5\n\t"
        "movq 48(%3), %%mm6\n\t"
        "movq 56(%3), %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "r"(p)
        :"memory");
}
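
/* Rough scalar equivalent of put_pixels_clamped_mmx, for reference only:
 *     for (i = 0; i < 8; i++, pixels += line_size)
 *         for (j = 0; j < 8; j++) {
 *             int v = block[i*8 + j];
 *             pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
 *         }
 * packuswb performs the signed-word to unsigned-byte saturation for free. */
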
static unsigned char __align8 vector128[8] =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq (%2), %%mm0\n\t"
            "movq 8(%2), %%mm1\n\t"
            "movq 16(%2), %%mm2\n\t"
            "movq 24(%2), %%mm3\n\t"
            "movq %0, %%mm4\n\t"
            "movq %1, %%mm6\n\t"
            "movq %%mm4, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm4\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm4, %%mm0\n\t"
            "paddsw %%mm5, %%mm1\n\t"
            "movq %%mm6, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm6\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm6, %%mm2\n\t"
            "paddsw %%mm5, %%mm3\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "movq %%mm0, %0\n\t"
            "movq %%mm2, %1\n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"(line_size)
        : "%eax", "memory"
    );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"(line_size)
        : "%eax", "memory"
    );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movl $-128*6, %%eax \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%eax) \n\t"
        "movq %%mm7, 8(%0, %%eax) \n\t"
        "movq %%mm7, 16(%0, %%eax) \n\t"
        "movq %%mm7, 24(%0, %%eax) \n\t"
        "addl $32, %%eax \n\t"
        " js 1b \n\t"
        : : "r" (((int)blocks)+128*6)
        : "%eax"
    );
}

#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    int index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "addl %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" (line_size)
    );
    return sum;
}
#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    int i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "addl $16, %0 \n\t"
        "cmpl %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(
        H263_LOOP_FILTER
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6
        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, %0 \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, %1 \n\t"
        "movd %%mm3, %2 \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, %3 \n\t"
        "movd %%mm1, %4 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %5 \n\t"
        "movd %%mm6, %6 \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, %7 \n\t"
        : "=m" (*(uint32_t*)(src + 0*stride)),
          "=m" (*(uint32_t*)(src + 1*stride)),
          "=m" (*(uint32_t*)(src + 2*stride)),
          "=m" (*(uint32_t*)(src + 3*stride)),
          "=m" (*(uint32_t*)(src + 4*stride)),
          "=m" (*(uint32_t*)(src + 5*stride)),
          "=m" (*(uint32_t*)(src + 6*stride)),
          "=m" (*(uint32_t*)(src + 7*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "addl %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
    return tmp;
}
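
/* pix_norm1 is just the sum of squares of a 16x16 block; scalar sketch:
 *     for (y = 0; y < 16; y++, pix += line_size)
 *         for (x = 0; x < 16; x++)
 *             sum += pix[x] * pix[x];
 * pmaddwd squares four words and pairwise-adds them in one instruction. */
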
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq %%mm1,%%mm5\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm5,%%mm2\n"
        "por %%mm1,%%mm2\n"
        "movq %%mm2,%%mm1\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm1,%%mm1\n"
        "addl %3,%0\n"
        "addl %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm1,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "addl %3,%0\n"
        "addl %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
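
/* sse8/sse16 compute the sum of squared errors; scalar sketch:
 *     for (y = 0; y < h; y++, pix1 += line_size, pix2 += line_size)
 *         for (x = 0; x < 16; x++) {                  // 8 for sse8
 *             int d = pix1[x] - pix2[x];
 *             sum += d * d;
 *         }
 * |pix1 - pix2| is built from two saturating subtractions OR'ed together,
 * so pmaddwd can square it without any sign handling. */
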
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "addl %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "addl %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "addl %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "addl %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" (line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}
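
/* hf_noise8 sums the absolute second-order differences of a block,
 * roughly (scalar sketch, ls = line_size):
 *     for (y = 0; y < h - 1; y++, pix1 += ls)
 *         for (x = 0; x < 7; x++)
 *             sum += ABS((pix1[x]    - pix1[x+1])
 *                      - (pix1[x+ls] - pix1[x+ls+1]));
 * i.e. the horizontal gradient differentiated once more vertically, a
 * cheap estimate of high-frequency noise for the nsse comparators below. */
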
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "addl %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "addl %2,%0\n"
        "1:\n"
        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"
        "addl %2,%0\n"
        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"
        "addl %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" (line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}

static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int score1= sse16_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}

static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "addl %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "addl %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "addl %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "addl %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "addl %3,%0\n"\
    "addl %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "addl %3,%0\n"
        "addl %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "addl %3,%0\n"\
    "addl %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "addl %3,%0\n"
        "addl %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    int i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "addl $16, %0 \n\t"
        "cmpl %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    int i=0;
    uint8_t l, lt;

    asm volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "addl $8, %0 \n\t"
        "cmpl %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
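
/* This is the HuffYUV "median" predictor; per pixel (scalar sketch):
 *     pred   = mid_pred(L, T, L + T - LT);
 *     dst[i] = src2[i] - pred;
 * with L the pixel to the left, T the one above and LT the one above-left.
 * The MMX2 version computes the median of 8 pixels at once via
 * pminub/pmaxub; only dst[0] needs the scalar fall-back above. */
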
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"

#define HADAMARD48\
    LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
    LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)

#define MMABS(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), " #a " \n\t"\
    "movq "#o"+16(%1), " #b " \n\t"\
    "movq "#o"+32(%1), " #c " \n\t"\
    "movq "#o"+48(%1), " #d " \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1) \n\t"\
    "movq "#b", "#o"+16(%1) \n\t"\
    "movq "#c", "#o"+32(%1) \n\t"\
    "movq "#d", "#o"+48(%1) \n\t"
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 112(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 120(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//      STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//      LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"
        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 112(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 120(%1) \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//      STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//      LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"
        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
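
/* QPEL_V_LOW (and the matching horizontal code below) evaluate the MPEG-4
 * quarter-pel interpolation kernel; per output pixel, roughly:
 *     out = clamp_0_255((20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5)
 * where x1..x4 are the symmetric pair sums named in the comments above,
 * the clamp comes from packuswb, and rounder is the ROUNDER argument
 * (ff_pw_16 / ff_pw_15 from the top of this file for the rnd / no_rnd
 * variants). */
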
  1426. #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
  1427. static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1428. uint64_t temp;\
  1429. \
  1430. asm volatile(\
  1431. "pxor %%mm7, %%mm7 \n\t"\
  1432. "1: \n\t"\
  1433. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1434. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1435. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1436. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1437. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1438. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1439. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1440. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1441. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1442. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1443. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1444. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1445. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1446. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1447. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1448. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1449. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1450. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1451. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1452. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1453. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1454. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1455. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1456. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1457. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1458. "paddw %6, %%mm6 \n\t"\
  1459. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1460. "psraw $5, %%mm0 \n\t"\
  1461. "movq %%mm0, %5 \n\t"\
  1462. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1463. \
  1464. "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
  1465. "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
  1466. "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
  1467. "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
  1468. "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
  1469. "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
  1470. "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
  1471. "paddw %%mm0, %%mm2 \n\t" /* b */\
  1472. "paddw %%mm5, %%mm3 \n\t" /* c */\
  1473. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1474. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1475. "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
  1476. "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
  1477. "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
  1478. "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
  1479. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1480. "paddw %%mm2, %%mm1 \n\t" /* a */\
  1481. "paddw %%mm6, %%mm4 \n\t" /* d */\
  1482. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1483. "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
  1484. "paddw %6, %%mm1 \n\t"\
  1485. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
  1486. "psraw $5, %%mm3 \n\t"\
  1487. "movq %5, %%mm1 \n\t"\
  1488. "packuswb %%mm3, %%mm1 \n\t"\
  1489. OP_MMX2(%%mm1, (%1),%%mm4, q)\
  1490. /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
  1491. \
  1492. "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
  1493. "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
  1494. "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
  1495. "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
  1496. "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
  1497. "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
  1498. "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
  1499. "paddw %%mm1, %%mm5 \n\t" /* b */\
  1500. "paddw %%mm4, %%mm0 \n\t" /* c */\
  1501. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1502. "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
  1503. "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
  1504. "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
  1505. "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
  1506. "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
  1507. "paddw %%mm3, %%mm2 \n\t" /* d */\
  1508. "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
  1509. "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
  1510. "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
  1511. "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
  1512. "paddw %%mm2, %%mm6 \n\t" /* a */\
  1513. "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
  1514. "paddw %6, %%mm0 \n\t"\
  1515. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1516. "psraw $5, %%mm0 \n\t"\
  1517. /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
  1518. \
  1519. "paddw %%mm5, %%mm3 \n\t" /* a */\
  1520. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
  1521. "paddw %%mm4, %%mm6 \n\t" /* b */\
  1522. "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
  1523. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
  1524. "paddw %%mm1, %%mm4 \n\t" /* c */\
  1525. "paddw %%mm2, %%mm5 \n\t" /* d */\
  1526. "paddw %%mm6, %%mm6 \n\t" /* 2b */\
  1527. "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
  1528. "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
  1529. "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
  1530. "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
  1531. "paddw %6, %%mm4 \n\t"\
  1532. "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
  1533. "psraw $5, %%mm4 \n\t"\
  1534. "packuswb %%mm4, %%mm0 \n\t"\
  1535. OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
  1536. \
  1537. "addl %3, %0 \n\t"\
  1538. "addl %4, %1 \n\t"\
  1539. "decl %2 \n\t"\
  1540. " jnz 1b \n\t"\
  1541. : "+a"(src), "+c"(dst), "+m"(h)\
  1542. : "d"(srcStride), "S"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1543. : "memory"\
  1544. );\
  1545. }\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "addl %3, %0 \n\t"\
        "addl %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "S"(srcStride), "D"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
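
/*
 * For reference, a minimal scalar sketch of the horizontal lowpass filter the
 * templates above implement (illustration only, hence not compiled in): each
 * output pixel is the MPEG-4 quarter-pel half-sample filter
 * (-1, 3, -6, 20, 20, -6, 3, -1), i.e. 20a - 6b + 3c - d in the register
 * comments above, plus ROUNDER, shifted right by 5 and clamped to [0,255]
 * by packuswb. The unrolled temp[] loops above additionally mirror the
 * src[] indices at the block borders.
 */
#if 0
static inline uint8_t qpel_h_lowpass_ref(const uint8_t *src, int rounder)
{
    int v = (src[ 0]+src[1])*20 - (src[-1]+src[2])*6
          + (src[-2]+src[3])*3  - (src[-3]+src[4]) + rounder;
    v >>= 5;
    return v < 0 ? 0 : v > 255 ? 255 : v; /* the clamp packuswb performs */
}
#endif
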
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
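    /* pass 1 (a descriptive note): widen the 17 source rows to 16-bit words; */\
    /* each 16-pixel row is split into four 4-pixel groups and each group is */\
    /* stored in its own 17-row bank of temp[], so the vertical filter below */\
    /* can process four columns per movq */\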
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "addl $8, %1 \n\t"\
        "addl %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
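    /* pass 2 (a descriptive note): apply the same 20/-6/3/-1 tap filter */\
    /* vertically down each bank; QPEL_V_LOW combines eight neighbouring */\
    /* rows, with the repeated offsets below mirroring rows at the borders */\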
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "addl $136, %0 \n\t"\
        "addl %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*4];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
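    /* same two-pass scheme as the 16-pixel version above, just with two */\
    /* 9-row banks and 8 output rows per bank (a descriptive note) */\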
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "addl $8, %1 \n\t"\
        "addl %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "addl %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "addl $72, %0 \n\t"\
        "addl %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
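
/*
 * The OP/OP_MMX2/OP_3DNOW parameters of the templates above abstract the
 * final store: PUT_OP is a plain mov, while the AVG variants reload the
 * destination and average it with the result before storing back. pavgusb
 * (3DNow!) and pavgb (MMX2) both compute the byte-wise rounded average
 * (a + b + 1) >> 1.
 */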
QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
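
/* The ROUNDER argument selects the rounding mode of the >>5 normalization:
   ff_pw_16 yields the rounded (x+16)>>5 form, ff_pw_15 the (x+15)>>5 form
   used by the *_no_rnd_* variants. */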
#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
c->put_ ## postfix1 = put_ ## postfix2;\
c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
c->avg_ ## postfix1 = avg_ ## postfix2;
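
/* SET_QPEL_FUNC fills the put_, put_no_rnd_ and avg_ prediction tables for
   one quarter-pel position at once; in the tables below, qpelN_mc<dx><dy>
   lands in qpel_pixels_tab[size][dx + 4*dy], with size 0 = 16x16, 1 = 8x8. */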

static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    int i=0;

    assert(ABS(scale) < 256);
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;

    asm volatile(
        "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
        "psrlw $15, %%mm6 \n\t" //  1w
        "pxor %%mm7, %%mm7 \n\t"
        "movd %4, %%mm5 \n\t"
        "punpcklwd %%mm5, %%mm5 \n\t"
        "punpcklwd %%mm5, %%mm5 \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "pmulhw %%mm5, %%mm0 \n\t"
        "pmulhw %%mm5, %%mm1 \n\t"
        "paddw %%mm6, %%mm0 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "psraw $1, %%mm0 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw (%2, %0), %%mm0 \n\t"
        "paddw 8(%2, %0), %%mm1 \n\t"
        "psraw $6, %%mm0 \n\t"
        "psraw $6, %%mm1 \n\t"
        "pmullw (%3, %0), %%mm0 \n\t"
        "pmullw 8(%3, %0), %%mm1 \n\t"
        "pmaddwd %%mm0, %%mm0 \n\t"
        "pmaddwd %%mm1, %%mm1 \n\t"
        "paddd %%mm1, %%mm0 \n\t"
        "psrld $4, %%mm0 \n\t"
        "paddd %%mm0, %%mm7 \n\t"
        "addl $16, %0 \n\t"
        "cmpl $128, %0 \n\t" //FIXME optimize & bench
        " jb 1b \n\t"
        "movq %%mm7, %%mm6 \n\t"
        "psrlq $32, %%mm7 \n\t"
        "paddd %%mm6, %%mm7 \n\t"
        "psrld $2, %%mm7 \n\t"
        "movd %%mm7, %0 \n\t"
        : "+r" (i)
        : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
    );
    return i;
}
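
/*
 * A scalar sketch of what the loop above computes, for reference (the scale
 * prefactor matches the C fallback kept in add_8x8basis_mmx below, and the
 * psraw $6 corresponds to the >>RECON_SHIFT normalization):
 *
 *     b    = (rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT-RECON_SHIFT-1)))
 *                       >> (BASIS_SHIFT-RECON_SHIFT))) >> 6;
 *     sum += ((weight[i]*b) * (weight[i]*b)) >> 4;
 *
 * with the final horizontal add and psrld $2 returning sum >> 2.
 */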

static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
    int i=0;

    if(ABS(scale) < 256){
        scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
        asm volatile(
            "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
            "psrlw $15, %%mm6 \n\t" //  1w
            "movd %3, %%mm5 \n\t"
            "punpcklwd %%mm5, %%mm5 \n\t"
            "punpcklwd %%mm5, %%mm5 \n\t"
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq 8(%1, %0), %%mm1 \n\t"
            "pmulhw %%mm5, %%mm0 \n\t"
            "pmulhw %%mm5, %%mm1 \n\t"
            "paddw %%mm6, %%mm0 \n\t"
            "paddw %%mm6, %%mm1 \n\t"
            "psraw $1, %%mm0 \n\t"
            "psraw $1, %%mm1 \n\t"
            "paddw (%2, %0), %%mm0 \n\t"
            "paddw 8(%2, %0), %%mm1 \n\t"
            "movq %%mm0, (%2, %0) \n\t"
            "movq %%mm1, 8(%2, %0) \n\t"
            "addl $16, %0 \n\t"
            "cmpl $128, %0 \n\t" //FIXME optimize & bench
            " jb 1b \n\t"
            : "+r" (i)
            : "r"(basis), "r"(rem), "g"(scale)
        );
    }else{
        for(i=0; i<8*8; i++){
            rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
        }
    }
}

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: these wrapper functions should be removed as soon as all IDCTs are converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();
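
    /* avctx->dsp_mask lets the application override the detected CPU flags:
       with FF_MM_FORCE set, the given flags are forced on; otherwise they
       are masked off */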
    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS

        if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
            c->idct_put= ff_simple_idct_put_mmx;
            c->idct_add= ff_simple_idct_add_mmx;
            c->idct    = ff_simple_idct_mmx;
            c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
        }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
            if(mm_flags & MM_MMXEXT){
                c->idct_put= ff_libmpeg2mmx2_idct_put;
                c->idct_add= ff_libmpeg2mmx2_idct_add;
                c->idct    = ff_mmxext_idct;
            }else{
                c->idct_put= ff_libmpeg2mmx_idct_put;
                c->idct_add= ff_libmpeg2mmx_idct_add;
                c->idct    = ff_mmx_idct;
            }
            c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
        }

        /* VP3 optimized DSP functions */
        if (mm_flags & MM_SSE2) {
            c->vp3_dsp_init = vp3_dsp_init_sse2;
            c->vp3_idct = vp3_idct_sse2;
        } else {
            c->vp3_dsp_init = vp3_dsp_init_mmx;
            c->vp3_idct = vp3_idct_mmx;
        }

#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }

#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}