You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3316 lines
119KB

  1. /*
  2. * MMX optimized DSP utils
  3. * Copyright (c) 2000, 2001 Fabrice Bellard.
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
  21. */
  22. #include "../dsputil.h"
  23. #include "../simple_idct.h"
  24. #include "../mpegvideo.h"
  25. #include "mmx.h"
  26. //#undef NDEBUG
  27. //#include <assert.h>
  28. extern const uint8_t ff_h263_loop_filter_strength[32];
  29. int mm_flags; /* multimedia extension flags */
  30. /* pixel operations */
  31. static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
  32. static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
  33. static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
  34. static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
  35. static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
  36. static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
  37. static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
  38. static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
  39. static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
  40. static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
  41. #define JUMPALIGN() __asm __volatile (".balign 8"::)
  42. #define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
  43. #define MOVQ_WONE(regd) \
  44. __asm __volatile ( \
  45. "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
  46. "psrlw $15, %%" #regd ::)
  47. #define MOVQ_BFE(regd) \
  48. __asm __volatile ( \
  49. "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
  50. "paddb %%" #regd ", %%" #regd " \n\t" ::)
  51. #ifndef PIC
  52. #define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
  53. #define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
  54. #else
  55. // for shared library it's better to use this way for accessing constants
  56. // pcmpeqd -> -1
  57. #define MOVQ_BONE(regd) \
  58. __asm __volatile ( \
  59. "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
  60. "psrlw $15, %%" #regd " \n\t" \
  61. "packuswb %%" #regd ", %%" #regd " \n\t" ::)
  62. #define MOVQ_WTWO(regd) \
  63. __asm __volatile ( \
  64. "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
  65. "psrlw $15, %%" #regd " \n\t" \
  66. "psllw $1, %%" #regd " \n\t"::)
  67. #endif
  68. // using regr as temporary and for the output result
  69. // first argument is unmodifed and second is trashed
  70. // regfe is supposed to contain 0xfefefefefefefefe
  71. #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
  72. "movq " #rega ", " #regr " \n\t"\
  73. "pand " #regb ", " #regr " \n\t"\
  74. "pxor " #rega ", " #regb " \n\t"\
  75. "pand " #regfe "," #regb " \n\t"\
  76. "psrlq $1, " #regb " \n\t"\
  77. "paddb " #regb ", " #regr " \n\t"
  78. #define PAVGB_MMX(rega, regb, regr, regfe) \
  79. "movq " #rega ", " #regr " \n\t"\
  80. "por " #regb ", " #regr " \n\t"\
  81. "pxor " #rega ", " #regb " \n\t"\
  82. "pand " #regfe "," #regb " \n\t"\
  83. "psrlq $1, " #regb " \n\t"\
  84. "psubb " #regb ", " #regr " \n\t"
  85. // mm6 is supposed to contain 0xfefefefefefefefe
  86. #define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
  87. "movq " #rega ", " #regr " \n\t"\
  88. "movq " #regc ", " #regp " \n\t"\
  89. "pand " #regb ", " #regr " \n\t"\
  90. "pand " #regd ", " #regp " \n\t"\
  91. "pxor " #rega ", " #regb " \n\t"\
  92. "pxor " #regc ", " #regd " \n\t"\
  93. "pand %%mm6, " #regb " \n\t"\
  94. "pand %%mm6, " #regd " \n\t"\
  95. "psrlq $1, " #regb " \n\t"\
  96. "psrlq $1, " #regd " \n\t"\
  97. "paddb " #regb ", " #regr " \n\t"\
  98. "paddb " #regd ", " #regp " \n\t"
  99. #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
  100. "movq " #rega ", " #regr " \n\t"\
  101. "movq " #regc ", " #regp " \n\t"\
  102. "por " #regb ", " #regr " \n\t"\
  103. "por " #regd ", " #regp " \n\t"\
  104. "pxor " #rega ", " #regb " \n\t"\
  105. "pxor " #regc ", " #regd " \n\t"\
  106. "pand %%mm6, " #regb " \n\t"\
  107. "pand %%mm6, " #regd " \n\t"\
  108. "psrlq $1, " #regd " \n\t"\
  109. "psrlq $1, " #regb " \n\t"\
  110. "psubb " #regb ", " #regr " \n\t"\
  111. "psubb " #regd ", " #regp " \n\t"
  112. /***********************************/
  113. /* MMX no rounding */
  114. #define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
  115. #define SET_RND MOVQ_WONE
  116. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
  117. #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
  118. #include "dsputil_mmx_rnd.h"
  119. #undef DEF
  120. #undef SET_RND
  121. #undef PAVGBP
  122. #undef PAVGB
  123. /***********************************/
  124. /* MMX rounding */
  125. #define DEF(x, y) x ## _ ## y ##_mmx
  126. #define SET_RND MOVQ_WTWO
  127. #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
  128. #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
  129. #include "dsputil_mmx_rnd.h"
  130. #undef DEF
  131. #undef SET_RND
  132. #undef PAVGBP
  133. #undef PAVGB
  134. /***********************************/
  135. /* 3Dnow specific */
  136. #define DEF(x) x ## _3dnow
  137. /* for Athlons PAVGUSB is prefered */
  138. #define PAVGB "pavgusb"
  139. #include "dsputil_mmx_avg.h"
  140. #undef DEF
  141. #undef PAVGB
  142. /***********************************/
  143. /* MMX2 specific */
  144. #define DEF(x) x ## _mmx2
  145. /* Introduced only in MMX2 set */
  146. #define PAVGB "pavgb"
  147. #include "dsputil_mmx_avg.h"
  148. #undef DEF
  149. #undef PAVGB
  150. /***********************************/
  151. /* standard MMX */
  152. #ifdef CONFIG_ENCODERS
  153. static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
  154. {
  155. asm volatile(
  156. "mov $-128, %%"REG_a" \n\t"
  157. "pxor %%mm7, %%mm7 \n\t"
  158. ".balign 16 \n\t"
  159. "1: \n\t"
  160. "movq (%0), %%mm0 \n\t"
  161. "movq (%0, %2), %%mm2 \n\t"
  162. "movq %%mm0, %%mm1 \n\t"
  163. "movq %%mm2, %%mm3 \n\t"
  164. "punpcklbw %%mm7, %%mm0 \n\t"
  165. "punpckhbw %%mm7, %%mm1 \n\t"
  166. "punpcklbw %%mm7, %%mm2 \n\t"
  167. "punpckhbw %%mm7, %%mm3 \n\t"
  168. "movq %%mm0, (%1, %%"REG_a")\n\t"
  169. "movq %%mm1, 8(%1, %%"REG_a")\n\t"
  170. "movq %%mm2, 16(%1, %%"REG_a")\n\t"
  171. "movq %%mm3, 24(%1, %%"REG_a")\n\t"
  172. "add %3, %0 \n\t"
  173. "add $32, %%"REG_a" \n\t"
  174. "js 1b \n\t"
  175. : "+r" (pixels)
  176. : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
  177. : "%"REG_a
  178. );
  179. }
  180. static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
  181. {
  182. asm volatile(
  183. "pxor %%mm7, %%mm7 \n\t"
  184. "mov $-128, %%"REG_a" \n\t"
  185. ".balign 16 \n\t"
  186. "1: \n\t"
  187. "movq (%0), %%mm0 \n\t"
  188. "movq (%1), %%mm2 \n\t"
  189. "movq %%mm0, %%mm1 \n\t"
  190. "movq %%mm2, %%mm3 \n\t"
  191. "punpcklbw %%mm7, %%mm0 \n\t"
  192. "punpckhbw %%mm7, %%mm1 \n\t"
  193. "punpcklbw %%mm7, %%mm2 \n\t"
  194. "punpckhbw %%mm7, %%mm3 \n\t"
  195. "psubw %%mm2, %%mm0 \n\t"
  196. "psubw %%mm3, %%mm1 \n\t"
  197. "movq %%mm0, (%2, %%"REG_a")\n\t"
  198. "movq %%mm1, 8(%2, %%"REG_a")\n\t"
  199. "add %3, %0 \n\t"
  200. "add %3, %1 \n\t"
  201. "add $16, %%"REG_a" \n\t"
  202. "jnz 1b \n\t"
  203. : "+r" (s1), "+r" (s2)
  204. : "r" (block+64), "r" ((long)stride)
  205. : "%"REG_a
  206. );
  207. }
  208. #endif //CONFIG_ENCODERS
  209. void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  210. {
  211. const DCTELEM *p;
  212. uint8_t *pix;
  213. /* read the pixels */
  214. p = block;
  215. pix = pixels;
  216. /* unrolled loop */
  217. __asm __volatile(
  218. "movq %3, %%mm0\n\t"
  219. "movq 8%3, %%mm1\n\t"
  220. "movq 16%3, %%mm2\n\t"
  221. "movq 24%3, %%mm3\n\t"
  222. "movq 32%3, %%mm4\n\t"
  223. "movq 40%3, %%mm5\n\t"
  224. "movq 48%3, %%mm6\n\t"
  225. "movq 56%3, %%mm7\n\t"
  226. "packuswb %%mm1, %%mm0\n\t"
  227. "packuswb %%mm3, %%mm2\n\t"
  228. "packuswb %%mm5, %%mm4\n\t"
  229. "packuswb %%mm7, %%mm6\n\t"
  230. "movq %%mm0, (%0)\n\t"
  231. "movq %%mm2, (%0, %1)\n\t"
  232. "movq %%mm4, (%0, %1, 2)\n\t"
  233. "movq %%mm6, (%0, %2)\n\t"
  234. ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
  235. :"memory");
  236. pix += line_size*4;
  237. p += 32;
  238. // if here would be an exact copy of the code above
  239. // compiler would generate some very strange code
  240. // thus using "r"
  241. __asm __volatile(
  242. "movq (%3), %%mm0\n\t"
  243. "movq 8(%3), %%mm1\n\t"
  244. "movq 16(%3), %%mm2\n\t"
  245. "movq 24(%3), %%mm3\n\t"
  246. "movq 32(%3), %%mm4\n\t"
  247. "movq 40(%3), %%mm5\n\t"
  248. "movq 48(%3), %%mm6\n\t"
  249. "movq 56(%3), %%mm7\n\t"
  250. "packuswb %%mm1, %%mm0\n\t"
  251. "packuswb %%mm3, %%mm2\n\t"
  252. "packuswb %%mm5, %%mm4\n\t"
  253. "packuswb %%mm7, %%mm6\n\t"
  254. "movq %%mm0, (%0)\n\t"
  255. "movq %%mm2, (%0, %1)\n\t"
  256. "movq %%mm4, (%0, %1, 2)\n\t"
  257. "movq %%mm6, (%0, %2)\n\t"
  258. ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
  259. :"memory");
  260. }
  261. static unsigned char __align8 vector128[8] =
  262. { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
  263. void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  264. {
  265. int i;
  266. movq_m2r(*vector128, mm1);
  267. for (i = 0; i < 8; i++) {
  268. movq_m2r(*(block), mm0);
  269. packsswb_m2r(*(block + 4), mm0);
  270. block += 8;
  271. paddb_r2r(mm1, mm0);
  272. movq_r2m(mm0, *pixels);
  273. pixels += line_size;
  274. }
  275. }
  276. void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
  277. {
  278. const DCTELEM *p;
  279. uint8_t *pix;
  280. int i;
  281. /* read the pixels */
  282. p = block;
  283. pix = pixels;
  284. MOVQ_ZERO(mm7);
  285. i = 4;
  286. do {
  287. __asm __volatile(
  288. "movq (%2), %%mm0\n\t"
  289. "movq 8(%2), %%mm1\n\t"
  290. "movq 16(%2), %%mm2\n\t"
  291. "movq 24(%2), %%mm3\n\t"
  292. "movq %0, %%mm4\n\t"
  293. "movq %1, %%mm6\n\t"
  294. "movq %%mm4, %%mm5\n\t"
  295. "punpcklbw %%mm7, %%mm4\n\t"
  296. "punpckhbw %%mm7, %%mm5\n\t"
  297. "paddsw %%mm4, %%mm0\n\t"
  298. "paddsw %%mm5, %%mm1\n\t"
  299. "movq %%mm6, %%mm5\n\t"
  300. "punpcklbw %%mm7, %%mm6\n\t"
  301. "punpckhbw %%mm7, %%mm5\n\t"
  302. "paddsw %%mm6, %%mm2\n\t"
  303. "paddsw %%mm5, %%mm3\n\t"
  304. "packuswb %%mm1, %%mm0\n\t"
  305. "packuswb %%mm3, %%mm2\n\t"
  306. "movq %%mm0, %0\n\t"
  307. "movq %%mm2, %1\n\t"
  308. :"+m"(*pix), "+m"(*(pix+line_size))
  309. :"r"(p)
  310. :"memory");
  311. pix += line_size*2;
  312. p += 16;
  313. } while (--i);
  314. }
  315. static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  316. {
  317. __asm __volatile(
  318. "lea (%3, %3), %%"REG_a" \n\t"
  319. ".balign 8 \n\t"
  320. "1: \n\t"
  321. "movd (%1), %%mm0 \n\t"
  322. "movd (%1, %3), %%mm1 \n\t"
  323. "movd %%mm0, (%2) \n\t"
  324. "movd %%mm1, (%2, %3) \n\t"
  325. "add %%"REG_a", %1 \n\t"
  326. "add %%"REG_a", %2 \n\t"
  327. "movd (%1), %%mm0 \n\t"
  328. "movd (%1, %3), %%mm1 \n\t"
  329. "movd %%mm0, (%2) \n\t"
  330. "movd %%mm1, (%2, %3) \n\t"
  331. "add %%"REG_a", %1 \n\t"
  332. "add %%"REG_a", %2 \n\t"
  333. "subl $4, %0 \n\t"
  334. "jnz 1b \n\t"
  335. : "+g"(h), "+r" (pixels), "+r" (block)
  336. : "r"((long)line_size)
  337. : "%"REG_a, "memory"
  338. );
  339. }
  340. static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  341. {
  342. __asm __volatile(
  343. "lea (%3, %3), %%"REG_a" \n\t"
  344. ".balign 8 \n\t"
  345. "1: \n\t"
  346. "movq (%1), %%mm0 \n\t"
  347. "movq (%1, %3), %%mm1 \n\t"
  348. "movq %%mm0, (%2) \n\t"
  349. "movq %%mm1, (%2, %3) \n\t"
  350. "add %%"REG_a", %1 \n\t"
  351. "add %%"REG_a", %2 \n\t"
  352. "movq (%1), %%mm0 \n\t"
  353. "movq (%1, %3), %%mm1 \n\t"
  354. "movq %%mm0, (%2) \n\t"
  355. "movq %%mm1, (%2, %3) \n\t"
  356. "add %%"REG_a", %1 \n\t"
  357. "add %%"REG_a", %2 \n\t"
  358. "subl $4, %0 \n\t"
  359. "jnz 1b \n\t"
  360. : "+g"(h), "+r" (pixels), "+r" (block)
  361. : "r"((long)line_size)
  362. : "%"REG_a, "memory"
  363. );
  364. }
  365. static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
  366. {
  367. __asm __volatile(
  368. "lea (%3, %3), %%"REG_a" \n\t"
  369. ".balign 8 \n\t"
  370. "1: \n\t"
  371. "movq (%1), %%mm0 \n\t"
  372. "movq 8(%1), %%mm4 \n\t"
  373. "movq (%1, %3), %%mm1 \n\t"
  374. "movq 8(%1, %3), %%mm5 \n\t"
  375. "movq %%mm0, (%2) \n\t"
  376. "movq %%mm4, 8(%2) \n\t"
  377. "movq %%mm1, (%2, %3) \n\t"
  378. "movq %%mm5, 8(%2, %3) \n\t"
  379. "add %%"REG_a", %1 \n\t"
  380. "add %%"REG_a", %2 \n\t"
  381. "movq (%1), %%mm0 \n\t"
  382. "movq 8(%1), %%mm4 \n\t"
  383. "movq (%1, %3), %%mm1 \n\t"
  384. "movq 8(%1, %3), %%mm5 \n\t"
  385. "movq %%mm0, (%2) \n\t"
  386. "movq %%mm4, 8(%2) \n\t"
  387. "movq %%mm1, (%2, %3) \n\t"
  388. "movq %%mm5, 8(%2, %3) \n\t"
  389. "add %%"REG_a", %1 \n\t"
  390. "add %%"REG_a", %2 \n\t"
  391. "subl $4, %0 \n\t"
  392. "jnz 1b \n\t"
  393. : "+g"(h), "+r" (pixels), "+r" (block)
  394. : "r"((long)line_size)
  395. : "%"REG_a, "memory"
  396. );
  397. }
  398. static void clear_blocks_mmx(DCTELEM *blocks)
  399. {
  400. __asm __volatile(
  401. "pxor %%mm7, %%mm7 \n\t"
  402. "mov $-128*6, %%"REG_a" \n\t"
  403. "1: \n\t"
  404. "movq %%mm7, (%0, %%"REG_a") \n\t"
  405. "movq %%mm7, 8(%0, %%"REG_a") \n\t"
  406. "movq %%mm7, 16(%0, %%"REG_a") \n\t"
  407. "movq %%mm7, 24(%0, %%"REG_a") \n\t"
  408. "add $32, %%"REG_a" \n\t"
  409. " js 1b \n\t"
  410. : : "r" (((uint8_t *)blocks)+128*6)
  411. : "%"REG_a
  412. );
  413. }
  414. #ifdef CONFIG_ENCODERS
  415. static int pix_sum16_mmx(uint8_t * pix, int line_size){
  416. const int h=16;
  417. int sum;
  418. long index= -line_size*h;
  419. __asm __volatile(
  420. "pxor %%mm7, %%mm7 \n\t"
  421. "pxor %%mm6, %%mm6 \n\t"
  422. "1: \n\t"
  423. "movq (%2, %1), %%mm0 \n\t"
  424. "movq (%2, %1), %%mm1 \n\t"
  425. "movq 8(%2, %1), %%mm2 \n\t"
  426. "movq 8(%2, %1), %%mm3 \n\t"
  427. "punpcklbw %%mm7, %%mm0 \n\t"
  428. "punpckhbw %%mm7, %%mm1 \n\t"
  429. "punpcklbw %%mm7, %%mm2 \n\t"
  430. "punpckhbw %%mm7, %%mm3 \n\t"
  431. "paddw %%mm0, %%mm1 \n\t"
  432. "paddw %%mm2, %%mm3 \n\t"
  433. "paddw %%mm1, %%mm3 \n\t"
  434. "paddw %%mm3, %%mm6 \n\t"
  435. "add %3, %1 \n\t"
  436. " js 1b \n\t"
  437. "movq %%mm6, %%mm5 \n\t"
  438. "psrlq $32, %%mm6 \n\t"
  439. "paddw %%mm5, %%mm6 \n\t"
  440. "movq %%mm6, %%mm5 \n\t"
  441. "psrlq $16, %%mm6 \n\t"
  442. "paddw %%mm5, %%mm6 \n\t"
  443. "movd %%mm6, %0 \n\t"
  444. "andl $0xFFFF, %0 \n\t"
  445. : "=&r" (sum), "+r" (index)
  446. : "r" (pix - index), "r" ((long)line_size)
  447. );
  448. return sum;
  449. }
  450. #endif //CONFIG_ENCODERS
  451. static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
  452. long i=0;
  453. asm volatile(
  454. "1: \n\t"
  455. "movq (%1, %0), %%mm0 \n\t"
  456. "movq (%2, %0), %%mm1 \n\t"
  457. "paddb %%mm0, %%mm1 \n\t"
  458. "movq %%mm1, (%2, %0) \n\t"
  459. "movq 8(%1, %0), %%mm0 \n\t"
  460. "movq 8(%2, %0), %%mm1 \n\t"
  461. "paddb %%mm0, %%mm1 \n\t"
  462. "movq %%mm1, 8(%2, %0) \n\t"
  463. "add $16, %0 \n\t"
  464. "cmp %3, %0 \n\t"
  465. " jb 1b \n\t"
  466. : "+r" (i)
  467. : "r"(src), "r"(dst), "r"((long)w-15)
  468. );
  469. for(; i<w; i++)
  470. dst[i+0] += src[i+0];
  471. }
  472. #define H263_LOOP_FILTER \
  473. "pxor %%mm7, %%mm7 \n\t"\
  474. "movq %0, %%mm0 \n\t"\
  475. "movq %0, %%mm1 \n\t"\
  476. "movq %3, %%mm2 \n\t"\
  477. "movq %3, %%mm3 \n\t"\
  478. "punpcklbw %%mm7, %%mm0 \n\t"\
  479. "punpckhbw %%mm7, %%mm1 \n\t"\
  480. "punpcklbw %%mm7, %%mm2 \n\t"\
  481. "punpckhbw %%mm7, %%mm3 \n\t"\
  482. "psubw %%mm2, %%mm0 \n\t"\
  483. "psubw %%mm3, %%mm1 \n\t"\
  484. "movq %1, %%mm2 \n\t"\
  485. "movq %1, %%mm3 \n\t"\
  486. "movq %2, %%mm4 \n\t"\
  487. "movq %2, %%mm5 \n\t"\
  488. "punpcklbw %%mm7, %%mm2 \n\t"\
  489. "punpckhbw %%mm7, %%mm3 \n\t"\
  490. "punpcklbw %%mm7, %%mm4 \n\t"\
  491. "punpckhbw %%mm7, %%mm5 \n\t"\
  492. "psubw %%mm2, %%mm4 \n\t"\
  493. "psubw %%mm3, %%mm5 \n\t"\
  494. "psllw $2, %%mm4 \n\t"\
  495. "psllw $2, %%mm5 \n\t"\
  496. "paddw %%mm0, %%mm4 \n\t"\
  497. "paddw %%mm1, %%mm5 \n\t"\
  498. "pxor %%mm6, %%mm6 \n\t"\
  499. "pcmpgtw %%mm4, %%mm6 \n\t"\
  500. "pcmpgtw %%mm5, %%mm7 \n\t"\
  501. "pxor %%mm6, %%mm4 \n\t"\
  502. "pxor %%mm7, %%mm5 \n\t"\
  503. "psubw %%mm6, %%mm4 \n\t"\
  504. "psubw %%mm7, %%mm5 \n\t"\
  505. "psrlw $3, %%mm4 \n\t"\
  506. "psrlw $3, %%mm5 \n\t"\
  507. "packuswb %%mm5, %%mm4 \n\t"\
  508. "packsswb %%mm7, %%mm6 \n\t"\
  509. "pxor %%mm7, %%mm7 \n\t"\
  510. "movd %4, %%mm2 \n\t"\
  511. "punpcklbw %%mm2, %%mm2 \n\t"\
  512. "punpcklbw %%mm2, %%mm2 \n\t"\
  513. "punpcklbw %%mm2, %%mm2 \n\t"\
  514. "psubusb %%mm4, %%mm2 \n\t"\
  515. "movq %%mm2, %%mm3 \n\t"\
  516. "psubusb %%mm4, %%mm3 \n\t"\
  517. "psubb %%mm3, %%mm2 \n\t"\
  518. "movq %1, %%mm3 \n\t"\
  519. "movq %2, %%mm4 \n\t"\
  520. "pxor %%mm6, %%mm3 \n\t"\
  521. "pxor %%mm6, %%mm4 \n\t"\
  522. "paddusb %%mm2, %%mm3 \n\t"\
  523. "psubusb %%mm2, %%mm4 \n\t"\
  524. "pxor %%mm6, %%mm3 \n\t"\
  525. "pxor %%mm6, %%mm4 \n\t"\
  526. "paddusb %%mm2, %%mm2 \n\t"\
  527. "packsswb %%mm1, %%mm0 \n\t"\
  528. "pcmpgtb %%mm0, %%mm7 \n\t"\
  529. "pxor %%mm7, %%mm0 \n\t"\
  530. "psubb %%mm7, %%mm0 \n\t"\
  531. "movq %%mm0, %%mm1 \n\t"\
  532. "psubusb %%mm2, %%mm0 \n\t"\
  533. "psubb %%mm0, %%mm1 \n\t"\
  534. "pand %5, %%mm1 \n\t"\
  535. "psrlw $2, %%mm1 \n\t"\
  536. "pxor %%mm7, %%mm1 \n\t"\
  537. "psubb %%mm7, %%mm1 \n\t"\
  538. "movq %0, %%mm5 \n\t"\
  539. "movq %3, %%mm6 \n\t"\
  540. "psubb %%mm1, %%mm5 \n\t"\
  541. "paddb %%mm1, %%mm6 \n\t"
  542. static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
  543. const int strength= ff_h263_loop_filter_strength[qscale];
  544. asm volatile(
  545. H263_LOOP_FILTER
  546. "movq %%mm3, %1 \n\t"
  547. "movq %%mm4, %2 \n\t"
  548. "movq %%mm5, %0 \n\t"
  549. "movq %%mm6, %3 \n\t"
  550. : "+m" (*(uint64_t*)(src - 2*stride)),
  551. "+m" (*(uint64_t*)(src - 1*stride)),
  552. "+m" (*(uint64_t*)(src + 0*stride)),
  553. "+m" (*(uint64_t*)(src + 1*stride))
  554. : "g" (2*strength), "m"(ff_pb_FC)
  555. );
  556. }
  557. static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
  558. asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
  559. "movd %4, %%mm0 \n\t"
  560. "movd %5, %%mm1 \n\t"
  561. "movd %6, %%mm2 \n\t"
  562. "movd %7, %%mm3 \n\t"
  563. "punpcklbw %%mm1, %%mm0 \n\t"
  564. "punpcklbw %%mm3, %%mm2 \n\t"
  565. "movq %%mm0, %%mm1 \n\t"
  566. "punpcklwd %%mm2, %%mm0 \n\t"
  567. "punpckhwd %%mm2, %%mm1 \n\t"
  568. "movd %%mm0, %0 \n\t"
  569. "punpckhdq %%mm0, %%mm0 \n\t"
  570. "movd %%mm0, %1 \n\t"
  571. "movd %%mm1, %2 \n\t"
  572. "punpckhdq %%mm1, %%mm1 \n\t"
  573. "movd %%mm1, %3 \n\t"
  574. : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
  575. "=m" (*(uint32_t*)(dst + 1*dst_stride)),
  576. "=m" (*(uint32_t*)(dst + 2*dst_stride)),
  577. "=m" (*(uint32_t*)(dst + 3*dst_stride))
  578. : "m" (*(uint32_t*)(src + 0*src_stride)),
  579. "m" (*(uint32_t*)(src + 1*src_stride)),
  580. "m" (*(uint32_t*)(src + 2*src_stride)),
  581. "m" (*(uint32_t*)(src + 3*src_stride))
  582. );
  583. }
  584. static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
  585. const int strength= ff_h263_loop_filter_strength[qscale];
  586. uint64_t temp[4] __attribute__ ((aligned(8)));
  587. uint8_t *btemp= (uint8_t*)temp;
  588. src -= 2;
  589. transpose4x4(btemp , src , 8, stride);
  590. transpose4x4(btemp+4, src + 4*stride, 8, stride);
  591. asm volatile(
  592. H263_LOOP_FILTER // 5 3 4 6
  593. : "+m" (temp[0]),
  594. "+m" (temp[1]),
  595. "+m" (temp[2]),
  596. "+m" (temp[3])
  597. : "g" (2*strength), "m"(ff_pb_FC)
  598. );
  599. asm volatile(
  600. "movq %%mm5, %%mm1 \n\t"
  601. "movq %%mm4, %%mm0 \n\t"
  602. "punpcklbw %%mm3, %%mm5 \n\t"
  603. "punpcklbw %%mm6, %%mm4 \n\t"
  604. "punpckhbw %%mm3, %%mm1 \n\t"
  605. "punpckhbw %%mm6, %%mm0 \n\t"
  606. "movq %%mm5, %%mm3 \n\t"
  607. "movq %%mm1, %%mm6 \n\t"
  608. "punpcklwd %%mm4, %%mm5 \n\t"
  609. "punpcklwd %%mm0, %%mm1 \n\t"
  610. "punpckhwd %%mm4, %%mm3 \n\t"
  611. "punpckhwd %%mm0, %%mm6 \n\t"
  612. "movd %%mm5, %0 \n\t"
  613. "punpckhdq %%mm5, %%mm5 \n\t"
  614. "movd %%mm5, %1 \n\t"
  615. "movd %%mm3, %2 \n\t"
  616. "punpckhdq %%mm3, %%mm3 \n\t"
  617. "movd %%mm3, %3 \n\t"
  618. "movd %%mm1, %4 \n\t"
  619. "punpckhdq %%mm1, %%mm1 \n\t"
  620. "movd %%mm1, %5 \n\t"
  621. "movd %%mm6, %6 \n\t"
  622. "punpckhdq %%mm6, %%mm6 \n\t"
  623. "movd %%mm6, %7 \n\t"
  624. : "=m" (*(uint32_t*)(src + 0*stride)),
  625. "=m" (*(uint32_t*)(src + 1*stride)),
  626. "=m" (*(uint32_t*)(src + 2*stride)),
  627. "=m" (*(uint32_t*)(src + 3*stride)),
  628. "=m" (*(uint32_t*)(src + 4*stride)),
  629. "=m" (*(uint32_t*)(src + 5*stride)),
  630. "=m" (*(uint32_t*)(src + 6*stride)),
  631. "=m" (*(uint32_t*)(src + 7*stride))
  632. );
  633. }
  634. #ifdef CONFIG_ENCODERS
  635. static int pix_norm1_mmx(uint8_t *pix, int line_size) {
  636. int tmp;
  637. asm volatile (
  638. "movl $16,%%ecx\n"
  639. "pxor %%mm0,%%mm0\n"
  640. "pxor %%mm7,%%mm7\n"
  641. "1:\n"
  642. "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
  643. "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
  644. "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
  645. "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
  646. "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
  647. "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
  648. "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
  649. "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
  650. "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
  651. "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
  652. "pmaddwd %%mm3,%%mm3\n"
  653. "pmaddwd %%mm4,%%mm4\n"
  654. "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
  655. pix2^2+pix3^2+pix6^2+pix7^2) */
  656. "paddd %%mm3,%%mm4\n"
  657. "paddd %%mm2,%%mm7\n"
  658. "add %2, %0\n"
  659. "paddd %%mm4,%%mm7\n"
  660. "dec %%ecx\n"
  661. "jnz 1b\n"
  662. "movq %%mm7,%%mm1\n"
  663. "psrlq $32, %%mm7\n" /* shift hi dword to lo */
  664. "paddd %%mm7,%%mm1\n"
  665. "movd %%mm1,%1\n"
  666. : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
  667. return tmp;
  668. }
  669. static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  670. int tmp;
  671. asm volatile (
  672. "movl %4,%%ecx\n"
  673. "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
  674. "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
  675. "1:\n"
  676. "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
  677. "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
  678. "movq %%mm1,%%mm5\n"
  679. "psubusb %%mm2,%%mm1\n"
  680. "psubusb %%mm5,%%mm2\n"
  681. "por %%mm1,%%mm2\n"
  682. "movq %%mm2,%%mm1\n"
  683. "punpckhbw %%mm0,%%mm2\n"
  684. "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
  685. "pmaddwd %%mm2,%%mm2\n"
  686. "pmaddwd %%mm1,%%mm1\n"
  687. "add %3,%0\n"
  688. "add %3,%1\n"
  689. "paddd %%mm2,%%mm1\n"
  690. "paddd %%mm1,%%mm7\n"
  691. "decl %%ecx\n"
  692. "jnz 1b\n"
  693. "movq %%mm7,%%mm1\n"
  694. "psrlq $32, %%mm7\n" /* shift hi dword to lo */
  695. "paddd %%mm7,%%mm1\n"
  696. "movd %%mm1,%2\n"
  697. : "+r" (pix1), "+r" (pix2), "=r"(tmp)
  698. : "r" ((long)line_size) , "m" (h)
  699. : "%ecx");
  700. return tmp;
  701. }
  702. static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  703. int tmp;
  704. asm volatile (
  705. "movl %4,%%ecx\n"
  706. "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
  707. "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
  708. "1:\n"
  709. "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
  710. "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
  711. "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
  712. "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
  713. /* todo: mm1-mm2, mm3-mm4 */
  714. /* algo: substract mm1 from mm2 with saturation and vice versa */
  715. /* OR the results to get absolute difference */
  716. "movq %%mm1,%%mm5\n"
  717. "movq %%mm3,%%mm6\n"
  718. "psubusb %%mm2,%%mm1\n"
  719. "psubusb %%mm4,%%mm3\n"
  720. "psubusb %%mm5,%%mm2\n"
  721. "psubusb %%mm6,%%mm4\n"
  722. "por %%mm1,%%mm2\n"
  723. "por %%mm3,%%mm4\n"
  724. /* now convert to 16-bit vectors so we can square them */
  725. "movq %%mm2,%%mm1\n"
  726. "movq %%mm4,%%mm3\n"
  727. "punpckhbw %%mm0,%%mm2\n"
  728. "punpckhbw %%mm0,%%mm4\n"
  729. "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
  730. "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
  731. "pmaddwd %%mm2,%%mm2\n"
  732. "pmaddwd %%mm4,%%mm4\n"
  733. "pmaddwd %%mm1,%%mm1\n"
  734. "pmaddwd %%mm3,%%mm3\n"
  735. "add %3,%0\n"
  736. "add %3,%1\n"
  737. "paddd %%mm2,%%mm1\n"
  738. "paddd %%mm4,%%mm3\n"
  739. "paddd %%mm1,%%mm7\n"
  740. "paddd %%mm3,%%mm7\n"
  741. "decl %%ecx\n"
  742. "jnz 1b\n"
  743. "movq %%mm7,%%mm1\n"
  744. "psrlq $32, %%mm7\n" /* shift hi dword to lo */
  745. "paddd %%mm7,%%mm1\n"
  746. "movd %%mm1,%2\n"
  747. : "+r" (pix1), "+r" (pix2), "=r"(tmp)
  748. : "r" ((long)line_size) , "m" (h)
  749. : "%ecx");
  750. return tmp;
  751. }
  752. static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
  753. int tmp;
  754. asm volatile (
  755. "movl %3,%%ecx\n"
  756. "pxor %%mm7,%%mm7\n"
  757. "pxor %%mm6,%%mm6\n"
  758. "movq (%0),%%mm0\n"
  759. "movq %%mm0, %%mm1\n"
  760. "psllq $8, %%mm0\n"
  761. "psrlq $8, %%mm1\n"
  762. "psrlq $8, %%mm0\n"
  763. "movq %%mm0, %%mm2\n"
  764. "movq %%mm1, %%mm3\n"
  765. "punpcklbw %%mm7,%%mm0\n"
  766. "punpcklbw %%mm7,%%mm1\n"
  767. "punpckhbw %%mm7,%%mm2\n"
  768. "punpckhbw %%mm7,%%mm3\n"
  769. "psubw %%mm1, %%mm0\n"
  770. "psubw %%mm3, %%mm2\n"
  771. "add %2,%0\n"
  772. "movq (%0),%%mm4\n"
  773. "movq %%mm4, %%mm1\n"
  774. "psllq $8, %%mm4\n"
  775. "psrlq $8, %%mm1\n"
  776. "psrlq $8, %%mm4\n"
  777. "movq %%mm4, %%mm5\n"
  778. "movq %%mm1, %%mm3\n"
  779. "punpcklbw %%mm7,%%mm4\n"
  780. "punpcklbw %%mm7,%%mm1\n"
  781. "punpckhbw %%mm7,%%mm5\n"
  782. "punpckhbw %%mm7,%%mm3\n"
  783. "psubw %%mm1, %%mm4\n"
  784. "psubw %%mm3, %%mm5\n"
  785. "psubw %%mm4, %%mm0\n"
  786. "psubw %%mm5, %%mm2\n"
  787. "pxor %%mm3, %%mm3\n"
  788. "pxor %%mm1, %%mm1\n"
  789. "pcmpgtw %%mm0, %%mm3\n\t"
  790. "pcmpgtw %%mm2, %%mm1\n\t"
  791. "pxor %%mm3, %%mm0\n"
  792. "pxor %%mm1, %%mm2\n"
  793. "psubw %%mm3, %%mm0\n"
  794. "psubw %%mm1, %%mm2\n"
  795. "paddw %%mm0, %%mm2\n"
  796. "paddw %%mm2, %%mm6\n"
  797. "add %2,%0\n"
  798. "1:\n"
  799. "movq (%0),%%mm0\n"
  800. "movq %%mm0, %%mm1\n"
  801. "psllq $8, %%mm0\n"
  802. "psrlq $8, %%mm1\n"
  803. "psrlq $8, %%mm0\n"
  804. "movq %%mm0, %%mm2\n"
  805. "movq %%mm1, %%mm3\n"
  806. "punpcklbw %%mm7,%%mm0\n"
  807. "punpcklbw %%mm7,%%mm1\n"
  808. "punpckhbw %%mm7,%%mm2\n"
  809. "punpckhbw %%mm7,%%mm3\n"
  810. "psubw %%mm1, %%mm0\n"
  811. "psubw %%mm3, %%mm2\n"
  812. "psubw %%mm0, %%mm4\n"
  813. "psubw %%mm2, %%mm5\n"
  814. "pxor %%mm3, %%mm3\n"
  815. "pxor %%mm1, %%mm1\n"
  816. "pcmpgtw %%mm4, %%mm3\n\t"
  817. "pcmpgtw %%mm5, %%mm1\n\t"
  818. "pxor %%mm3, %%mm4\n"
  819. "pxor %%mm1, %%mm5\n"
  820. "psubw %%mm3, %%mm4\n"
  821. "psubw %%mm1, %%mm5\n"
  822. "paddw %%mm4, %%mm5\n"
  823. "paddw %%mm5, %%mm6\n"
  824. "add %2,%0\n"
  825. "movq (%0),%%mm4\n"
  826. "movq %%mm4, %%mm1\n"
  827. "psllq $8, %%mm4\n"
  828. "psrlq $8, %%mm1\n"
  829. "psrlq $8, %%mm4\n"
  830. "movq %%mm4, %%mm5\n"
  831. "movq %%mm1, %%mm3\n"
  832. "punpcklbw %%mm7,%%mm4\n"
  833. "punpcklbw %%mm7,%%mm1\n"
  834. "punpckhbw %%mm7,%%mm5\n"
  835. "punpckhbw %%mm7,%%mm3\n"
  836. "psubw %%mm1, %%mm4\n"
  837. "psubw %%mm3, %%mm5\n"
  838. "psubw %%mm4, %%mm0\n"
  839. "psubw %%mm5, %%mm2\n"
  840. "pxor %%mm3, %%mm3\n"
  841. "pxor %%mm1, %%mm1\n"
  842. "pcmpgtw %%mm0, %%mm3\n\t"
  843. "pcmpgtw %%mm2, %%mm1\n\t"
  844. "pxor %%mm3, %%mm0\n"
  845. "pxor %%mm1, %%mm2\n"
  846. "psubw %%mm3, %%mm0\n"
  847. "psubw %%mm1, %%mm2\n"
  848. "paddw %%mm0, %%mm2\n"
  849. "paddw %%mm2, %%mm6\n"
  850. "add %2,%0\n"
  851. "subl $2, %%ecx\n"
  852. " jnz 1b\n"
  853. "movq %%mm6, %%mm0\n"
  854. "punpcklwd %%mm7,%%mm0\n"
  855. "punpckhwd %%mm7,%%mm6\n"
  856. "paddd %%mm0, %%mm6\n"
  857. "movq %%mm6,%%mm0\n"
  858. "psrlq $32, %%mm6\n"
  859. "paddd %%mm6,%%mm0\n"
  860. "movd %%mm0,%1\n"
  861. : "+r" (pix1), "=r"(tmp)
  862. : "r" ((long)line_size) , "g" (h-2)
  863. : "%ecx");
  864. return tmp;
  865. }
  866. static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
  867. int tmp;
  868. uint8_t * pix= pix1;
  869. asm volatile (
  870. "movl %3,%%ecx\n"
  871. "pxor %%mm7,%%mm7\n"
  872. "pxor %%mm6,%%mm6\n"
  873. "movq (%0),%%mm0\n"
  874. "movq 1(%0),%%mm1\n"
  875. "movq %%mm0, %%mm2\n"
  876. "movq %%mm1, %%mm3\n"
  877. "punpcklbw %%mm7,%%mm0\n"
  878. "punpcklbw %%mm7,%%mm1\n"
  879. "punpckhbw %%mm7,%%mm2\n"
  880. "punpckhbw %%mm7,%%mm3\n"
  881. "psubw %%mm1, %%mm0\n"
  882. "psubw %%mm3, %%mm2\n"
  883. "add %2,%0\n"
  884. "movq (%0),%%mm4\n"
  885. "movq 1(%0),%%mm1\n"
  886. "movq %%mm4, %%mm5\n"
  887. "movq %%mm1, %%mm3\n"
  888. "punpcklbw %%mm7,%%mm4\n"
  889. "punpcklbw %%mm7,%%mm1\n"
  890. "punpckhbw %%mm7,%%mm5\n"
  891. "punpckhbw %%mm7,%%mm3\n"
  892. "psubw %%mm1, %%mm4\n"
  893. "psubw %%mm3, %%mm5\n"
  894. "psubw %%mm4, %%mm0\n"
  895. "psubw %%mm5, %%mm2\n"
  896. "pxor %%mm3, %%mm3\n"
  897. "pxor %%mm1, %%mm1\n"
  898. "pcmpgtw %%mm0, %%mm3\n\t"
  899. "pcmpgtw %%mm2, %%mm1\n\t"
  900. "pxor %%mm3, %%mm0\n"
  901. "pxor %%mm1, %%mm2\n"
  902. "psubw %%mm3, %%mm0\n"
  903. "psubw %%mm1, %%mm2\n"
  904. "paddw %%mm0, %%mm2\n"
  905. "paddw %%mm2, %%mm6\n"
  906. "add %2,%0\n"
  907. "1:\n"
  908. "movq (%0),%%mm0\n"
  909. "movq 1(%0),%%mm1\n"
  910. "movq %%mm0, %%mm2\n"
  911. "movq %%mm1, %%mm3\n"
  912. "punpcklbw %%mm7,%%mm0\n"
  913. "punpcklbw %%mm7,%%mm1\n"
  914. "punpckhbw %%mm7,%%mm2\n"
  915. "punpckhbw %%mm7,%%mm3\n"
  916. "psubw %%mm1, %%mm0\n"
  917. "psubw %%mm3, %%mm2\n"
  918. "psubw %%mm0, %%mm4\n"
  919. "psubw %%mm2, %%mm5\n"
  920. "pxor %%mm3, %%mm3\n"
  921. "pxor %%mm1, %%mm1\n"
  922. "pcmpgtw %%mm4, %%mm3\n\t"
  923. "pcmpgtw %%mm5, %%mm1\n\t"
  924. "pxor %%mm3, %%mm4\n"
  925. "pxor %%mm1, %%mm5\n"
  926. "psubw %%mm3, %%mm4\n"
  927. "psubw %%mm1, %%mm5\n"
  928. "paddw %%mm4, %%mm5\n"
  929. "paddw %%mm5, %%mm6\n"
  930. "add %2,%0\n"
  931. "movq (%0),%%mm4\n"
  932. "movq 1(%0),%%mm1\n"
  933. "movq %%mm4, %%mm5\n"
  934. "movq %%mm1, %%mm3\n"
  935. "punpcklbw %%mm7,%%mm4\n"
  936. "punpcklbw %%mm7,%%mm1\n"
  937. "punpckhbw %%mm7,%%mm5\n"
  938. "punpckhbw %%mm7,%%mm3\n"
  939. "psubw %%mm1, %%mm4\n"
  940. "psubw %%mm3, %%mm5\n"
  941. "psubw %%mm4, %%mm0\n"
  942. "psubw %%mm5, %%mm2\n"
  943. "pxor %%mm3, %%mm3\n"
  944. "pxor %%mm1, %%mm1\n"
  945. "pcmpgtw %%mm0, %%mm3\n\t"
  946. "pcmpgtw %%mm2, %%mm1\n\t"
  947. "pxor %%mm3, %%mm0\n"
  948. "pxor %%mm1, %%mm2\n"
  949. "psubw %%mm3, %%mm0\n"
  950. "psubw %%mm1, %%mm2\n"
  951. "paddw %%mm0, %%mm2\n"
  952. "paddw %%mm2, %%mm6\n"
  953. "add %2,%0\n"
  954. "subl $2, %%ecx\n"
  955. " jnz 1b\n"
  956. "movq %%mm6, %%mm0\n"
  957. "punpcklwd %%mm7,%%mm0\n"
  958. "punpckhwd %%mm7,%%mm6\n"
  959. "paddd %%mm0, %%mm6\n"
  960. "movq %%mm6,%%mm0\n"
  961. "psrlq $32, %%mm6\n"
  962. "paddd %%mm6,%%mm0\n"
  963. "movd %%mm0,%1\n"
  964. : "+r" (pix1), "=r"(tmp)
  965. : "r" ((long)line_size) , "g" (h-2)
  966. : "%ecx");
  967. return tmp + hf_noise8_mmx(pix+8, line_size, h);
  968. }
  969. static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  970. int score1= sse16_mmx(c, pix1, pix2, line_size, h);
  971. int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
  972. if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
  973. else return score1 + ABS(score2)*8;
  974. }
  975. static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  976. int score1= sse8_mmx(c, pix1, pix2, line_size, h);
  977. int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
  978. if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
  979. else return score1 + ABS(score2)*8;
  980. }
  981. static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
  982. int tmp;
  983. assert( (((int)pix) & 7) == 0);
  984. assert((line_size &7) ==0);
  985. #define SUM(in0, in1, out0, out1) \
  986. "movq (%0), %%mm2\n"\
  987. "movq 8(%0), %%mm3\n"\
  988. "add %2,%0\n"\
  989. "movq %%mm2, " #out0 "\n"\
  990. "movq %%mm3, " #out1 "\n"\
  991. "psubusb " #in0 ", %%mm2\n"\
  992. "psubusb " #in1 ", %%mm3\n"\
  993. "psubusb " #out0 ", " #in0 "\n"\
  994. "psubusb " #out1 ", " #in1 "\n"\
  995. "por %%mm2, " #in0 "\n"\
  996. "por %%mm3, " #in1 "\n"\
  997. "movq " #in0 ", %%mm2\n"\
  998. "movq " #in1 ", %%mm3\n"\
  999. "punpcklbw %%mm7, " #in0 "\n"\
  1000. "punpcklbw %%mm7, " #in1 "\n"\
  1001. "punpckhbw %%mm7, %%mm2\n"\
  1002. "punpckhbw %%mm7, %%mm3\n"\
  1003. "paddw " #in1 ", " #in0 "\n"\
  1004. "paddw %%mm3, %%mm2\n"\
  1005. "paddw %%mm2, " #in0 "\n"\
  1006. "paddw " #in0 ", %%mm6\n"
  1007. asm volatile (
  1008. "movl %3,%%ecx\n"
  1009. "pxor %%mm6,%%mm6\n"
  1010. "pxor %%mm7,%%mm7\n"
  1011. "movq (%0),%%mm0\n"
  1012. "movq 8(%0),%%mm1\n"
  1013. "add %2,%0\n"
  1014. "subl $2, %%ecx\n"
  1015. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1016. "1:\n"
  1017. SUM(%%mm4, %%mm5, %%mm0, %%mm1)
  1018. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1019. "subl $2, %%ecx\n"
  1020. "jnz 1b\n"
  1021. "movq %%mm6,%%mm0\n"
  1022. "psrlq $32, %%mm6\n"
  1023. "paddw %%mm6,%%mm0\n"
  1024. "movq %%mm0,%%mm6\n"
  1025. "psrlq $16, %%mm0\n"
  1026. "paddw %%mm6,%%mm0\n"
  1027. "movd %%mm0,%1\n"
  1028. : "+r" (pix), "=r"(tmp)
  1029. : "r" ((long)line_size) , "m" (h)
  1030. : "%ecx");
  1031. return tmp & 0xFFFF;
  1032. }
  1033. #undef SUM
  1034. static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
  1035. int tmp;
  1036. assert( (((int)pix) & 7) == 0);
  1037. assert((line_size &7) ==0);
  1038. #define SUM(in0, in1, out0, out1) \
  1039. "movq (%0), " #out0 "\n"\
  1040. "movq 8(%0), " #out1 "\n"\
  1041. "add %2,%0\n"\
  1042. "psadbw " #out0 ", " #in0 "\n"\
  1043. "psadbw " #out1 ", " #in1 "\n"\
  1044. "paddw " #in1 ", " #in0 "\n"\
  1045. "paddw " #in0 ", %%mm6\n"
  1046. asm volatile (
  1047. "movl %3,%%ecx\n"
  1048. "pxor %%mm6,%%mm6\n"
  1049. "pxor %%mm7,%%mm7\n"
  1050. "movq (%0),%%mm0\n"
  1051. "movq 8(%0),%%mm1\n"
  1052. "add %2,%0\n"
  1053. "subl $2, %%ecx\n"
  1054. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1055. "1:\n"
  1056. SUM(%%mm4, %%mm5, %%mm0, %%mm1)
  1057. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1058. "subl $2, %%ecx\n"
  1059. "jnz 1b\n"
  1060. "movd %%mm6,%1\n"
  1061. : "+r" (pix), "=r"(tmp)
  1062. : "r" ((long)line_size) , "m" (h)
  1063. : "%ecx");
  1064. return tmp;
  1065. }
  1066. #undef SUM
  1067. static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  1068. int tmp;
  1069. assert( (((int)pix1) & 7) == 0);
  1070. assert( (((int)pix2) & 7) == 0);
  1071. assert((line_size &7) ==0);
  1072. #define SUM(in0, in1, out0, out1) \
  1073. "movq (%0),%%mm2\n"\
  1074. "movq (%1)," #out0 "\n"\
  1075. "movq 8(%0),%%mm3\n"\
  1076. "movq 8(%1)," #out1 "\n"\
  1077. "add %3,%0\n"\
  1078. "add %3,%1\n"\
  1079. "psubb " #out0 ", %%mm2\n"\
  1080. "psubb " #out1 ", %%mm3\n"\
  1081. "pxor %%mm7, %%mm2\n"\
  1082. "pxor %%mm7, %%mm3\n"\
  1083. "movq %%mm2, " #out0 "\n"\
  1084. "movq %%mm3, " #out1 "\n"\
  1085. "psubusb " #in0 ", %%mm2\n"\
  1086. "psubusb " #in1 ", %%mm3\n"\
  1087. "psubusb " #out0 ", " #in0 "\n"\
  1088. "psubusb " #out1 ", " #in1 "\n"\
  1089. "por %%mm2, " #in0 "\n"\
  1090. "por %%mm3, " #in1 "\n"\
  1091. "movq " #in0 ", %%mm2\n"\
  1092. "movq " #in1 ", %%mm3\n"\
  1093. "punpcklbw %%mm7, " #in0 "\n"\
  1094. "punpcklbw %%mm7, " #in1 "\n"\
  1095. "punpckhbw %%mm7, %%mm2\n"\
  1096. "punpckhbw %%mm7, %%mm3\n"\
  1097. "paddw " #in1 ", " #in0 "\n"\
  1098. "paddw %%mm3, %%mm2\n"\
  1099. "paddw %%mm2, " #in0 "\n"\
  1100. "paddw " #in0 ", %%mm6\n"
  1101. asm volatile (
  1102. "movl %4,%%ecx\n"
  1103. "pxor %%mm6,%%mm6\n"
  1104. "pcmpeqw %%mm7,%%mm7\n"
  1105. "psllw $15, %%mm7\n"
  1106. "packsswb %%mm7, %%mm7\n"
  1107. "movq (%0),%%mm0\n"
  1108. "movq (%1),%%mm2\n"
  1109. "movq 8(%0),%%mm1\n"
  1110. "movq 8(%1),%%mm3\n"
  1111. "add %3,%0\n"
  1112. "add %3,%1\n"
  1113. "subl $2, %%ecx\n"
  1114. "psubb %%mm2, %%mm0\n"
  1115. "psubb %%mm3, %%mm1\n"
  1116. "pxor %%mm7, %%mm0\n"
  1117. "pxor %%mm7, %%mm1\n"
  1118. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1119. "1:\n"
  1120. SUM(%%mm4, %%mm5, %%mm0, %%mm1)
  1121. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1122. "subl $2, %%ecx\n"
  1123. "jnz 1b\n"
  1124. "movq %%mm6,%%mm0\n"
  1125. "psrlq $32, %%mm6\n"
  1126. "paddw %%mm6,%%mm0\n"
  1127. "movq %%mm0,%%mm6\n"
  1128. "psrlq $16, %%mm0\n"
  1129. "paddw %%mm6,%%mm0\n"
  1130. "movd %%mm0,%2\n"
  1131. : "+r" (pix1), "+r" (pix2), "=r"(tmp)
  1132. : "r" ((long)line_size) , "m" (h)
  1133. : "%ecx");
  1134. return tmp & 0x7FFF;
  1135. }
  1136. #undef SUM
  1137. static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
  1138. int tmp;
  1139. assert( (((int)pix1) & 7) == 0);
  1140. assert( (((int)pix2) & 7) == 0);
  1141. assert((line_size &7) ==0);
  1142. #define SUM(in0, in1, out0, out1) \
  1143. "movq (%0)," #out0 "\n"\
  1144. "movq (%1),%%mm2\n"\
  1145. "movq 8(%0)," #out1 "\n"\
  1146. "movq 8(%1),%%mm3\n"\
  1147. "add %3,%0\n"\
  1148. "add %3,%1\n"\
  1149. "psubb %%mm2, " #out0 "\n"\
  1150. "psubb %%mm3, " #out1 "\n"\
  1151. "pxor %%mm7, " #out0 "\n"\
  1152. "pxor %%mm7, " #out1 "\n"\
  1153. "psadbw " #out0 ", " #in0 "\n"\
  1154. "psadbw " #out1 ", " #in1 "\n"\
  1155. "paddw " #in1 ", " #in0 "\n"\
  1156. "paddw " #in0 ", %%mm6\n"
  1157. asm volatile (
  1158. "movl %4,%%ecx\n"
  1159. "pxor %%mm6,%%mm6\n"
  1160. "pcmpeqw %%mm7,%%mm7\n"
  1161. "psllw $15, %%mm7\n"
  1162. "packsswb %%mm7, %%mm7\n"
  1163. "movq (%0),%%mm0\n"
  1164. "movq (%1),%%mm2\n"
  1165. "movq 8(%0),%%mm1\n"
  1166. "movq 8(%1),%%mm3\n"
  1167. "add %3,%0\n"
  1168. "add %3,%1\n"
  1169. "subl $2, %%ecx\n"
  1170. "psubb %%mm2, %%mm0\n"
  1171. "psubb %%mm3, %%mm1\n"
  1172. "pxor %%mm7, %%mm0\n"
  1173. "pxor %%mm7, %%mm1\n"
  1174. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1175. "1:\n"
  1176. SUM(%%mm4, %%mm5, %%mm0, %%mm1)
  1177. SUM(%%mm0, %%mm1, %%mm4, %%mm5)
  1178. "subl $2, %%ecx\n"
  1179. "jnz 1b\n"
  1180. "movd %%mm6,%2\n"
  1181. : "+r" (pix1), "+r" (pix2), "=r"(tmp)
  1182. : "r" ((long)line_size) , "m" (h)
  1183. : "%ecx");
  1184. return tmp;
  1185. }
  1186. #undef SUM
  1187. static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
  1188. long i=0;
  1189. asm volatile(
  1190. "1: \n\t"
  1191. "movq (%2, %0), %%mm0 \n\t"
  1192. "movq (%1, %0), %%mm1 \n\t"
  1193. "psubb %%mm0, %%mm1 \n\t"
  1194. "movq %%mm1, (%3, %0) \n\t"
  1195. "movq 8(%2, %0), %%mm0 \n\t"
  1196. "movq 8(%1, %0), %%mm1 \n\t"
  1197. "psubb %%mm0, %%mm1 \n\t"
  1198. "movq %%mm1, 8(%3, %0) \n\t"
  1199. "add $16, %0 \n\t"
  1200. "cmp %4, %0 \n\t"
  1201. " jb 1b \n\t"
  1202. : "+r" (i)
  1203. : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
  1204. );
  1205. for(; i<w; i++)
  1206. dst[i+0] = src1[i+0]-src2[i+0];
  1207. }
  1208. static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
  1209. long i=0;
  1210. uint8_t l, lt;
  1211. asm volatile(
  1212. "1: \n\t"
  1213. "movq -1(%1, %0), %%mm0 \n\t" // LT
  1214. "movq (%1, %0), %%mm1 \n\t" // T
  1215. "movq -1(%2, %0), %%mm2 \n\t" // L
  1216. "movq (%2, %0), %%mm3 \n\t" // X
  1217. "movq %%mm2, %%mm4 \n\t" // L
  1218. "psubb %%mm0, %%mm2 \n\t"
  1219. "paddb %%mm1, %%mm2 \n\t" // L + T - LT
  1220. "movq %%mm4, %%mm5 \n\t" // L
  1221. "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
  1222. "pminub %%mm5, %%mm1 \n\t" // min(T, L)
  1223. "pminub %%mm2, %%mm4 \n\t"
  1224. "pmaxub %%mm1, %%mm4 \n\t"
  1225. "psubb %%mm4, %%mm3 \n\t" // dst - pred
  1226. "movq %%mm3, (%3, %0) \n\t"
  1227. "add $8, %0 \n\t"
  1228. "cmp %4, %0 \n\t"
  1229. " jb 1b \n\t"
  1230. : "+r" (i)
  1231. : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
  1232. );
  1233. l= *left;
  1234. lt= *left_top;
  1235. dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
  1236. *left_top= src1[w-1];
  1237. *left = src2[w-1];
  1238. }
  1239. #define LBUTTERFLY2(a1,b1,a2,b2)\
  1240. "paddw " #b1 ", " #a1 " \n\t"\
  1241. "paddw " #b2 ", " #a2 " \n\t"\
  1242. "paddw " #b1 ", " #b1 " \n\t"\
  1243. "paddw " #b2 ", " #b2 " \n\t"\
  1244. "psubw " #a1 ", " #b1 " \n\t"\
  1245. "psubw " #a2 ", " #b2 " \n\t"
  1246. #define HADAMARD48\
  1247. LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
  1248. LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
  1249. LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
  1250. LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
  1251. LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
  1252. LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
  1253. #define MMABS(a,z)\
  1254. "pxor " #z ", " #z " \n\t"\
  1255. "pcmpgtw " #a ", " #z " \n\t"\
  1256. "pxor " #z ", " #a " \n\t"\
  1257. "psubw " #z ", " #a " \n\t"
  1258. #define MMABS_SUM(a,z, sum)\
  1259. "pxor " #z ", " #z " \n\t"\
  1260. "pcmpgtw " #a ", " #z " \n\t"\
  1261. "pxor " #z ", " #a " \n\t"\
  1262. "psubw " #z ", " #a " \n\t"\
  1263. "paddusw " #a ", " #sum " \n\t"
  1264. #define MMABS_MMX2(a,z)\
  1265. "pxor " #z ", " #z " \n\t"\
  1266. "psubw " #a ", " #z " \n\t"\
  1267. "pmaxsw " #z ", " #a " \n\t"
  1268. #define MMABS_SUM_MMX2(a,z, sum)\
  1269. "pxor " #z ", " #z " \n\t"\
  1270. "psubw " #a ", " #z " \n\t"\
  1271. "pmaxsw " #z ", " #a " \n\t"\
  1272. "paddusw " #a ", " #sum " \n\t"
  1273. #define SBUTTERFLY(a,b,t,n)\
  1274. "movq " #a ", " #t " \n\t" /* abcd */\
  1275. "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
  1276. "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
  1277. #define TRANSPOSE4(a,b,c,d,t)\
  1278. SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
  1279. SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
  1280. SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
  1281. SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
  1282. #define LOAD4(o, a, b, c, d)\
  1283. "movq "#o"(%1), " #a " \n\t"\
  1284. "movq "#o"+16(%1), " #b " \n\t"\
  1285. "movq "#o"+32(%1), " #c " \n\t"\
  1286. "movq "#o"+48(%1), " #d " \n\t"
  1287. #define STORE4(o, a, b, c, d)\
  1288. "movq "#a", "#o"(%1) \n\t"\
  1289. "movq "#b", "#o"+16(%1) \n\t"\
  1290. "movq "#c", "#o"+32(%1) \n\t"\
  1291. "movq "#d", "#o"+48(%1) \n\t"\
  1292. static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
  1293. uint64_t temp[16] __align8;
  1294. int sum=0;
  1295. assert(h==8);
  1296. diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
  1297. asm volatile(
  1298. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1299. LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
  1300. HADAMARD48
  1301. "movq %%mm7, 112(%1) \n\t"
  1302. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1303. STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
  1304. "movq 112(%1), %%mm7 \n\t"
  1305. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1306. STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
  1307. LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
  1308. LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1309. HADAMARD48
  1310. "movq %%mm7, 120(%1) \n\t"
  1311. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1312. STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
  1313. "movq 120(%1), %%mm7 \n\t"
  1314. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1315. "movq %%mm7, %%mm5 \n\t"//FIXME remove
  1316. "movq %%mm6, %%mm7 \n\t"
  1317. "movq %%mm0, %%mm6 \n\t"
  1318. // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
  1319. LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
  1320. // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1321. HADAMARD48
  1322. "movq %%mm7, 64(%1) \n\t"
  1323. MMABS(%%mm0, %%mm7)
  1324. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1325. MMABS_SUM(%%mm2, %%mm7, %%mm0)
  1326. MMABS_SUM(%%mm3, %%mm7, %%mm0)
  1327. MMABS_SUM(%%mm4, %%mm7, %%mm0)
  1328. MMABS_SUM(%%mm5, %%mm7, %%mm0)
  1329. MMABS_SUM(%%mm6, %%mm7, %%mm0)
  1330. "movq 64(%1), %%mm1 \n\t"
  1331. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1332. "movq %%mm0, 64(%1) \n\t"
  1333. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1334. LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
  1335. HADAMARD48
  1336. "movq %%mm7, (%1) \n\t"
  1337. MMABS(%%mm0, %%mm7)
  1338. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1339. MMABS_SUM(%%mm2, %%mm7, %%mm0)
  1340. MMABS_SUM(%%mm3, %%mm7, %%mm0)
  1341. MMABS_SUM(%%mm4, %%mm7, %%mm0)
  1342. MMABS_SUM(%%mm5, %%mm7, %%mm0)
  1343. MMABS_SUM(%%mm6, %%mm7, %%mm0)
  1344. "movq (%1), %%mm1 \n\t"
  1345. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1346. "movq 64(%1), %%mm1 \n\t"
  1347. MMABS_SUM(%%mm1, %%mm7, %%mm0)
  1348. "movq %%mm0, %%mm1 \n\t"
  1349. "psrlq $32, %%mm0 \n\t"
  1350. "paddusw %%mm1, %%mm0 \n\t"
  1351. "movq %%mm0, %%mm1 \n\t"
  1352. "psrlq $16, %%mm0 \n\t"
  1353. "paddusw %%mm1, %%mm0 \n\t"
  1354. "movd %%mm0, %0 \n\t"
  1355. : "=r" (sum)
  1356. : "r"(temp)
  1357. );
  1358. return sum&0xFFFF;
  1359. }
  1360. static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
  1361. uint64_t temp[16] __align8;
  1362. int sum=0;
  1363. assert(h==8);
  1364. diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
  1365. asm volatile(
  1366. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1367. LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
  1368. HADAMARD48
  1369. "movq %%mm7, 112(%1) \n\t"
  1370. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1371. STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
  1372. "movq 112(%1), %%mm7 \n\t"
  1373. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1374. STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
  1375. LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
  1376. LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1377. HADAMARD48
  1378. "movq %%mm7, 120(%1) \n\t"
  1379. TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
  1380. STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
  1381. "movq 120(%1), %%mm7 \n\t"
  1382. TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
  1383. "movq %%mm7, %%mm5 \n\t"//FIXME remove
  1384. "movq %%mm6, %%mm7 \n\t"
  1385. "movq %%mm0, %%mm6 \n\t"
  1386. // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
  1387. LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
  1388. // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
  1389. HADAMARD48
  1390. "movq %%mm7, 64(%1) \n\t"
  1391. MMABS_MMX2(%%mm0, %%mm7)
  1392. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1393. MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
  1394. MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
  1395. MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
  1396. MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
  1397. MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
  1398. "movq 64(%1), %%mm1 \n\t"
  1399. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1400. "movq %%mm0, 64(%1) \n\t"
  1401. LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
  1402. LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
  1403. HADAMARD48
  1404. "movq %%mm7, (%1) \n\t"
  1405. MMABS_MMX2(%%mm0, %%mm7)
  1406. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1407. MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
  1408. MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
  1409. MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
  1410. MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
  1411. MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
  1412. "movq (%1), %%mm1 \n\t"
  1413. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1414. "movq 64(%1), %%mm1 \n\t"
  1415. MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
  1416. "movq %%mm0, %%mm1 \n\t"
  1417. "psrlq $32, %%mm0 \n\t"
  1418. "paddusw %%mm1, %%mm0 \n\t"
  1419. "movq %%mm0, %%mm1 \n\t"
  1420. "psrlq $16, %%mm0 \n\t"
  1421. "paddusw %%mm1, %%mm0 \n\t"
  1422. "movd %%mm0, %0 \n\t"
  1423. : "=r" (sum)
  1424. : "r"(temp)
  1425. );
  1426. return sum&0xFFFF;
  1427. }
  1428. WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
  1429. WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
  1430. #endif //CONFIG_ENCODERS
  1431. #define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
  1432. #define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
  1433. #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
  1434. "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
  1435. "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
  1436. "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
  1437. "movq "#in7", " #m3 " \n\t" /* d */\
  1438. "movq "#in0", %%mm5 \n\t" /* D */\
  1439. "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
  1440. "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
  1441. "movq "#in1", %%mm5 \n\t" /* C */\
  1442. "movq "#in2", %%mm6 \n\t" /* B */\
  1443. "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
  1444. "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
  1445. "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
  1446. "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
  1447. "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
  1448. "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
  1449. "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
  1450. "psraw $5, %%mm5 \n\t"\
  1451. "packuswb %%mm5, %%mm5 \n\t"\
  1452. OP(%%mm5, out, %%mm7, d)
  1453. #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
  1454. static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1455. uint64_t temp;\
  1456. \
  1457. asm volatile(\
  1458. "pxor %%mm7, %%mm7 \n\t"\
  1459. "1: \n\t"\
  1460. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1461. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1462. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1463. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1464. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1465. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1466. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1467. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1468. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1469. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1470. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1471. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1472. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1473. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1474. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1475. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1476. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1477. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1478. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1479. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1480. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1481. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1482. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1483. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1484. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1485. "paddw %6, %%mm6 \n\t"\
  1486. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1487. "psraw $5, %%mm0 \n\t"\
  1488. "movq %%mm0, %5 \n\t"\
  1489. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1490. \
  1491. "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
  1492. "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
  1493. "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
  1494. "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
  1495. "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
  1496. "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
  1497. "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
  1498. "paddw %%mm0, %%mm2 \n\t" /* b */\
  1499. "paddw %%mm5, %%mm3 \n\t" /* c */\
  1500. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1501. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1502. "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
  1503. "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
  1504. "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
  1505. "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
  1506. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1507. "paddw %%mm2, %%mm1 \n\t" /* a */\
  1508. "paddw %%mm6, %%mm4 \n\t" /* d */\
  1509. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1510. "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
  1511. "paddw %6, %%mm1 \n\t"\
  1512. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
  1513. "psraw $5, %%mm3 \n\t"\
  1514. "movq %5, %%mm1 \n\t"\
  1515. "packuswb %%mm3, %%mm1 \n\t"\
  1516. OP_MMX2(%%mm1, (%1),%%mm4, q)\
  1517. /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
  1518. \
  1519. "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
  1520. "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
  1521. "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
  1522. "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
  1523. "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
  1524. "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
  1525. "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
  1526. "paddw %%mm1, %%mm5 \n\t" /* b */\
  1527. "paddw %%mm4, %%mm0 \n\t" /* c */\
  1528. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1529. "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
  1530. "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
  1531. "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
  1532. "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
  1533. "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
  1534. "paddw %%mm3, %%mm2 \n\t" /* d */\
  1535. "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
  1536. "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
  1537. "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
  1538. "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
  1539. "paddw %%mm2, %%mm6 \n\t" /* a */\
  1540. "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
  1541. "paddw %6, %%mm0 \n\t"\
  1542. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1543. "psraw $5, %%mm0 \n\t"\
  1544. /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
  1545. \
  1546. "paddw %%mm5, %%mm3 \n\t" /* a */\
  1547. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
  1548. "paddw %%mm4, %%mm6 \n\t" /* b */\
  1549. "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
  1550. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
  1551. "paddw %%mm1, %%mm4 \n\t" /* c */\
  1552. "paddw %%mm2, %%mm5 \n\t" /* d */\
  1553. "paddw %%mm6, %%mm6 \n\t" /* 2b */\
  1554. "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
  1555. "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
  1556. "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
  1557. "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
  1558. "paddw %6, %%mm4 \n\t"\
  1559. "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
  1560. "psraw $5, %%mm4 \n\t"\
  1561. "packuswb %%mm4, %%mm0 \n\t"\
  1562. OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
  1563. \
  1564. "add %3, %0 \n\t"\
  1565. "add %4, %1 \n\t"\
  1566. "decl %2 \n\t"\
  1567. " jnz 1b \n\t"\
  1568. : "+a"(src), "+c"(dst), "+m"(h)\
  1569. : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1570. : "memory"\
  1571. );\
  1572. }\
  1573. \
  1574. static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1575. int i;\
  1576. int16_t temp[16];\
  1577. /* quick HACK, XXX FIXME MUST be optimized */\
  1578. for(i=0; i<h; i++)\
  1579. {\
  1580. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1581. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1582. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1583. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1584. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1585. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
  1586. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
  1587. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
  1588. temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
  1589. temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
  1590. temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
  1591. temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
  1592. temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
  1593. temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
  1594. temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
  1595. temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
  1596. asm volatile(\
  1597. "movq (%0), %%mm0 \n\t"\
  1598. "movq 8(%0), %%mm1 \n\t"\
  1599. "paddw %2, %%mm0 \n\t"\
  1600. "paddw %2, %%mm1 \n\t"\
  1601. "psraw $5, %%mm0 \n\t"\
  1602. "psraw $5, %%mm1 \n\t"\
  1603. "packuswb %%mm1, %%mm0 \n\t"\
  1604. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1605. "movq 16(%0), %%mm0 \n\t"\
  1606. "movq 24(%0), %%mm1 \n\t"\
  1607. "paddw %2, %%mm0 \n\t"\
  1608. "paddw %2, %%mm1 \n\t"\
  1609. "psraw $5, %%mm0 \n\t"\
  1610. "psraw $5, %%mm1 \n\t"\
  1611. "packuswb %%mm1, %%mm0 \n\t"\
  1612. OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
  1613. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1614. : "memory"\
  1615. );\
  1616. dst+=dstStride;\
  1617. src+=srcStride;\
  1618. }\
  1619. }\
  1620. \
  1621. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1622. uint64_t temp;\
  1623. \
  1624. asm volatile(\
  1625. "pxor %%mm7, %%mm7 \n\t"\
  1626. "1: \n\t"\
  1627. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1628. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1629. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1630. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1631. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1632. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1633. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1634. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1635. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1636. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1637. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1638. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1639. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1640. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1641. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1642. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1643. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1644. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1645. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1646. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1647. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1648. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1649. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1650. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1651. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1652. "paddw %6, %%mm6 \n\t"\
  1653. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1654. "psraw $5, %%mm0 \n\t"\
  1655. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1656. \
  1657. "movd 5(%0), %%mm5 \n\t" /* FGHI */\
  1658. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
  1659. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
  1660. "paddw %%mm5, %%mm1 \n\t" /* a */\
  1661. "paddw %%mm6, %%mm2 \n\t" /* b */\
  1662. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
  1663. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
  1664. "paddw %%mm6, %%mm3 \n\t" /* c */\
  1665. "paddw %%mm5, %%mm4 \n\t" /* d */\
  1666. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1667. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1668. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1669. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1670. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
  1671. "paddw %6, %%mm1 \n\t"\
  1672. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
  1673. "psraw $5, %%mm3 \n\t"\
  1674. "packuswb %%mm3, %%mm0 \n\t"\
  1675. OP_MMX2(%%mm0, (%1), %%mm4, q)\
  1676. \
  1677. "add %3, %0 \n\t"\
  1678. "add %4, %1 \n\t"\
  1679. "decl %2 \n\t"\
  1680. " jnz 1b \n\t"\
  1681. : "+a"(src), "+c"(dst), "+m"(h)\
  1682. : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1683. : "memory"\
  1684. );\
  1685. }\
  1686. \
  1687. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1688. int i;\
  1689. int16_t temp[8];\
  1690. /* quick HACK, XXX FIXME MUST be optimized */\
  1691. for(i=0; i<h; i++)\
  1692. {\
  1693. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1694. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1695. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1696. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1697. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1698. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
  1699. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
  1700. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
  1701. asm volatile(\
  1702. "movq (%0), %%mm0 \n\t"\
  1703. "movq 8(%0), %%mm1 \n\t"\
  1704. "paddw %2, %%mm0 \n\t"\
  1705. "paddw %2, %%mm1 \n\t"\
  1706. "psraw $5, %%mm0 \n\t"\
  1707. "psraw $5, %%mm1 \n\t"\
  1708. "packuswb %%mm1, %%mm0 \n\t"\
  1709. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1710. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1711. :"memory"\
  1712. );\
  1713. dst+=dstStride;\
  1714. src+=srcStride;\
  1715. }\
  1716. }
  1717. #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
  1718. \
  1719. static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1720. uint64_t temp[17*4];\
  1721. uint64_t *temp_ptr= temp;\
  1722. int count= 17;\
  1723. \
  1724. /*FIXME unroll */\
  1725. asm volatile(\
  1726. "pxor %%mm7, %%mm7 \n\t"\
  1727. "1: \n\t"\
  1728. "movq (%0), %%mm0 \n\t"\
  1729. "movq (%0), %%mm1 \n\t"\
  1730. "movq 8(%0), %%mm2 \n\t"\
  1731. "movq 8(%0), %%mm3 \n\t"\
  1732. "punpcklbw %%mm7, %%mm0 \n\t"\
  1733. "punpckhbw %%mm7, %%mm1 \n\t"\
  1734. "punpcklbw %%mm7, %%mm2 \n\t"\
  1735. "punpckhbw %%mm7, %%mm3 \n\t"\
  1736. "movq %%mm0, (%1) \n\t"\
  1737. "movq %%mm1, 17*8(%1) \n\t"\
  1738. "movq %%mm2, 2*17*8(%1) \n\t"\
  1739. "movq %%mm3, 3*17*8(%1) \n\t"\
  1740. "add $8, %1 \n\t"\
  1741. "add %3, %0 \n\t"\
  1742. "decl %2 \n\t"\
  1743. " jnz 1b \n\t"\
  1744. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1745. : "r" ((long)srcStride)\
  1746. : "memory"\
  1747. );\
  1748. \
  1749. temp_ptr= temp;\
  1750. count=4;\
  1751. \
  1752. /*FIXME reorder for speed */\
  1753. asm volatile(\
  1754. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1755. "1: \n\t"\
  1756. "movq (%0), %%mm0 \n\t"\
  1757. "movq 8(%0), %%mm1 \n\t"\
  1758. "movq 16(%0), %%mm2 \n\t"\
  1759. "movq 24(%0), %%mm3 \n\t"\
  1760. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1761. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1762. "add %4, %1 \n\t"\
  1763. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1764. \
  1765. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1766. "add %4, %1 \n\t"\
  1767. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1768. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
  1769. "add %4, %1 \n\t"\
  1770. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
  1771. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
  1772. "add %4, %1 \n\t"\
  1773. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
  1774. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
  1775. "add %4, %1 \n\t"\
  1776. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
  1777. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
  1778. "add %4, %1 \n\t"\
  1779. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
  1780. \
  1781. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
  1782. "add %4, %1 \n\t" \
  1783. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
  1784. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
  1785. \
  1786. "add $136, %0 \n\t"\
  1787. "add %6, %1 \n\t"\
  1788. "decl %2 \n\t"\
  1789. " jnz 1b \n\t"\
  1790. \
  1791. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1792. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
  1793. :"memory"\
  1794. );\
  1795. }\
  1796. \
  1797. static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1798. uint64_t temp[9*2];\
  1799. uint64_t *temp_ptr= temp;\
  1800. int count= 9;\
  1801. \
  1802. /*FIXME unroll */\
  1803. asm volatile(\
  1804. "pxor %%mm7, %%mm7 \n\t"\
  1805. "1: \n\t"\
  1806. "movq (%0), %%mm0 \n\t"\
  1807. "movq (%0), %%mm1 \n\t"\
  1808. "punpcklbw %%mm7, %%mm0 \n\t"\
  1809. "punpckhbw %%mm7, %%mm1 \n\t"\
  1810. "movq %%mm0, (%1) \n\t"\
  1811. "movq %%mm1, 9*8(%1) \n\t"\
  1812. "add $8, %1 \n\t"\
  1813. "add %3, %0 \n\t"\
  1814. "decl %2 \n\t"\
  1815. " jnz 1b \n\t"\
  1816. : "+r" (src), "+r" (temp_ptr), "+r"(count)\
  1817. : "r" ((long)srcStride)\
  1818. : "memory"\
  1819. );\
  1820. \
  1821. temp_ptr= temp;\
  1822. count=2;\
  1823. \
  1824. /*FIXME reorder for speed */\
  1825. asm volatile(\
  1826. /*"pxor %%mm7, %%mm7 \n\t"*/\
  1827. "1: \n\t"\
  1828. "movq (%0), %%mm0 \n\t"\
  1829. "movq 8(%0), %%mm1 \n\t"\
  1830. "movq 16(%0), %%mm2 \n\t"\
  1831. "movq 24(%0), %%mm3 \n\t"\
  1832. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
  1833. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
  1834. "add %4, %1 \n\t"\
  1835. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
  1836. \
  1837. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
  1838. "add %4, %1 \n\t"\
  1839. QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
  1840. \
  1841. QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
  1842. "add %4, %1 \n\t"\
  1843. QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
  1844. QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
  1845. \
  1846. "add $72, %0 \n\t"\
  1847. "add %6, %1 \n\t"\
  1848. "decl %2 \n\t"\
  1849. " jnz 1b \n\t"\
  1850. \
  1851. : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
  1852. : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
  1853. : "memory"\
  1854. );\
  1855. }\
  1856. \
  1857. static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1858. OPNAME ## pixels8_mmx(dst, src, stride, 8);\
  1859. }\
  1860. \
  1861. static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1862. uint64_t temp[8];\
  1863. uint8_t * const half= (uint8_t*)temp;\
  1864. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1865. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1866. }\
  1867. \
  1868. static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1869. OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
  1870. }\
  1871. \
  1872. static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1873. uint64_t temp[8];\
  1874. uint8_t * const half= (uint8_t*)temp;\
  1875. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
  1876. OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
  1877. }\
  1878. \
  1879. static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1880. uint64_t temp[8];\
  1881. uint8_t * const half= (uint8_t*)temp;\
  1882. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1883. OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
  1884. }\
  1885. \
  1886. static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1887. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1888. }\
  1889. \
  1890. static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1891. uint64_t temp[8];\
  1892. uint8_t * const half= (uint8_t*)temp;\
  1893. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
  1894. OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
  1895. }\
  1896. static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1897. uint64_t half[8 + 9];\
  1898. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1899. uint8_t * const halfHV= ((uint8_t*)half);\
  1900. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1901. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1902. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1903. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1904. }\
  1905. static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1906. uint64_t half[8 + 9];\
  1907. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1908. uint8_t * const halfHV= ((uint8_t*)half);\
  1909. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1910. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1911. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1912. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1913. }\
  1914. static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1915. uint64_t half[8 + 9];\
  1916. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1917. uint8_t * const halfHV= ((uint8_t*)half);\
  1918. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1919. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1920. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1921. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1922. }\
  1923. static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1924. uint64_t half[8 + 9];\
  1925. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1926. uint8_t * const halfHV= ((uint8_t*)half);\
  1927. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1928. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1929. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1930. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1931. }\
  1932. static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1933. uint64_t half[8 + 9];\
  1934. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1935. uint8_t * const halfHV= ((uint8_t*)half);\
  1936. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1937. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1938. OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
  1939. }\
  1940. static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1941. uint64_t half[8 + 9];\
  1942. uint8_t * const halfH= ((uint8_t*)half) + 64;\
  1943. uint8_t * const halfHV= ((uint8_t*)half);\
  1944. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1945. put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
  1946. OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
  1947. }\
  1948. static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1949. uint64_t half[8 + 9];\
  1950. uint8_t * const halfH= ((uint8_t*)half);\
  1951. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1952. put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
  1953. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1954. }\
  1955. static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1956. uint64_t half[8 + 9];\
  1957. uint8_t * const halfH= ((uint8_t*)half);\
  1958. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1959. put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
  1960. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1961. }\
  1962. static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1963. uint64_t half[9];\
  1964. uint8_t * const halfH= ((uint8_t*)half);\
  1965. put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
  1966. OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
  1967. }\
  1968. static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  1969. OPNAME ## pixels16_mmx(dst, src, stride, 16);\
  1970. }\
  1971. \
  1972. static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1973. uint64_t temp[32];\
  1974. uint8_t * const half= (uint8_t*)temp;\
  1975. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1976. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1977. }\
  1978. \
  1979. static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1980. OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
  1981. }\
  1982. \
  1983. static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1984. uint64_t temp[32];\
  1985. uint8_t * const half= (uint8_t*)temp;\
  1986. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
  1987. OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
  1988. }\
  1989. \
  1990. static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1991. uint64_t temp[32];\
  1992. uint8_t * const half= (uint8_t*)temp;\
  1993. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  1994. OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
  1995. }\
  1996. \
  1997. static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  1998. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
  1999. }\
  2000. \
  2001. static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2002. uint64_t temp[32];\
  2003. uint8_t * const half= (uint8_t*)temp;\
  2004. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
  2005. OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
  2006. }\
  2007. static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2008. uint64_t half[16*2 + 17*2];\
  2009. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2010. uint8_t * const halfHV= ((uint8_t*)half);\
  2011. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2012. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2013. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2014. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2015. }\
  2016. static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2017. uint64_t half[16*2 + 17*2];\
  2018. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2019. uint8_t * const halfHV= ((uint8_t*)half);\
  2020. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2021. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2022. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2023. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2024. }\
  2025. static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2026. uint64_t half[16*2 + 17*2];\
  2027. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2028. uint8_t * const halfHV= ((uint8_t*)half);\
  2029. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2030. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2031. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2032. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2033. }\
  2034. static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2035. uint64_t half[16*2 + 17*2];\
  2036. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2037. uint8_t * const halfHV= ((uint8_t*)half);\
  2038. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2039. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2040. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2041. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2042. }\
  2043. static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2044. uint64_t half[16*2 + 17*2];\
  2045. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2046. uint8_t * const halfHV= ((uint8_t*)half);\
  2047. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2048. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2049. OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
  2050. }\
  2051. static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2052. uint64_t half[16*2 + 17*2];\
  2053. uint8_t * const halfH= ((uint8_t*)half) + 256;\
  2054. uint8_t * const halfHV= ((uint8_t*)half);\
  2055. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2056. put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
  2057. OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
  2058. }\
  2059. static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2060. uint64_t half[17*2];\
  2061. uint8_t * const halfH= ((uint8_t*)half);\
  2062. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2063. put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
  2064. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2065. }\
  2066. static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2067. uint64_t half[17*2];\
  2068. uint8_t * const halfH= ((uint8_t*)half);\
  2069. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2070. put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
  2071. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2072. }\
  2073. static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2074. uint64_t half[17*2];\
  2075. uint8_t * const halfH= ((uint8_t*)half);\
  2076. put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
  2077. OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
  2078. }
  2079. #define QPEL_H264V(A,B,C,D,E,F,OP)\
  2080. "movd (%0), "#F" \n\t"\
  2081. "movq "#C", %%mm6 \n\t"\
  2082. "paddw "#D", %%mm6 \n\t"\
  2083. "psllw $2, %%mm6 \n\t"\
  2084. "psubw "#B", %%mm6 \n\t"\
  2085. "psubw "#E", %%mm6 \n\t"\
  2086. "pmullw %4, %%mm6 \n\t"\
  2087. "add %2, %0 \n\t"\
  2088. "punpcklbw %%mm7, "#F" \n\t"\
  2089. "paddw %5, "#A" \n\t"\
  2090. "paddw "#F", "#A" \n\t"\
  2091. "paddw "#A", %%mm6 \n\t"\
  2092. "psraw $5, %%mm6 \n\t"\
  2093. "packuswb %%mm6, %%mm6 \n\t"\
  2094. OP(%%mm6, (%1), A, d)\
  2095. "add %3, %1 \n\t"
  2096. #define QPEL_H264HV(A,B,C,D,E,F,OF)\
  2097. "movd (%0), "#F" \n\t"\
  2098. "movq "#C", %%mm6 \n\t"\
  2099. "paddw "#D", %%mm6 \n\t"\
  2100. "psllw $2, %%mm6 \n\t"\
  2101. "psubw "#B", %%mm6 \n\t"\
  2102. "psubw "#E", %%mm6 \n\t"\
  2103. "pmullw %3, %%mm6 \n\t"\
  2104. "add %2, %0 \n\t"\
  2105. "punpcklbw %%mm7, "#F" \n\t"\
  2106. "paddw "#F", "#A" \n\t"\
  2107. "paddw "#A", %%mm6 \n\t"\
  2108. "movq %%mm6, "#OF"(%1) \n\t"
  2109. #define QPEL_H264(OPNAME, OP, MMX)\
  2110. static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2111. int h=4;\
  2112. \
  2113. asm volatile(\
  2114. "pxor %%mm7, %%mm7 \n\t"\
  2115. "movq %5, %%mm4 \n\t"\
  2116. "movq %6, %%mm5 \n\t"\
  2117. "1: \n\t"\
  2118. "movd -1(%0), %%mm1 \n\t"\
  2119. "movd (%0), %%mm2 \n\t"\
  2120. "movd 1(%0), %%mm3 \n\t"\
  2121. "movd 2(%0), %%mm0 \n\t"\
  2122. "punpcklbw %%mm7, %%mm1 \n\t"\
  2123. "punpcklbw %%mm7, %%mm2 \n\t"\
  2124. "punpcklbw %%mm7, %%mm3 \n\t"\
  2125. "punpcklbw %%mm7, %%mm0 \n\t"\
  2126. "paddw %%mm0, %%mm1 \n\t"\
  2127. "paddw %%mm3, %%mm2 \n\t"\
  2128. "movd -2(%0), %%mm0 \n\t"\
  2129. "movd 3(%0), %%mm3 \n\t"\
  2130. "punpcklbw %%mm7, %%mm0 \n\t"\
  2131. "punpcklbw %%mm7, %%mm3 \n\t"\
  2132. "paddw %%mm3, %%mm0 \n\t"\
  2133. "psllw $2, %%mm2 \n\t"\
  2134. "psubw %%mm1, %%mm2 \n\t"\
  2135. "pmullw %%mm4, %%mm2 \n\t"\
  2136. "paddw %%mm5, %%mm0 \n\t"\
  2137. "paddw %%mm2, %%mm0 \n\t"\
  2138. "psraw $5, %%mm0 \n\t"\
  2139. "packuswb %%mm0, %%mm0 \n\t"\
  2140. OP(%%mm0, (%1),%%mm6, d)\
  2141. "add %3, %0 \n\t"\
  2142. "add %4, %1 \n\t"\
  2143. "decl %2 \n\t"\
  2144. " jnz 1b \n\t"\
  2145. : "+a"(src), "+c"(dst), "+m"(h)\
  2146. : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2147. : "memory"\
  2148. );\
  2149. }\
  2150. static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2151. src -= 2*srcStride;\
  2152. asm volatile(\
  2153. "pxor %%mm7, %%mm7 \n\t"\
  2154. "movd (%0), %%mm0 \n\t"\
  2155. "add %2, %0 \n\t"\
  2156. "movd (%0), %%mm1 \n\t"\
  2157. "add %2, %0 \n\t"\
  2158. "movd (%0), %%mm2 \n\t"\
  2159. "add %2, %0 \n\t"\
  2160. "movd (%0), %%mm3 \n\t"\
  2161. "add %2, %0 \n\t"\
  2162. "movd (%0), %%mm4 \n\t"\
  2163. "add %2, %0 \n\t"\
  2164. "punpcklbw %%mm7, %%mm0 \n\t"\
  2165. "punpcklbw %%mm7, %%mm1 \n\t"\
  2166. "punpcklbw %%mm7, %%mm2 \n\t"\
  2167. "punpcklbw %%mm7, %%mm3 \n\t"\
  2168. "punpcklbw %%mm7, %%mm4 \n\t"\
  2169. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2170. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2171. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  2172. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  2173. \
  2174. : "+a"(src), "+c"(dst)\
  2175. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2176. : "memory"\
  2177. );\
  2178. }\
  2179. static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2180. int h=4;\
  2181. int w=3;\
  2182. src -= 2*srcStride+2;\
  2183. while(w--){\
  2184. asm volatile(\
  2185. "pxor %%mm7, %%mm7 \n\t"\
  2186. "movd (%0), %%mm0 \n\t"\
  2187. "add %2, %0 \n\t"\
  2188. "movd (%0), %%mm1 \n\t"\
  2189. "add %2, %0 \n\t"\
  2190. "movd (%0), %%mm2 \n\t"\
  2191. "add %2, %0 \n\t"\
  2192. "movd (%0), %%mm3 \n\t"\
  2193. "add %2, %0 \n\t"\
  2194. "movd (%0), %%mm4 \n\t"\
  2195. "add %2, %0 \n\t"\
  2196. "punpcklbw %%mm7, %%mm0 \n\t"\
  2197. "punpcklbw %%mm7, %%mm1 \n\t"\
  2198. "punpcklbw %%mm7, %%mm2 \n\t"\
  2199. "punpcklbw %%mm7, %%mm3 \n\t"\
  2200. "punpcklbw %%mm7, %%mm4 \n\t"\
  2201. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
  2202. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
  2203. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
  2204. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
  2205. \
  2206. : "+a"(src)\
  2207. : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
  2208. : "memory"\
  2209. );\
  2210. tmp += 4;\
  2211. src += 4 - 9*srcStride;\
  2212. }\
  2213. tmp -= 3*4;\
  2214. asm volatile(\
  2215. "movq %4, %%mm6 \n\t"\
  2216. "1: \n\t"\
  2217. "movq (%0), %%mm0 \n\t"\
  2218. "paddw 10(%0), %%mm0 \n\t"\
  2219. "movq 2(%0), %%mm1 \n\t"\
  2220. "paddw 8(%0), %%mm1 \n\t"\
  2221. "movq 4(%0), %%mm2 \n\t"\
  2222. "paddw 6(%0), %%mm2 \n\t"\
  2223. "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
  2224. "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
  2225. "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
  2226. "paddsw %%mm2, %%mm0 \n\t"\
  2227. "psraw $2, %%mm0 \n\t"/*((a-b)/4-b)/4 */\
  2228. "paddw %%mm6, %%mm2 \n\t"\
  2229. "paddw %%mm2, %%mm0 \n\t"\
  2230. "psraw $6, %%mm0 \n\t"\
  2231. "packuswb %%mm0, %%mm0 \n\t"\
  2232. OP(%%mm0, (%1),%%mm7, d)\
  2233. "add $24, %0 \n\t"\
  2234. "add %3, %1 \n\t"\
  2235. "decl %2 \n\t"\
  2236. " jnz 1b \n\t"\
  2237. : "+a"(tmp), "+c"(dst), "+m"(h)\
  2238. : "S"((long)dstStride), "m"(ff_pw_32)\
  2239. : "memory"\
  2240. );\
  2241. }\
  2242. \
  2243. static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2244. int h=8;\
  2245. asm volatile(\
  2246. "pxor %%mm7, %%mm7 \n\t"\
  2247. "movq %5, %%mm6 \n\t"\
  2248. "1: \n\t"\
  2249. "movq (%0), %%mm0 \n\t"\
  2250. "movq 1(%0), %%mm2 \n\t"\
  2251. "movq %%mm0, %%mm1 \n\t"\
  2252. "movq %%mm2, %%mm3 \n\t"\
  2253. "punpcklbw %%mm7, %%mm0 \n\t"\
  2254. "punpckhbw %%mm7, %%mm1 \n\t"\
  2255. "punpcklbw %%mm7, %%mm2 \n\t"\
  2256. "punpckhbw %%mm7, %%mm3 \n\t"\
  2257. "paddw %%mm2, %%mm0 \n\t"\
  2258. "paddw %%mm3, %%mm1 \n\t"\
  2259. "psllw $2, %%mm0 \n\t"\
  2260. "psllw $2, %%mm1 \n\t"\
  2261. "movq -1(%0), %%mm2 \n\t"\
  2262. "movq 2(%0), %%mm4 \n\t"\
  2263. "movq %%mm2, %%mm3 \n\t"\
  2264. "movq %%mm4, %%mm5 \n\t"\
  2265. "punpcklbw %%mm7, %%mm2 \n\t"\
  2266. "punpckhbw %%mm7, %%mm3 \n\t"\
  2267. "punpcklbw %%mm7, %%mm4 \n\t"\
  2268. "punpckhbw %%mm7, %%mm5 \n\t"\
  2269. "paddw %%mm4, %%mm2 \n\t"\
  2270. "paddw %%mm3, %%mm5 \n\t"\
  2271. "psubw %%mm2, %%mm0 \n\t"\
  2272. "psubw %%mm5, %%mm1 \n\t"\
  2273. "pmullw %%mm6, %%mm0 \n\t"\
  2274. "pmullw %%mm6, %%mm1 \n\t"\
  2275. "movd -2(%0), %%mm2 \n\t"\
  2276. "movd 7(%0), %%mm5 \n\t"\
  2277. "punpcklbw %%mm7, %%mm2 \n\t"\
  2278. "punpcklbw %%mm7, %%mm5 \n\t"\
  2279. "paddw %%mm3, %%mm2 \n\t"\
  2280. "paddw %%mm5, %%mm4 \n\t"\
  2281. "movq %6, %%mm5 \n\t"\
  2282. "paddw %%mm5, %%mm2 \n\t"\
  2283. "paddw %%mm5, %%mm4 \n\t"\
  2284. "paddw %%mm2, %%mm0 \n\t"\
  2285. "paddw %%mm4, %%mm1 \n\t"\
  2286. "psraw $5, %%mm0 \n\t"\
  2287. "psraw $5, %%mm1 \n\t"\
  2288. "packuswb %%mm1, %%mm0 \n\t"\
  2289. OP(%%mm0, (%1),%%mm5, q)\
  2290. "add %3, %0 \n\t"\
  2291. "add %4, %1 \n\t"\
  2292. "decl %2 \n\t"\
  2293. " jnz 1b \n\t"\
  2294. : "+a"(src), "+c"(dst), "+m"(h)\
  2295. : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2296. : "memory"\
  2297. );\
  2298. }\
  2299. \
  2300. static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2301. int h= 2;\
  2302. src -= 2*srcStride;\
  2303. \
  2304. while(h--){\
  2305. asm volatile(\
  2306. "pxor %%mm7, %%mm7 \n\t"\
  2307. "movd (%0), %%mm0 \n\t"\
  2308. "add %2, %0 \n\t"\
  2309. "movd (%0), %%mm1 \n\t"\
  2310. "add %2, %0 \n\t"\
  2311. "movd (%0), %%mm2 \n\t"\
  2312. "add %2, %0 \n\t"\
  2313. "movd (%0), %%mm3 \n\t"\
  2314. "add %2, %0 \n\t"\
  2315. "movd (%0), %%mm4 \n\t"\
  2316. "add %2, %0 \n\t"\
  2317. "punpcklbw %%mm7, %%mm0 \n\t"\
  2318. "punpcklbw %%mm7, %%mm1 \n\t"\
  2319. "punpcklbw %%mm7, %%mm2 \n\t"\
  2320. "punpcklbw %%mm7, %%mm3 \n\t"\
  2321. "punpcklbw %%mm7, %%mm4 \n\t"\
  2322. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2323. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2324. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  2325. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  2326. QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
  2327. QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
  2328. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  2329. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  2330. \
  2331. : "+a"(src), "+c"(dst)\
  2332. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  2333. : "memory"\
  2334. );\
  2335. src += 4-13*srcStride;\
  2336. dst += 4-8*dstStride;\
  2337. }\
  2338. }\
  2339. static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2340. int h=8;\
  2341. int w=4;\
  2342. src -= 2*srcStride+2;\
  2343. while(w--){\
  2344. asm volatile(\
  2345. "pxor %%mm7, %%mm7 \n\t"\
  2346. "movd (%0), %%mm0 \n\t"\
  2347. "add %2, %0 \n\t"\
  2348. "movd (%0), %%mm1 \n\t"\
  2349. "add %2, %0 \n\t"\
  2350. "movd (%0), %%mm2 \n\t"\
  2351. "add %2, %0 \n\t"\
  2352. "movd (%0), %%mm3 \n\t"\
  2353. "add %2, %0 \n\t"\
  2354. "movd (%0), %%mm4 \n\t"\
  2355. "add %2, %0 \n\t"\
  2356. "punpcklbw %%mm7, %%mm0 \n\t"\
  2357. "punpcklbw %%mm7, %%mm1 \n\t"\
  2358. "punpcklbw %%mm7, %%mm2 \n\t"\
  2359. "punpcklbw %%mm7, %%mm3 \n\t"\
  2360. "punpcklbw %%mm7, %%mm4 \n\t"\
  2361. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\
  2362. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\
  2363. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\
  2364. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\
  2365. QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\
  2366. QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\
  2367. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\
  2368. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\
  2369. \
  2370. : "+a"(src)\
  2371. : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
  2372. : "memory"\
  2373. );\
  2374. tmp += 4;\
  2375. src += 4 - 13*srcStride;\
  2376. }\
  2377. tmp -= 4*4;\
  2378. asm volatile(\
  2379. "movq %4, %%mm6 \n\t"\
  2380. "1: \n\t"\
  2381. "movq (%0), %%mm0 \n\t"\
  2382. "movq 8(%0), %%mm3 \n\t"\
  2383. "movq 2(%0), %%mm1 \n\t"\
  2384. "movq 10(%0), %%mm4 \n\t"\
  2385. "paddw %%mm4, %%mm0 \n\t"\
  2386. "paddw %%mm3, %%mm1 \n\t"\
  2387. "paddw 18(%0), %%mm3 \n\t"\
  2388. "paddw 16(%0), %%mm4 \n\t"\
  2389. "movq 4(%0), %%mm2 \n\t"\
  2390. "movq 12(%0), %%mm5 \n\t"\
  2391. "paddw 6(%0), %%mm2 \n\t"\
  2392. "paddw 14(%0), %%mm5 \n\t"\
  2393. "psubw %%mm1, %%mm0 \n\t"\
  2394. "psubw %%mm4, %%mm3 \n\t"\
  2395. "psraw $2, %%mm0 \n\t"\
  2396. "psraw $2, %%mm3 \n\t"\
  2397. "psubw %%mm1, %%mm0 \n\t"\
  2398. "psubw %%mm4, %%mm3 \n\t"\
  2399. "paddsw %%mm2, %%mm0 \n\t"\
  2400. "paddsw %%mm5, %%mm3 \n\t"\
  2401. "psraw $2, %%mm0 \n\t"\
  2402. "psraw $2, %%mm3 \n\t"\
  2403. "paddw %%mm6, %%mm2 \n\t"\
  2404. "paddw %%mm6, %%mm5 \n\t"\
  2405. "paddw %%mm2, %%mm0 \n\t"\
  2406. "paddw %%mm5, %%mm3 \n\t"\
  2407. "psraw $6, %%mm0 \n\t"\
  2408. "psraw $6, %%mm3 \n\t"\
  2409. "packuswb %%mm3, %%mm0 \n\t"\
  2410. OP(%%mm0, (%1),%%mm7, q)\
  2411. "add $32, %0 \n\t"\
  2412. "add %3, %1 \n\t"\
  2413. "decl %2 \n\t"\
  2414. " jnz 1b \n\t"\
  2415. : "+a"(tmp), "+c"(dst), "+m"(h)\
  2416. : "S"((long)dstStride), "m"(ff_pw_32)\
  2417. : "memory"\
  2418. );\
  2419. }\
  2420. static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2421. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2422. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2423. src += 8*srcStride;\
  2424. dst += 8*dstStride;\
  2425. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2426. OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2427. }\
  2428. \
  2429. static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  2430. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2431. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2432. src += 8*srcStride;\
  2433. dst += 8*dstStride;\
  2434. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  2435. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  2436. }\
  2437. \
  2438. static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  2439. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
  2440. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
  2441. src += 8*srcStride;\
  2442. dst += 8*dstStride;\
  2443. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\
  2444. OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\
  2445. }\
  2446. #define H264_MC(OPNAME, SIZE, MMX) \
  2447. static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
  2448. OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
  2449. }\
  2450. \
  2451. static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2452. uint64_t temp[SIZE*SIZE/8];\
  2453. uint8_t * const half= (uint8_t*)temp;\
  2454. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
  2455. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
  2456. }\
  2457. \
  2458. static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2459. OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
  2460. }\
  2461. \
  2462. static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2463. uint64_t temp[SIZE*SIZE/8];\
  2464. uint8_t * const half= (uint8_t*)temp;\
  2465. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
  2466. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\
  2467. }\
  2468. \
  2469. static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2470. uint64_t temp[SIZE*SIZE/8];\
  2471. uint8_t * const half= (uint8_t*)temp;\
  2472. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
  2473. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
  2474. }\
  2475. \
  2476. static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2477. OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
  2478. }\
  2479. \
  2480. static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2481. uint64_t temp[SIZE*SIZE/8];\
  2482. uint8_t * const half= (uint8_t*)temp;\
  2483. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
  2484. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
  2485. }\
  2486. \
  2487. static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2488. uint64_t temp[SIZE*SIZE/4];\
  2489. uint8_t * const halfH= (uint8_t*)temp;\
  2490. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2491. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2492. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2493. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2494. }\
  2495. \
  2496. static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2497. uint64_t temp[SIZE*SIZE/4];\
  2498. uint8_t * const halfH= (uint8_t*)temp;\
  2499. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2500. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2501. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2502. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2503. }\
  2504. \
  2505. static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2506. uint64_t temp[SIZE*SIZE/4];\
  2507. uint8_t * const halfH= (uint8_t*)temp;\
  2508. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2509. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2510. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2511. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2512. }\
  2513. \
  2514. static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2515. uint64_t temp[SIZE*SIZE/4];\
  2516. uint8_t * const halfH= (uint8_t*)temp;\
  2517. uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
  2518. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2519. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2520. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
  2521. }\
  2522. \
  2523. static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2524. uint64_t temp[SIZE*(SIZE+8)/4];\
  2525. int16_t * const tmp= (int16_t*)temp;\
  2526. OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
  2527. }\
  2528. \
  2529. static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2530. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2531. uint8_t * const halfH= (uint8_t*)temp;\
  2532. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2533. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2534. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
  2535. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2536. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
  2537. }\
  2538. \
  2539. static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2540. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2541. uint8_t * const halfH= (uint8_t*)temp;\
  2542. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2543. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2544. put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
  2545. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2546. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
  2547. }\
  2548. \
  2549. static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2550. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2551. uint8_t * const halfV= (uint8_t*)temp;\
  2552. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2553. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2554. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
  2555. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2556. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
  2557. }\
  2558. \
  2559. static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
  2560. uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
  2561. uint8_t * const halfV= (uint8_t*)temp;\
  2562. uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
  2563. int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
  2564. put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
  2565. put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
  2566. OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
  2567. }\
  2568. #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
  2569. #define AVG_3DNOW_OP(a,b,temp, size) \
  2570. "mov" #size " " #b ", " #temp " \n\t"\
  2571. "pavgusb " #temp ", " #a " \n\t"\
  2572. "mov" #size " " #a ", " #b " \n\t"
  2573. #define AVG_MMX2_OP(a,b,temp, size) \
  2574. "mov" #size " " #b ", " #temp " \n\t"\
  2575. "pavgb " #temp ", " #a " \n\t"\
  2576. "mov" #size " " #a ", " #b " \n\t"
  2577. QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
  2578. QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
  2579. QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
  2580. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
  2581. QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
  2582. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
  2583. QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
  2584. QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
  2585. QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
  2586. QPEL_H264(put_ , PUT_OP, 3dnow)
  2587. QPEL_H264(avg_ , AVG_3DNOW_OP, 3dnow)
  2588. QPEL_H264(put_ , PUT_OP, mmx2)
  2589. QPEL_H264(avg_ , AVG_MMX2_OP, mmx2)
  2590. H264_MC(put_, 4, 3dnow)
  2591. H264_MC(put_, 8, 3dnow)
  2592. H264_MC(put_, 16,3dnow)
  2593. H264_MC(avg_, 4, 3dnow)
  2594. H264_MC(avg_, 8, 3dnow)
  2595. H264_MC(avg_, 16,3dnow)
  2596. H264_MC(put_, 4, mmx2)
  2597. H264_MC(put_, 8, mmx2)
  2598. H264_MC(put_, 16,mmx2)
  2599. H264_MC(avg_, 4, mmx2)
  2600. H264_MC(avg_, 8, mmx2)
  2601. H264_MC(avg_, 16,mmx2)
  2602. #if 0
  2603. static void just_return() { return; }
  2604. #endif
  2605. #define SET_QPEL_FUNC(postfix1, postfix2) \
  2606. c->put_ ## postfix1 = put_ ## postfix2;\
  2607. c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
  2608. c->avg_ ## postfix1 = avg_ ## postfix2;
  2609. static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
  2610. long i=0;
  2611. assert(ABS(scale) < 256);
  2612. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2613. asm volatile(
  2614. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2615. "psrlw $15, %%mm6 \n\t" // 1w
  2616. "pxor %%mm7, %%mm7 \n\t"
  2617. "movd %4, %%mm5 \n\t"
  2618. "punpcklwd %%mm5, %%mm5 \n\t"
  2619. "punpcklwd %%mm5, %%mm5 \n\t"
  2620. "1: \n\t"
  2621. "movq (%1, %0), %%mm0 \n\t"
  2622. "movq 8(%1, %0), %%mm1 \n\t"
  2623. "pmulhw %%mm5, %%mm0 \n\t"
  2624. "pmulhw %%mm5, %%mm1 \n\t"
  2625. "paddw %%mm6, %%mm0 \n\t"
  2626. "paddw %%mm6, %%mm1 \n\t"
  2627. "psraw $1, %%mm0 \n\t"
  2628. "psraw $1, %%mm1 \n\t"
  2629. "paddw (%2, %0), %%mm0 \n\t"
  2630. "paddw 8(%2, %0), %%mm1 \n\t"
  2631. "psraw $6, %%mm0 \n\t"
  2632. "psraw $6, %%mm1 \n\t"
  2633. "pmullw (%3, %0), %%mm0 \n\t"
  2634. "pmullw 8(%3, %0), %%mm1 \n\t"
  2635. "pmaddwd %%mm0, %%mm0 \n\t"
  2636. "pmaddwd %%mm1, %%mm1 \n\t"
  2637. "paddd %%mm1, %%mm0 \n\t"
  2638. "psrld $4, %%mm0 \n\t"
  2639. "paddd %%mm0, %%mm7 \n\t"
  2640. "add $16, %0 \n\t"
  2641. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2642. " jb 1b \n\t"
  2643. "movq %%mm7, %%mm6 \n\t"
  2644. "psrlq $32, %%mm7 \n\t"
  2645. "paddd %%mm6, %%mm7 \n\t"
  2646. "psrld $2, %%mm7 \n\t"
  2647. "movd %%mm7, %0 \n\t"
  2648. : "+r" (i)
  2649. : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
  2650. );
  2651. return i;
  2652. }
  2653. static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
  2654. long i=0;
  2655. if(ABS(scale) < 256){
  2656. scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
  2657. asm volatile(
  2658. "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
  2659. "psrlw $15, %%mm6 \n\t" // 1w
  2660. "movd %3, %%mm5 \n\t"
  2661. "punpcklwd %%mm5, %%mm5 \n\t"
  2662. "punpcklwd %%mm5, %%mm5 \n\t"
  2663. "1: \n\t"
  2664. "movq (%1, %0), %%mm0 \n\t"
  2665. "movq 8(%1, %0), %%mm1 \n\t"
  2666. "pmulhw %%mm5, %%mm0 \n\t"
  2667. "pmulhw %%mm5, %%mm1 \n\t"
  2668. "paddw %%mm6, %%mm0 \n\t"
  2669. "paddw %%mm6, %%mm1 \n\t"
  2670. "psraw $1, %%mm0 \n\t"
  2671. "psraw $1, %%mm1 \n\t"
  2672. "paddw (%2, %0), %%mm0 \n\t"
  2673. "paddw 8(%2, %0), %%mm1 \n\t"
  2674. "movq %%mm0, (%2, %0) \n\t"
  2675. "movq %%mm1, 8(%2, %0) \n\t"
  2676. "add $16, %0 \n\t"
  2677. "cmp $128, %0 \n\t" //FIXME optimize & bench
  2678. " jb 1b \n\t"
  2679. : "+r" (i)
  2680. : "r"(basis), "r"(rem), "g"(scale)
  2681. );
  2682. }else{
  2683. for(i=0; i<8*8; i++){
  2684. rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
  2685. }
  2686. }
  2687. }
  2688. /* external functions, from idct_mmx.c */
  2689. void ff_mmx_idct(DCTELEM *block);
  2690. void ff_mmxext_idct(DCTELEM *block);
  2691. /* XXX: those functions should be suppressed ASAP when all IDCTs are
  2692. converted */
  2693. static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2694. {
  2695. ff_mmx_idct (block);
  2696. put_pixels_clamped_mmx(block, dest, line_size);
  2697. }
  2698. static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2699. {
  2700. ff_mmx_idct (block);
  2701. add_pixels_clamped_mmx(block, dest, line_size);
  2702. }
  2703. static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
  2704. {
  2705. ff_mmxext_idct (block);
  2706. put_pixels_clamped_mmx(block, dest, line_size);
  2707. }
  2708. static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
  2709. {
  2710. ff_mmxext_idct (block);
  2711. add_pixels_clamped_mmx(block, dest, line_size);
  2712. }
  2713. void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
  2714. {
  2715. mm_flags = mm_support();
  2716. if (avctx->dsp_mask) {
  2717. if (avctx->dsp_mask & FF_MM_FORCE)
  2718. mm_flags |= (avctx->dsp_mask & 0xffff);
  2719. else
  2720. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  2721. }
  2722. #if 0
  2723. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  2724. if (mm_flags & MM_MMX)
  2725. av_log(avctx, AV_LOG_INFO, " mmx");
  2726. if (mm_flags & MM_MMXEXT)
  2727. av_log(avctx, AV_LOG_INFO, " mmxext");
  2728. if (mm_flags & MM_3DNOW)
  2729. av_log(avctx, AV_LOG_INFO, " 3dnow");
  2730. if (mm_flags & MM_SSE)
  2731. av_log(avctx, AV_LOG_INFO, " sse");
  2732. if (mm_flags & MM_SSE2)
  2733. av_log(avctx, AV_LOG_INFO, " sse2");
  2734. av_log(avctx, AV_LOG_INFO, "\n");
  2735. #endif
  2736. if (mm_flags & MM_MMX) {
  2737. const int idct_algo= avctx->idct_algo;
  2738. #ifdef CONFIG_ENCODERS
  2739. const int dct_algo = avctx->dct_algo;
  2740. if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
  2741. if(mm_flags & MM_SSE2){
  2742. c->fdct = ff_fdct_sse2;
  2743. }else if(mm_flags & MM_MMXEXT){
  2744. c->fdct = ff_fdct_mmx2;
  2745. }else{
  2746. c->fdct = ff_fdct_mmx;
  2747. }
  2748. }
  2749. #endif //CONFIG_ENCODERS
  2750. if(avctx->lowres==0){
  2751. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  2752. c->idct_put= ff_simple_idct_put_mmx;
  2753. c->idct_add= ff_simple_idct_add_mmx;
  2754. c->idct = ff_simple_idct_mmx;
  2755. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  2756. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  2757. if(mm_flags & MM_MMXEXT){
  2758. c->idct_put= ff_libmpeg2mmx2_idct_put;
  2759. c->idct_add= ff_libmpeg2mmx2_idct_add;
  2760. c->idct = ff_mmxext_idct;
  2761. }else{
  2762. c->idct_put= ff_libmpeg2mmx_idct_put;
  2763. c->idct_add= ff_libmpeg2mmx_idct_add;
  2764. c->idct = ff_mmx_idct;
  2765. }
  2766. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  2767. }
  2768. }
  2769. /* VP3 optimized DSP functions */
  2770. if (mm_flags & MM_SSE2) {
  2771. c->vp3_dsp_init = vp3_dsp_init_sse2;
  2772. c->vp3_idct = vp3_idct_sse2;
  2773. } else {
  2774. c->vp3_dsp_init = vp3_dsp_init_mmx;
  2775. c->vp3_idct = vp3_idct_mmx;
  2776. }
  2777. #ifdef CONFIG_ENCODERS
  2778. c->get_pixels = get_pixels_mmx;
  2779. c->diff_pixels = diff_pixels_mmx;
  2780. #endif //CONFIG_ENCODERS
  2781. c->put_pixels_clamped = put_pixels_clamped_mmx;
  2782. c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
  2783. c->add_pixels_clamped = add_pixels_clamped_mmx;
  2784. c->clear_blocks = clear_blocks_mmx;
  2785. #ifdef CONFIG_ENCODERS
  2786. c->pix_sum = pix_sum16_mmx;
  2787. #endif //CONFIG_ENCODERS
  2788. c->put_pixels_tab[0][0] = put_pixels16_mmx;
  2789. c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
  2790. c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
  2791. c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
  2792. c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
  2793. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
  2794. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
  2795. c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
  2796. c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
  2797. c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
  2798. c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
  2799. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
  2800. c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
  2801. c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
  2802. c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
  2803. c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
  2804. c->put_pixels_tab[1][0] = put_pixels8_mmx;
  2805. c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
  2806. c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
  2807. c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
  2808. c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
  2809. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
  2810. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
  2811. c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
  2812. c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
  2813. c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
  2814. c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
  2815. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
  2816. c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
  2817. c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
  2818. c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
  2819. c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
  2820. c->add_bytes= add_bytes_mmx;
  2821. #ifdef CONFIG_ENCODERS
  2822. c->diff_bytes= diff_bytes_mmx;
  2823. c->hadamard8_diff[0]= hadamard8_diff16_mmx;
  2824. c->hadamard8_diff[1]= hadamard8_diff_mmx;
  2825. c->pix_norm1 = pix_norm1_mmx;
  2826. c->sse[0] = sse16_mmx;
  2827. c->sse[1] = sse8_mmx;
  2828. c->vsad[4]= vsad_intra16_mmx;
  2829. c->nsse[0] = nsse16_mmx;
  2830. c->nsse[1] = nsse8_mmx;
  2831. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2832. c->vsad[0] = vsad16_mmx;
  2833. }
  2834. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2835. c->try_8x8basis= try_8x8basis_mmx;
  2836. }
  2837. c->add_8x8basis= add_8x8basis_mmx;
  2838. #endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;
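        /* These implement the H.263 (Annex J style) in-loop deblocking
         * filter on vertical and horizontal block edges. */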

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }
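
            /* The no_rnd and xy2 MMX2 versions above use pavgb-based
             * shortcuts that are not bit-identical to the C reference,
             * hence the CODEC_FLAG_BITEXACT guard. */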

#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif
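
            /* SET_QPEL_FUNC (a macro defined earlier in this file)
             * effectively assigns through the context, i.e. the first line
             * above amounts to
             *   c->qpel_pixels_tab[0][0] = qpel16_mc00_mmx2;
             * The mcXY suffix encodes the quarter-pel phase: X horizontal,
             * Y vertical, so the table index is X + 4*Y. */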

//FIXME 3dnow too
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc
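
            /* Each dspfunc(PFX, IDX, NUM) call pastes its arguments into
             * the sixteen assignments above, one per quarter-pel position;
             * e.g. dspfunc(put_h264_qpel, 0, 16) starts with
             *   c->put_h264_qpel_pixels_tab[0][ 0] = put_h264_qpel16_mc00_mmx2;
             * and so installs the 16/8/4-wide H.264 qpel functions without
             * spelling out 96 assignments by hand. */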

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS

#if 0
    // for speed testing
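    // NOTE: this block predates the DSPContext conversion: the assignments
    // lack the c-> prefix and use the old one-dimensional pixels_tab
    // layout, so it would no longer compile if enabled; it is kept only as
    // a reference for stubbing the function pointers out with just_return.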
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;
    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;
    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;
    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}