/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];
int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for a shared library it is better to build these constants in registers
// than to access them through memory (PIC)
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
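
/*
 * For reference, the identities the two macros above exploit, written as
 * scalar C (an illustrative sketch, not used by the code below; the helper
 * names are made up for this example). Per byte lane:
 *   rounding:    (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1)
 *   no rounding: (a + b)     >> 1 == (a & b) + (((a ^ b) & 0xFE) >> 1)
 * The 0xFE mask (regfe) keeps the 64 bit psrlq from leaking bits between
 * the packed byte lanes.
 */
static inline uint8_t avg2_ref(uint8_t a, uint8_t b)
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1); /* == (a + b + 1) >> 1 */
}
static inline uint8_t avg2_no_rnd_ref(uint8_t a, uint8_t b)
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1); /* == (a + b) >> 1 */
}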
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2
/* Introduced only in the MMX2 instruction set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB
/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a")\n\t"
        "movq %%mm1, 8(%1, %%"REG_a")\n\t"
        "movq %%mm2, 16(%1, %%"REG_a")\n\t"
        "movq %%mm3, 24(%1, %%"REG_a")\n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
        : "%"REG_a
    );
}

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a")\n\t"
        "movq %%mm1, 8(%2, %%"REG_a")\n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
        : "%"REG_a
    );
}
#endif //CONFIG_ENCODERS
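
/*
 * Scalar equivalents of the two encoder routines above (illustrative sketch
 * only; the names are made up for this example). The asm versions produce
 * the same 8x8 block of 16 bit values, several rows per loop iteration.
 */
static inline void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = pixels[j]; /* widen bytes to 16 bit */
        pixels += line_size;
    }
}
static inline void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = s1[j] - s2[j]; /* signed 16 bit difference */
        s1 += stride;
        s2 += stride;
    }
}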
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq %3, %%mm0\n\t"
        "movq 8%3, %%mm1\n\t"
        "movq 16%3, %%mm2\n\t"
        "movq 24%3, %%mm3\n\t"
        "movq 32%3, %%mm4\n\t"
        "movq 40%3, %%mm5\n\t"
        "movq 48%3, %%mm6\n\t"
        "movq 56%3, %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
        :"memory");

    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus using "r" for the block pointer
    __asm __volatile(
        "movq (%3), %%mm0\n\t"
        "movq 8(%3), %%mm1\n\t"
        "movq 16(%3), %%mm2\n\t"
        "movq 24(%3), %%mm3\n\t"
        "movq 32(%3), %%mm4\n\t"
        "movq 40(%3), %%mm5\n\t"
        "movq 48(%3), %%mm6\n\t"
        "movq 56(%3), %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
        :"memory");
}
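
/*
 * What the unrolled asm above computes, as plain C (sketch; the name is
 * made up for this example): each 16 bit coefficient is clipped to 0..255
 * by packuswb and stored as a byte.
 */
static inline void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[8*i + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v; /* unsigned saturation */
        }
        pixels += line_size;
    }
}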
static const unsigned char __align8 vector128[8] =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq (%2), %%mm0\n\t"
            "movq 8(%2), %%mm1\n\t"
            "movq 16(%2), %%mm2\n\t"
            "movq 24(%2), %%mm3\n\t"
            "movq %0, %%mm4\n\t"
            "movq %1, %%mm6\n\t"
            "movq %%mm4, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm4\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm4, %%mm0\n\t"
            "paddsw %%mm5, %%mm1\n\t"
            "movq %%mm6, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm6\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm6, %%mm2\n\t"
            "paddsw %%mm5, %%mm3\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "movq %%mm0, %0\n\t"
            "movq %%mm2, %1\n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((long)line_size)
        : "%"REG_a, "memory"
    );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128*6, %%"REG_a" \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%"REG_a") \n\t"
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"
        "add $32, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "r" (((uint8_t *)blocks)+128*6)
        : "%"REG_a
    );
}
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    long index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((long)line_size)
    );

    return sum;
}
#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(
        H263_LOOP_FILTER
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
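
/*
 * Scalar view of the 4x4 byte transpose above (sketch; the name is made up
 * for this example): dst[x][y] = src[y][x]. The asm reaches the same result
 * with two punpcklbw steps followed by punpcklwd/punpckhwd.
 */
static inline void transpose4x4_ref(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int x, y;
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            dst[x*dst_stride + y] = src[y*src_stride + x];
}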
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6
        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((long)   stride ),
           "r" ((long)(3*stride))
    );
}
#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                            pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
    return tmp;
}
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq %%mm1,%%mm5\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm5,%%mm2\n"
        "por %%mm1,%%mm2\n"
        "movq %%mm2,%%mm1\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm1,%%mm1\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm1,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((long)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int score1= sse16_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((long)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    long i=0;
    uint8_t l, lt;

    asm volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
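
/*
 * Scalar version of the median prediction above (sketch; the name is made
 * up for this example, mid_pred() is the 3-input median from the shared
 * headers). Note the asm works in wrapping byte arithmetic, so L + T - LT
 * is effectively taken modulo 256 there.
 */
static inline void sub_hfyu_median_prediction_ref(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
    int i;
    for (i = 1; i < w; i++) {
        const int L  = src2[i-1]; /* left neighbour of the current pixel */
        const int T  = src1[i];   /* pixel above                         */
        const int LT = src1[i-1]; /* pixel above-left                    */
        dst[i] = src2[i] - mid_pred(L, T, L + T - LT);
    }
}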
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"

#define HADAMARD48\
    LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
    LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
    LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
    LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)

#define MMABS(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"\
    "paddusw " #a ", " #sum " \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), " #a " \n\t"\
    "movq "#o"+16(%1), " #b " \n\t"\
    "movq "#o"+32(%1), " #c " \n\t"\
    "movq "#o"+48(%1), " #d " \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1) \n\t"\
    "movq "#b", "#o"+16(%1) \n\t"\
    "movq "#c", "#o"+32(%1) \n\t"\
    "movq "#d", "#o"+48(%1) \n\t"
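
/*
 * Arithmetic performed by LBUTTERFLY2 per 16 bit lane (sketch; the helper
 * name is made up for this example): it is an unnormalized Hadamard
 * butterfly, (a, b) -> (a + b, b - a), computed as a += b; b += b; b -= a.
 */
static inline void lbutterfly_ref(DCTELEM *a, DCTELEM *b)
{
    DCTELEM s = *a + *b; /* sum        */
    DCTELEM d = *b - *a; /* difference */
    *a = s;
    *b = d;
}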
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1) \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5 \n\t"//FIXME remove
        "movq %%mm6, %%mm7 \n\t"
        "movq %%mm0, %%mm6 \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1) \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1) \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1 \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1 \n\t"
        "psrlq $32, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlq $16, %%mm0 \n\t"
        "paddusw %%mm1, %%mm0 \n\t"
        "movd %%mm0, %0 \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
  1620. static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1621. uint64_t temp;\
  1622. \
  1623. asm volatile(\
  1624. "pxor %%mm7, %%mm7 \n\t"\
  1625. "1: \n\t"\
  1626. "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
  1627. "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
  1628. "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
  1629. "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
  1630. "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
  1631. "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
  1632. "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
  1633. "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
  1634. "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
  1635. "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
  1636. "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
  1637. "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
  1638. "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
  1639. "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
  1640. "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
  1641. "paddw %%mm3, %%mm5 \n\t" /* b */\
  1642. "paddw %%mm2, %%mm6 \n\t" /* c */\
  1643. "paddw %%mm5, %%mm5 \n\t" /* 2b */\
  1644. "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
  1645. "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
  1646. "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
  1647. "paddw %%mm4, %%mm0 \n\t" /* a */\
  1648. "paddw %%mm1, %%mm5 \n\t" /* d */\
  1649. "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
  1650. "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
  1651. "paddw %6, %%mm6 \n\t"\
  1652. "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
  1653. "psraw $5, %%mm0 \n\t"\
  1654. /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
  1655. \
  1656. "movd 5(%0), %%mm5 \n\t" /* FGHI */\
  1657. "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
  1658. "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
  1659. "paddw %%mm5, %%mm1 \n\t" /* a */\
  1660. "paddw %%mm6, %%mm2 \n\t" /* b */\
  1661. "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
  1662. "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
  1663. "paddw %%mm6, %%mm3 \n\t" /* c */\
  1664. "paddw %%mm5, %%mm4 \n\t" /* d */\
  1665. "paddw %%mm2, %%mm2 \n\t" /* 2b */\
  1666. "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
  1667. "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
  1668. "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
  1669. "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
  1670. "paddw %6, %%mm1 \n\t"\
  1671. "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
  1672. "psraw $5, %%mm3 \n\t"\
  1673. "packuswb %%mm3, %%mm0 \n\t"\
  1674. OP_MMX2(%%mm0, (%1), %%mm4, q)\
  1675. \
  1676. "add %3, %0 \n\t"\
  1677. "add %4, %1 \n\t"\
  1678. "decl %2 \n\t"\
  1679. " jnz 1b \n\t"\
  1680. : "+a"(src), "+c"(dst), "+m"(h)\
  1681. : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
  1682. : "memory"\
  1683. );\
  1684. }\
  1685. \
  1686. static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1687. int i;\
  1688. int16_t temp[8];\
  1689. /* quick HACK, XXX FIXME MUST be optimized */\
  1690. for(i=0; i<h; i++)\
  1691. {\
  1692. temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
  1693. temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
  1694. temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
  1695. temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
  1696. temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
  1697. temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
  1698. temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
  1699. temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
  1700. asm volatile(\
  1701. "movq (%0), %%mm0 \n\t"\
  1702. "movq 8(%0), %%mm1 \n\t"\
  1703. "paddw %2, %%mm0 \n\t"\
  1704. "paddw %2, %%mm1 \n\t"\
  1705. "psraw $5, %%mm0 \n\t"\
  1706. "psraw $5, %%mm1 \n\t"\
  1707. "packuswb %%mm1, %%mm0 \n\t"\
  1708. OP_3DNOW(%%mm0, (%1), %%mm1, q)\
  1709. :: "r"(temp), "r"(dst), "m"(ROUNDER)\
  1710. :"memory"\
  1711. );\
  1712. dst+=dstStride;\
  1713. src+=srcStride;\
  1714. }\
  1715. }
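
/* All of the lowpass functions generated above implement the same 8-tap
 * MPEG-4 qpel filter: 20*(s[0]+s[1]) - 6*(s[-1]+s[2]) + 3*(s[-2]+s[3])
 * - (s[-3]+s[4]), plus ROUNDER (ff_pw_16, or ff_pw_15 for the no_rnd
 * variants), shifted right by 5 and clamped to 0..255.  A scalar
 * reference sketch for one row follows (a hypothetical helper, kept
 * disabled); it omits the border mirroring that the temp[] tables in
 * the 3dnow versions above apply at block edges: */
#if 0
static void qpel_h_lowpass_row_ref(uint8_t *dst, const uint8_t *src, int w, int rounder)
{
    int x;
    for(x=0; x<w; x++){
        int p= 20*(src[x  ]+src[x+1])
             -  6*(src[x-1]+src[x+2])
             +  3*(src[x-2]+src[x+3])
             -    (src[x-3]+src[x+4]);
        p= (p + rounder)>>5;     /* round and scale */
        if(p<0)   p= 0;          /* packuswb saturation */
        if(p>255) p= 255;
        dst[x]= p;
    }
}
#endif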
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
    \
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
    \
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((long)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
    /*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        \
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
        : "memory"\
    );\
}\
\
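/* The mcXY functions below cover the 16 quarter-pel positions: X is the \
   horizontal and Y the vertical offset in quarter-pel units, so mc00 is \
   the full-pel copy, mc20 the horizontal half-pel and mc22 the centre.  \
   Quarter positions are built by averaging (pixels*_l2) neighbouring    \
   full- and half-pel planes produced by the lowpass functions above. */\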
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
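
/* Each QPEL_BASE/QPEL_OP expansion above emits one complete family of
   functions for its rounding mode, operation and CPU flavour, e.g. the
   last line generates put_no_rnd_qpel16_mc00_mmx2 through
   put_no_rnd_qpel8_mc33_mmx2; SET_QPEL_FUNC below wires these into the
   DSPContext tables. */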
#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    long i=0;

    assert(ABS(scale) < 256);
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;

    asm volatile(
        "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
        "psrlw $15, %%mm6 \n\t" // 1w
        "pxor %%mm7, %%mm7 \n\t"
        "movd %4, %%mm5 \n\t"
        "punpcklwd %%mm5, %%mm5 \n\t"
        "punpcklwd %%mm5, %%mm5 \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "pmulhw %%mm5, %%mm0 \n\t"
        "pmulhw %%mm5, %%mm1 \n\t"
        "paddw %%mm6, %%mm0 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "psraw $1, %%mm0 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw (%2, %0), %%mm0 \n\t"
        "paddw 8(%2, %0), %%mm1 \n\t"
        "psraw $6, %%mm0 \n\t"
        "psraw $6, %%mm1 \n\t"
        "pmullw (%3, %0), %%mm0 \n\t"
        "pmullw 8(%3, %0), %%mm1 \n\t"
        "pmaddwd %%mm0, %%mm0 \n\t"
        "pmaddwd %%mm1, %%mm1 \n\t"
        "paddd %%mm1, %%mm0 \n\t"
        "psrld $4, %%mm0 \n\t"
        "paddd %%mm0, %%mm7 \n\t"
        "add $16, %0 \n\t"
        "cmp $128, %0 \n\t" //FIXME optimize & bench
        " jb 1b \n\t"
        "movq %%mm7, %%mm6 \n\t"
        "psrlq $32, %%mm7 \n\t"
        "paddd %%mm6, %%mm7 \n\t"
        "psrld $2, %%mm7 \n\t"
        "movd %%mm7, %0 \n\t"
        : "+r" (i)
        : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
    );
    return i;
}
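
/* A scalar sketch of the distortion sum the loop above computes, taking
 * the unshifted scale (the ABS(scale) < 256 assert exists because scale
 * is broadcast as a 16 bit pmulhw operand after the shift).  The MMX
 * code squares and sums word pairs with pmaddwd before the >>4, so the
 * low bits may differ slightly from this per-coefficient version; a
 * hypothetical helper, kept disabled: */
#if 0
static int try_8x8basis_ref(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
{
    int i;
    unsigned int sum=0;
    for(i=0; i<8*8; i++){
        int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT - 1)))>>(BASIS_SHIFT - RECON_SHIFT));
        int w= weight[i];
        b >>= RECON_SHIFT;           /* the psraw $6 above */
        sum += ((w*b)*(w*b))>>4;     /* pmullw + pmaddwd, psrld $4 */
    }
    return sum>>2;                   /* final psrld $2 */
}
#endif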
static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
    long i=0;

    if(ABS(scale) < 256){
        scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
        asm volatile(
            "pcmpeqw %%mm6, %%mm6 \n\t" // -1w
            "psrlw $15, %%mm6 \n\t" // 1w
            "movd %3, %%mm5 \n\t"
            "punpcklwd %%mm5, %%mm5 \n\t"
            "punpcklwd %%mm5, %%mm5 \n\t"
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq 8(%1, %0), %%mm1 \n\t"
            "pmulhw %%mm5, %%mm0 \n\t"
            "pmulhw %%mm5, %%mm1 \n\t"
            "paddw %%mm6, %%mm0 \n\t"
            "paddw %%mm6, %%mm1 \n\t"
            "psraw $1, %%mm0 \n\t"
            "psraw $1, %%mm1 \n\t"
            "paddw (%2, %0), %%mm0 \n\t"
            "paddw 8(%2, %0), %%mm1 \n\t"
            "movq %%mm0, (%2, %0) \n\t"
            "movq %%mm1, 8(%2, %0) \n\t"
            "add $16, %0 \n\t"
            "cmp $128, %0 \n\t" //FIXME optimize & bench
            " jb 1b \n\t"
            : "+r" (i)
            : "r"(basis), "r"(rem), "g"(scale)
        );
    }else{
        for(i=0; i<8*8; i++){
            rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
        }
    }
}

#include "h264dsp_mmx.c"

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_mmx(int16_t *data);
void ff_vp3_dsp_init_mmx(void);
/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}

static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
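
/* All eight wrappers above share one pattern: run the selected IDCT in
   place on the coefficient block, then clamp-and-store (put) or
   clamp-and-accumulate onto the existing prediction (add); the VP3 put
   paths use the signed variant since that IDCT produces values centred
   around zero rather than in the 0..255 range. */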
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
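    /* dsp_mask lets callers override the detected CPU flags: with
       FF_MM_FORCE the masked-in capability bits are added, without it
       they are removed.  A usage sketch (hypothetical caller code):
           avctx->dsp_mask = FF_MM_FORCE | MM_MMX;  // pretend MMX exists
           avctx->dsp_mask = MM_SSE2;               // disable SSE2 paths */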
#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;
#ifdef CONFIG_ENCODERS
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_VP3){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }
        }
#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS
            c->h264_idct_add= ff_h264_idct_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }

#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

//FIXME 3dnow too
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc
  2452. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
  2453. c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
  2454. c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
  2455. c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
  2456. c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
  2457. c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
  2458. c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
  2459. #ifdef CONFIG_ENCODERS
  2460. c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
  2461. #endif //CONFIG_ENCODERS
  2462. } else if (mm_flags & MM_3DNOW) {
  2463. c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
  2464. c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
  2465. c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
  2466. c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
  2467. c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
  2468. c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
  2469. c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
  2470. c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
  2471. c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
  2472. c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
  2473. if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
  2474. c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
  2475. c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
  2476. c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
  2477. c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
  2478. c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
  2479. c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
  2480. }
  2481. SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
  2482. SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
  2483. SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
  2484. SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
  2485. SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
  2486. SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
  2487. SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
  2488. SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
  2489. SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
  2490. SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
  2491. SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
  2492. SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
  2493. SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
  2494. SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
  2495. SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
  2496. SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
  2497. SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
  2498. SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
  2499. SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
  2500. SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
  2501. SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
  2502. SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
  2503. SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
  2504. SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
  2505. SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
  2506. SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
  2507. SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
  2508. SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
  2509. SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
  2510. SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
  2511. SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
  2512. SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
  2513. #define dspfunc(PFX, IDX, NUM) \
  2514. c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
  2515. c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
  2516. c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
  2517. c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
  2518. c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
  2519. c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
  2520. c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
  2521. c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
  2522. c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
  2523. c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
  2524. c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
  2525. c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
  2526. c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
  2527. c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
  2528. c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
  2529. c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
  2530. dspfunc(put_h264_qpel, 0, 16);
  2531. dspfunc(put_h264_qpel, 1, 8);
  2532. dspfunc(put_h264_qpel, 2, 4);
  2533. dspfunc(avg_h264_qpel, 0, 16);
  2534. dspfunc(avg_h264_qpel, 1, 8);
  2535. dspfunc(avg_h264_qpel, 2, 4);
  2536. c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
  2537. }
  2538. }
  2539. #ifdef CONFIG_ENCODERS
  2540. dsputil_init_pix_mmx(c, avctx);
  2541. #endif //CONFIG_ENCODERS
  2542. #if 0
  2543. // for speed testing
  2544. get_pixels = just_return;
  2545. put_pixels_clamped = just_return;
  2546. add_pixels_clamped = just_return;
  2547. pix_abs16x16 = just_return;
  2548. pix_abs16x16_x2 = just_return;
  2549. pix_abs16x16_y2 = just_return;
  2550. pix_abs16x16_xy2 = just_return;
  2551. put_pixels_tab[0] = just_return;
  2552. put_pixels_tab[1] = just_return;
  2553. put_pixels_tab[2] = just_return;
  2554. put_pixels_tab[3] = just_return;
  2555. put_no_rnd_pixels_tab[0] = just_return;
  2556. put_no_rnd_pixels_tab[1] = just_return;
  2557. put_no_rnd_pixels_tab[2] = just_return;
  2558. put_no_rnd_pixels_tab[3] = just_return;
  2559. avg_pixels_tab[0] = just_return;
  2560. avg_pixels_tab[1] = just_return;
  2561. avg_pixels_tab[2] = just_return;
  2562. avg_pixels_tab[3] = just_return;
  2563. avg_no_rnd_pixels_tab[0] = just_return;
  2564. avg_no_rnd_pixels_tab[1] = just_return;
  2565. avg_no_rnd_pixels_tab[2] = just_return;
  2566. avg_no_rnd_pixels_tab[3] = just_return;
  2567. //av_fdct = just_return;
  2568. //ff_idct = just_return;
  2569. #endif
  2570. }