/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/videodsp.h"
#include "constants.h"
#include "dsputil_x86.h"
#include "diracdsp_mmx.h"

#if HAVE_INLINE_ASM
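/* Pack a block of 64 16-bit coefficients into 8-bit pixels with unsigned
 * saturation (packuswb) and store them as an 8x8 block at 'pixels',
 * one row every 'line_size' bytes. */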
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
           "r" (p)
        : "memory");

    pix += line_size * 4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler
    // would generate some very strange code, thus using "r".
    __asm__ volatile (
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
           "r" (p)
        : "memory");
}
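/* Same idea, but the coefficients are packed with signed saturation
 * (packsswb) and then offset by 128 via paddb with ff_pb_80, mapping
 * [-128,127] to [0,255]. */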
#define put_signed_pixels_clamped_mmx_half(off)         \
    "movq          "#off"(%2), %%mm1        \n\t"       \
    "movq     16 + "#off"(%2), %%mm2        \n\t"       \
    "movq     32 + "#off"(%2), %%mm3        \n\t"       \
    "movq     48 + "#off"(%2), %%mm4        \n\t"       \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"       \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"       \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"       \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"       \
    "paddb              %%mm0, %%mm1        \n\t"       \
    "paddb              %%mm0, %%mm2        \n\t"       \
    "paddb              %%mm0, %%mm3        \n\t"       \
    "paddb              %%mm0, %%mm4        \n\t"       \
    "movq               %%mm1, (%0)         \n\t"       \
    "movq               %%mm2, (%0, %3)     \n\t"       \
    "movq               %%mm3, (%0, %3, 2)  \n\t"       \
    "movq               %%mm4, (%0, %1)     \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r" (pixels), "=&r" (line_skip3)
        : "r" (block), "r" (line_skip)
        : "memory");
}
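/* Add a block of 64 16-bit coefficients to the existing 8x8 pixel block at
 * 'pixels' (IDCT add), clamping the result to [0,255]. */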
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m" (*pix), "+m" (*(pix + line_size))
            : "r" (p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
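/* Zero n consecutive 64-coefficient (128-byte) blocks, 32 bytes per
 * iteration, by counting a negative byte offset in REG_a up to zero. */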
#define CLEAR_BLOCKS(name, n)                           \
void name(int16_t *blocks)                              \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1, %%"REG_a"          \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js      1b                     \n\t"           \
        :: "r"(((uint8_t *) blocks) + 128 * n),         \
           "i"(-128 * n)                                \
        : "%"REG_a);                                    \
}
CLEAR_BLOCKS(ff_clear_blocks_mmx, 6)
CLEAR_BLOCKS(ff_clear_block_mmx, 1)
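/* SSE versions: the coefficient blocks are 16-byte aligned, so each
 * 128-byte block can be cleared with eight aligned movaps stores. */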
void ff_clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r" (block)
        : "memory");
}

void ff_clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0              \n"
        "mov        %1, %%"REG_a"           \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add      $128, %%"REG_a"           \n"
        "js         1b                      \n"
        :: "r"(((uint8_t *) blocks) + 128 * 6), "i"(-128 * 6)
        : "%"REG_a);
}
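/* dst[i] += src[i] for w bytes: 16 bytes per MMX iteration, with a scalar
 * loop for the remaining tail. */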
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb    %%mm0, %%mm1          \n\t"
        "movq     %%mm1, (%2, %0)       \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb    %%mm0, %%mm1          \n\t"
        "movq     %%mm1, 8(%2, %0)      \n\t"
        "add        $16, %0             \n\t"
        "2:                             \n\t"
        "cmp         %3, %0             \n\t"
        "js          1b                 \n\t"
        : "+r" (i)
        : "r" (src), "r" (dst), "r" ((x86_reg) w - 15));

    for (; i < w; i++)
        dst[i + 0] += src[i + 0];
}
/* Draw the edges of width 'w' of an image of size width, height.
 * This MMX version handles w == 4, w == 8 and w == 16. */
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                       int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg) wrap), "r" ((x86_reg) width),
              "r" (ptr + wrap * height));
    } else if (w == 16) {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1, (%0, %2)     \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg) wrap), "r" ((x86_reg) width),
              "r" (ptr + wrap * height));
    } else {
        av_assert1(w == 4);
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "movd           %%mm0, -4(%0)   \n\t"
            "movd      -4(%0, %2), %%mm1    \n\t"
            "punpcklbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movd           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg) wrap), "r" ((x86_reg) width),
              "r" (ptr + wrap * height));
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg) buf - (x86_reg) ptr - w),
                  "r" ((x86_reg) - wrap), "r" ((x86_reg) - wrap * 3),
                  "r" (ptr + width + 2 * w));
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb         1b                  \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg) last_line - (x86_reg) ptr - w),
                  "r" ((x86_reg) wrap), "r" ((x86_reg) wrap * 3),
                  "r" (ptr + width + 2 * w));
        }
    }
}
typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t dst_stride,
                                   ptrdiff_t src_linesize,
                                   int block_w, int block_h,
                                   int src_x, int src_y, int w, int h);
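/* Global motion compensation for one 8-pixel-wide block: each destination
 * pixel is bilinearly interpolated from four source pixels, with per-column
 * fractional offsets updated incrementally in MMX registers. Cases the MMX
 * path cannot handle fall back to ff_gmc_c(). */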
static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
                                 int stride, int h, int ox, int oy,
                                 int dxx, int dxy, int dyx, int dyy,
                                 int shift, int r, int width, int height,
                                 emulated_edge_mc_func *emu_edge_fn)
{
    const int w    = 8;
    const int ix   = ox  >> (16 + shift);
    const int iy   = oy  >> (16 + shift);
    const int oxs  = ox  >> 4;
    const int oys  = oy  >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H      8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    int need_emu  = (unsigned) ix >= width  - w ||
                    (unsigned) iy >= height - h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        emu_edge_fn(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r" (1 << shift));

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq      %0, %%mm4    \n\t"
                "movq      %1, %%mm5    \n\t"
                "paddw     %2, %%mm4    \n\t"
                "paddw     %3, %%mm5    \n\t"
                "movq   %%mm4, %0       \n\t"
                "movq   %%mm5, %1       \n\t"
                "psrlw    $12, %%mm4    \n\t"
                "psrlw    $12, %%mm5    \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        src += 4 - h * stride;
    }
}
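/* Thin entry points that bind gmc() to the 8-bit edge-emulation helper;
 * which variants are built depends on CONFIG_VIDEODSP, HAVE_YASM and
 * ARCH_X86_32. */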
#if CONFIG_VIDEODSP
#if HAVE_YASM
#if ARCH_X86_32
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif
void ff_gmc_sse(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#else
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif
#endif

#if CONFIG_DIRAC_DECODER
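/* Dirac put/avg helpers: heights that are not a multiple of 4 fall back to
 * the C versions; all other heights use the SIMD pixel copy/average routines. */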
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)\
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3)\
        ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);\
    else\
        OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
    if (h & 3) {\
        ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);\
    } else {\
        OPNAME ## _pixels16_ ## EXT(dst,      src[0],      stride, h);\
        OPNAME ## _pixels16_ ## EXT(dst + 16, src[0] + 16, stride, h);\
    }\
}

#if HAVE_MMX_INLINE
PIXELS16(static, ff_avg, , , _mmxext)
DIRAC_PIXOP(put, ff_put, mmx)
DIRAC_PIXOP(avg, ff_avg, mmx)
#endif

#if HAVE_YASM
DIRAC_PIXOP(avg, ff_avg, mmxext)

void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_put_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_put_pixels16_sse2(dst, src[0], stride, h);
}

void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_avg_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_avg_pixels16_sse2(dst, src[0], stride, h);
}

void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_put_pixels16_sse2(dst,      src[0],      stride, h);
        ff_put_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}

void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_avg_pixels16_sse2(dst,      src[0],      stride, h);
        ff_avg_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}
#endif
#endif
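/* Clamp each float in src to [min, max] and store it to dst. Processes 16
 * floats per iteration with aligned loads/stores, so len must be a multiple
 * of 16 and the buffers 16-byte aligned. */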
void ff_vector_clipf_sse(float *dst, const float *src,
                         float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;

    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge            1b              \n\t"
        : "+&r" (i)
        : "r" (dst), "r" (src), "m" (min), "m" (max)
        : "memory");
}

#endif /* HAVE_INLINE_ASM */