/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "constants.h"
#include "dsputil_x86.h"

#if HAVE_INLINE_ASM
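
/* Pack a block of 64 signed 16-bit coefficients into 8-bit pixels with
 * unsigned saturation (clamp to 0..255) and store them as an 8x8 block,
 * one row of 8 bytes every line_size bytes. */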
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");

    pix += line_size * 4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, hence the use of "r".
    __asm__ volatile (
        "movq      (%3), %%mm0          \n\t"
        "movq     8(%3), %%mm1          \n\t"
        "movq    16(%3), %%mm2          \n\t"
        "movq    24(%3), %%mm3          \n\t"
        "movq    32(%3), %%mm4          \n\t"
        "movq    40(%3), %%mm5          \n\t"
        "movq    48(%3), %%mm6          \n\t"
        "movq    56(%3), %%mm7          \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq     %%mm0, (%0)           \n\t"
        "movq     %%mm2, (%0, %1)       \n\t"
        "movq     %%mm4, (%0, %1, 2)    \n\t"
        "movq     %%mm6, (%0, %2)       \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}

#define put_signed_pixels_clamped_mmx_half(off)              \
    "movq          "#off"(%2), %%mm1        \n\t"            \
    "movq     16 + "#off"(%2), %%mm2        \n\t"            \
    "movq     32 + "#off"(%2), %%mm3        \n\t"            \
    "movq     48 + "#off"(%2), %%mm4        \n\t"            \
    "packsswb  8 + "#off"(%2), %%mm1        \n\t"            \
    "packsswb 24 + "#off"(%2), %%mm2        \n\t"            \
    "packsswb 40 + "#off"(%2), %%mm3        \n\t"            \
    "packsswb 56 + "#off"(%2), %%mm4        \n\t"            \
    "paddb              %%mm0, %%mm1        \n\t"            \
    "paddb              %%mm0, %%mm2        \n\t"            \
    "paddb              %%mm0, %%mm3        \n\t"            \
    "paddb              %%mm0, %%mm4        \n\t"            \
    "movq               %%mm1, (%0)         \n\t"            \
    "movq               %%mm2, (%0, %3)     \n\t"            \
    "movq               %%mm3, (%0, %3, 2)  \n\t"            \
    "movq               %%mm4, (%0, %1)     \n\t"
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0     \n\t"
        "lea         (%3, %3, 2), %1        \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0        \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
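
/* Add a block of 64 signed 16-bit coefficients to an existing 8x8 block of
 * pixels: the pixels are widened to 16 bits, added with signed saturation,
 * and repacked with unsigned saturation (clamp to 0..255). */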
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
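
/* Zero n consecutive 64-coefficient (128-byte) blocks with 8-byte MMX
 * stores; expanded below for the 6-block and single-block cases. */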
#define CLEAR_BLOCKS(name, n)                           \
void name(int16_t *blocks)                              \
{                                                       \
    __asm__ volatile (                                  \
        "pxor %%mm7, %%mm7              \n\t"           \
        "mov     %1, %%"REG_a"          \n\t"           \
        "1:                             \n\t"           \
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"           \
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"           \
        "add    $32, %%"REG_a"          \n\t"           \
        "js 1b                          \n\t"           \
        :: "r"(((uint8_t *)blocks) + 128 * n),          \
           "i"(-128 * n)                                \
        : "%"REG_a                                      \
        );                                              \
}
CLEAR_BLOCKS(ff_clear_blocks_mmx, 6)
CLEAR_BLOCKS(ff_clear_block_mmx, 1)
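
/* SSE variants of the block-clearing routines: zero one block (or all six)
 * with 16-byte movaps stores, which require 16-byte-aligned block data. */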
void ff_clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

void ff_clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps  %%xmm0, %%xmm0                  \n"
        "mov        %1, %%"REG_a"               \n"
        "1:                                     \n"
        "movaps %%xmm0,    (%0, %%"REG_a")      \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")      \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")      \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")      \n"
        "add      $128, %%"REG_a"               \n"
        "js 1b                                  \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}
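
/* Add the bytes of src to the bytes of dst in place (wrapping 8-bit adds,
 * no saturation): 16 bytes per MMX iteration, then a scalar tail loop. */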
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp          2f                \n\t"
        "1:                             \n\t"
        "movq   (%1, %0), %%mm0         \n\t"
        "movq   (%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, (%2, %0)      \n\t"
        "movq  8(%1, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb     %%mm0, %%mm1         \n\t"
        "movq      %%mm1, 8(%2, %0)     \n\t"
        "add         $16, %0            \n\t"
        "2:                             \n\t"
        "cmp          %3, %0            \n\t"
        "js 1b                          \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );

    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}
/* Draw the edges of width 'w' around an image of size width x height;
 * this MMX version can only handle w == 8 || w == 16. */
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                       int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb 1b                          \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1,  (%0, %2)    \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb 1b                              \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
            );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb 1b                          \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
                );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq    %%mm0, (%0)            \n\t"
                "movq    %%mm0, (%0, %2)        \n\t"
                "movq    %%mm0, (%0, %2, 2)     \n\t"
                "movq    %%mm0, (%0, %3)        \n\t"
                "add        $8, %0              \n\t"
                "cmp        %4, %0              \n\t"
                "jb 1b                          \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
                );
        }
    }
}
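
/* Global motion compensation: bilinear interpolation of an 8-pixel-wide block
 * driven by an affine motion vector field. The MMX path handles the common
 * case of a constant full-pel offset and at most 16 bits of sub-pel precision
 * inside the picture; everything else falls back to ff_gmc_c(). */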
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox >> (16 + shift);
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox >> 4;
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned)ix >= width  - w ||
        (unsigned)iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r"(1 << shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq    %0, %%mm4      \n\t"
                "movq    %1, %%mm5      \n\t"
                "paddw   %2, %%mm4      \n\t"
                "paddw   %3, %%mm5      \n\t"
                "movq %%mm4, %0         \n\t"
                "movq %%mm5, %1         \n\t"
                "psrlw  $12, %%mm4      \n\t"
                "psrlw  $12, %%mm5      \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;
    }
}
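
/* Clamp each float in src to the range [min, max] and write the result to
 * dst, 16 floats per iteration working backwards from the end of the buffer;
 * the aligned movaps accesses require 16-byte-aligned src and dst. */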
void ff_vector_clipf_sse(float *dst, const float *src,
                         float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;

    __asm__ volatile (
        "movss          %3, %%xmm4      \n\t"
        "movss          %4, %%xmm5      \n\t"
        "shufps $0, %%xmm4, %%xmm4      \n\t"
        "shufps $0, %%xmm5, %%xmm5      \n\t"
        "1:                             \n\t"
        "movaps   (%2, %0), %%xmm0      \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1      \n\t"
        "movaps 32(%2, %0), %%xmm2      \n\t"
        "movaps 48(%2, %0), %%xmm3      \n\t"
        "maxps      %%xmm4, %%xmm0      \n\t"
        "maxps      %%xmm4, %%xmm1      \n\t"
        "maxps      %%xmm4, %%xmm2      \n\t"
        "maxps      %%xmm4, %%xmm3      \n\t"
        "minps      %%xmm5, %%xmm0      \n\t"
        "minps      %%xmm5, %%xmm1      \n\t"
        "minps      %%xmm5, %%xmm2      \n\t"
        "minps      %%xmm5, %%xmm3      \n\t"
        "movaps     %%xmm0,   (%1, %0)  \n\t"
        "movaps     %%xmm1, 16(%1, %0)  \n\t"
        "movaps     %%xmm2, 32(%1, %0)  \n\t"
        "movaps     %%xmm3, 48(%1, %0)  \n\t"
        "sub           $64, %0          \n\t"
        "jge 1b                         \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}

#endif /* HAVE_INLINE_ASM */