/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "constants.h"      /* ff_pb_80 */
#include "dsputil_x86.h"
#include "inline_asm.h"

#if HAVE_INLINE_ASM

void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;

    /* unrolled loop */
    __asm__ volatile (
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
           "r" (p)
        : "memory");

    pix += line_size * 4;
    p   += 32;

    // An exact copy of the code above at this point would make the compiler
    // generate some very strange code, hence the "r" constraints.
    __asm__ volatile (
        "movq        (%3), %%mm0        \n\t"
        "movq       8(%3), %%mm1        \n\t"
        "movq      16(%3), %%mm2        \n\t"
        "movq      24(%3), %%mm3        \n\t"
        "movq      32(%3), %%mm4        \n\t"
        "movq      40(%3), %%mm5        \n\t"
        "movq      48(%3), %%mm6        \n\t"
        "movq      56(%3), %%mm7        \n\t"
        "packuswb   %%mm1, %%mm0        \n\t"
        "packuswb   %%mm3, %%mm2        \n\t"
        "packuswb   %%mm5, %%mm4        \n\t"
        "packuswb   %%mm7, %%mm6        \n\t"
        "movq       %%mm0, (%0)         \n\t"
        "movq       %%mm2, (%0, %1)     \n\t"
        "movq       %%mm4, (%0, %1, 2)  \n\t"
        "movq       %%mm6, (%0, %2)     \n\t"
        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
           "r" (p)
        : "memory");
}

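/*
 * Illustrative scalar equivalent of ff_put_pixels_clamped_mmx (not part of
 * the original file): a minimal sketch assuming av_clip_uint8() from
 * libavutil/common.h.  The packuswb above saturates each 16-bit coefficient
 * to 0..255 and stores it as one byte, eight pixels per line, eight lines.
 */
#if 0
static void put_pixels_clamped_c_sketch(const int16_t *block, uint8_t *pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j]);
        block  += 8;
        pixels += line_size;
    }
}
#endif
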
#define put_signed_pixels_clamped_mmx_half(off)         \
    "movq          "#off"(%2), %%mm1    \n\t"           \
    "movq     16 + "#off"(%2), %%mm2    \n\t"           \
    "movq     32 + "#off"(%2), %%mm3    \n\t"           \
    "movq     48 + "#off"(%2), %%mm4    \n\t"           \
    "packsswb  8 + "#off"(%2), %%mm1    \n\t"           \
    "packsswb 24 + "#off"(%2), %%mm2    \n\t"           \
    "packsswb 40 + "#off"(%2), %%mm3    \n\t"           \
    "packsswb 56 + "#off"(%2), %%mm4    \n\t"           \
    "paddb  %%mm0, %%mm1                \n\t"           \
    "paddb  %%mm0, %%mm2                \n\t"           \
    "paddb  %%mm0, %%mm3                \n\t"           \
    "paddb  %%mm0, %%mm4                \n\t"           \
    "movq   %%mm1, (%0)                 \n\t"           \
    "movq   %%mm2, (%0, %3)             \n\t"           \
    "movq   %%mm3, (%0, %3, 2)          \n\t"           \
    "movq   %%mm4, (%0, %1)             \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea         (%3, %3, 2), %1    \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea         (%0, %3, 4), %0    \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r" (pixels), "=&r" (line_skip3)
        : "r" (block), "r" (line_skip)
        : "memory");
}

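/*
 * Illustrative scalar equivalent of ff_put_signed_pixels_clamped_mmx (not
 * part of the original file): a minimal sketch assuming av_clip_uint8().
 * packsswb saturates each coefficient to -128..127 and the paddb with
 * ff_pb_80 re-biases it by 128, i.e. the output is clipped to 0..255 around
 * a mid-grey of 128.
 */
#if 0
static void put_signed_pixels_clamped_c_sketch(const int16_t *block,
                                               uint8_t *pixels, int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j] + 128);
        block  += 8;
        pixels += line_size;
    }
}
#endif
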
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;

    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq        (%2), %%mm0    \n\t"
            "movq       8(%2), %%mm1    \n\t"
            "movq      16(%2), %%mm2    \n\t"
            "movq      24(%2), %%mm3    \n\t"
            "movq          %0, %%mm4    \n\t"
            "movq          %1, %%mm6    \n\t"
            "movq       %%mm4, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm4    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm4, %%mm0    \n\t"
            "paddsw     %%mm5, %%mm1    \n\t"
            "movq       %%mm6, %%mm5    \n\t"
            "punpcklbw  %%mm7, %%mm6    \n\t"
            "punpckhbw  %%mm7, %%mm5    \n\t"
            "paddsw     %%mm6, %%mm2    \n\t"
            "paddsw     %%mm5, %%mm3    \n\t"
            "packuswb   %%mm1, %%mm0    \n\t"
            "packuswb   %%mm3, %%mm2    \n\t"
            "movq       %%mm0, %0       \n\t"
            "movq       %%mm2, %1       \n\t"
            : "+m" (*pix), "+m" (*(pix + line_size))
            : "r" (p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}

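/*
 * Illustrative scalar equivalent of ff_add_pixels_clamped_mmx (not part of
 * the original file): a minimal sketch assuming av_clip_uint8().  Each
 * coefficient is added to the existing pixel and the sum is saturated to
 * 0..255 (the paddsw/packuswb pair above).
 */
#if 0
static void add_pixels_clamped_c_sketch(const int16_t *block, uint8_t *pixels,
                                        int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(pixels[j] + block[j]);
        block  += 8;
        pixels += line_size;
    }
}
#endif
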
/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 8 || w == 16. */
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                       int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;

    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1:                             \n\t"
            "movd            (%0), %%mm0    \n\t"
            "punpcklbw      %%mm0, %%mm0    \n\t"
            "punpcklwd      %%mm0, %%mm0    \n\t"
            "punpckldq      %%mm0, %%mm0    \n\t"
            "movq           %%mm0, -8(%0)   \n\t"
            "movq      -8(%0, %2), %%mm1    \n\t"
            "punpckhbw      %%mm1, %%mm1    \n\t"
            "punpckhwd      %%mm1, %%mm1    \n\t"
            "punpckhdq      %%mm1, %%mm1    \n\t"
            "movq           %%mm1, (%0, %2) \n\t"
            "add               %1, %0       \n\t"
            "cmp               %3, %0       \n\t"
            "jb                1b           \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg) wrap), "r" ((x86_reg) width),
              "r" (ptr + wrap * height));
    } else {
        __asm__ volatile (
            "1:                                 \n\t"
            "movd            (%0), %%mm0        \n\t"
            "punpcklbw      %%mm0, %%mm0        \n\t"
            "punpcklwd      %%mm0, %%mm0        \n\t"
            "punpckldq      %%mm0, %%mm0        \n\t"
            "movq           %%mm0, -8(%0)       \n\t"
            "movq           %%mm0, -16(%0)      \n\t"
            "movq      -8(%0, %2), %%mm1        \n\t"
            "punpckhbw      %%mm1, %%mm1        \n\t"
            "punpckhwd      %%mm1, %%mm1        \n\t"
            "punpckhdq      %%mm1, %%mm1        \n\t"
            "movq           %%mm1, (%0, %2)     \n\t"
            "movq           %%mm1, 8(%0, %2)    \n\t"
            "add               %1, %0           \n\t"
            "cmp               %3, %0           \n\t"
            "jb                1b               \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg) wrap), "r" ((x86_reg) width),
              "r" (ptr + wrap * height));
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq   (%1, %0), %%mm0         \n\t"
                "movq      %%mm0, (%0)          \n\t"
                "movq      %%mm0, (%0, %2)      \n\t"
                "movq      %%mm0, (%0, %2, 2)   \n\t"
                "movq      %%mm0, (%0, %3)      \n\t"
                "add          $8, %0            \n\t"
                "cmp          %4, %0            \n\t"
                "jb           1b                \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg) buf - (x86_reg) ptr - w),
                  "r" ((x86_reg) -wrap), "r" ((x86_reg) -wrap * 3),
                  "r" (ptr + width + 2 * w));
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1:                             \n\t"
                "movq   (%1, %0), %%mm0         \n\t"
                "movq      %%mm0, (%0)          \n\t"
                "movq      %%mm0, (%0, %2)      \n\t"
                "movq      %%mm0, (%0, %2, 2)   \n\t"
                "movq      %%mm0, (%0, %3)      \n\t"
                "add          $8, %0            \n\t"
                "cmp          %4, %0            \n\t"
                "jb           1b                \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg) last_line - (x86_reg) ptr - w),
                  "r" ((x86_reg) wrap), "r" ((x86_reg) wrap * 3),
                  "r" (ptr + width + 2 * w));
        }
    }
}

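/*
 * Illustrative scalar equivalent of ff_draw_edges_mmx (not part of the
 * original file): a minimal sketch assuming <string.h> memset()/memcpy(),
 * without the w == 8 || w == 16 restriction.  Each line's first and last
 * pixel are smeared 'w' pixels outwards, then the already-widened top and
 * bottom lines are replicated 'h' lines up and down (covering the corners).
 */
#if 0
static void draw_edges_c_sketch(uint8_t *buf, int wrap, int width, int height,
                                int w, int h, int sides)
{
    int y;

    /* left and right */
    for (y = 0; y < height; y++) {
        uint8_t *line = buf + y * wrap;
        memset(line - w,     line[0],         w);
        memset(line + width, line[width - 1], w);
    }

    /* top and bottom (including the corners) */
    for (y = 0; y < h; y++) {
        if (sides & EDGE_TOP)
            memcpy(buf - (y + 1) * wrap - w, buf - w, width + 2 * w);
        if (sides & EDGE_BOTTOM)
            memcpy(buf + (height + y) * wrap - w,
                   buf + (height - 1) * wrap - w, width + 2 * w);
    }
}
#endif
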
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    const int w    = 8;
    const int ix   = ox >> (16 + shift);
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox >> 4;
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
        // uses more than 16 bits of subpel mv (only at huge resolution)
        (dxx | dxy | dyx | dyy) & 15 ||
        (unsigned) ix >= width  - w ||
        (unsigned) iy >= height - h) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;

    __asm__ volatile (
        "movd         %0, %%mm6         \n\t"
        "pxor      %%mm7, %%mm7         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        "punpcklwd %%mm6, %%mm6         \n\t"
        :: "r" (1 << shift));

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq     %0, %%mm4     \n\t"
                "movq     %1, %%mm5     \n\t"
                "paddw    %2, %%mm4     \n\t"
                "paddw    %3, %%mm5     \n\t"
                "movq  %%mm4, %0        \n\t"
                "movq  %%mm5, %1        \n\t"
                "psrlw   $12, %%mm4     \n\t"
                "psrlw   $12, %%mm5     \n\t"
                : "+m" (*dx4), "+m" (*dy4)
                : "m" (*dxy4), "m" (*dyy4));

            __asm__ volatile (
                "movq      %%mm6, %%mm2 \n\t"
                "movq      %%mm6, %%mm1 \n\t"
                "psubw     %%mm4, %%mm2 \n\t"
                "psubw     %%mm5, %%mm1 \n\t"
                "movq      %%mm2, %%mm0 \n\t"
                "movq      %%mm4, %%mm3 \n\t"
                "pmullw    %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw    %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw    %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw    %%mm4, %%mm1 \n\t" // dx * (s - dy)

                "movd         %4, %%mm5 \n\t"
                "movd         %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw    %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy

                "movd         %2, %%mm5 \n\t"
                "movd         %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw    %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw    %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw        %5, %%mm1 \n\t"
                "paddw     %%mm3, %%mm2 \n\t"
                "paddw     %%mm1, %%mm0 \n\t"
                "paddw     %%mm2, %%mm0 \n\t"

                "psrlw        %6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "=m" (dst[x + y * stride])
                : "m" (src[0]), "m" (src[1]),
                  "m" (src[stride]), "m" (src[stride + 1]),
                  "m" (*r4), "m" (shift2));
            src += stride;
        }
        src += 4 - h * stride;
    }
}

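/*
 * Per-pixel operation of the inner loop above, written out as a scalar
 * expression (an illustrative note, not part of the original file).  With
 * s = 1 << shift and fractional offsets dx, dy (0 <= dx, dy < s), each
 * output pixel is a rounded bilinear blend of its four source neighbours:
 *
 *     dst[x + y * stride] = (src[0]          * (s - dx) * (s - dy) +
 *                            src[1]          *  dx      * (s - dy) +
 *                            src[stride]     * (s - dx) *  dy      +
 *                            src[stride + 1] *  dx      *  dy      +
 *                            r) >> (2 * shift);
 */
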
#endif /* HAVE_INLINE_ASM */