/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/hpeldsp.h"
#include "dsputil_mmx.h"

//#undef NDEBUG
//#include <assert.h>
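
/* Prototypes for the half-pel functions implemented in external assembly;
 * they are only available when the build was configured with yasm
 * (HAVE_YASM), hence the guard below. */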
#if HAVE_YASM
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
#endif /* HAVE_YASM */
#if HAVE_INLINE_ASM

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)

#define MOVQ_BFE(regd)                                  \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "paddb   %%"#regd", %%"#regd"   \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_wtwo))
#else
// for shared libraries it is better to build these constants in registers
// than to access them through memory (pcmpeqd -> -1)
#define MOVQ_BONE(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd  %%"#regd", %%"#regd"  \n\t"           \
        "psrlw          $15, %%"#regd"  \n\t"           \
        "packuswb %%"#regd", %%"#regd"  \n\t" ::)

#define MOVQ_WTWO(regd)                                 \
    __asm__ volatile (                                  \
        "pcmpeqd %%"#regd", %%"#regd"   \n\t"           \
        "psrlw         $15, %%"#regd"   \n\t"           \
        "psllw          $1, %%"#regd"   \n\t" ::)
#endif
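
/* In the PIC path above, the constants are synthesized in-register:
 * pcmpeqd sets all 64 bits, psrlw $15 leaves 0x0001 in every 16-bit word,
 * then packuswb packs that into 0x01 in every byte (BONE), while
 * psllw $1 instead gives 0x0002 in every word (WTWO). */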
// using regr as temporary and for the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe)       \
    "movq   "#rega", "#regr"            \n\t"           \
    "pand   "#regb", "#regr"            \n\t"           \
    "pxor   "#rega", "#regb"            \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq       $1, "#regb"            \n\t"           \
    "paddb  "#regb", "#regr"            \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe)              \
    "movq   "#rega", "#regr"            \n\t"           \
    "por    "#regb", "#regr"            \n\t"           \
    "pxor   "#rega", "#regb"            \n\t"           \
    "pand  "#regfe", "#regb"            \n\t"           \
    "psrlq       $1, "#regb"            \n\t"           \
    "psubb  "#regb", "#regr"            \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp)   \
    "movq  "#rega", "#regr"             \n\t"                   \
    "movq  "#regc", "#regp"             \n\t"                   \
    "pand  "#regb", "#regr"             \n\t"                   \
    "pand  "#regd", "#regp"             \n\t"                   \
    "pxor  "#rega", "#regb"             \n\t"                   \
    "pxor  "#regc", "#regd"             \n\t"                   \
    "pand    %%mm6, "#regb"             \n\t"                   \
    "pand    %%mm6, "#regd"             \n\t"                   \
    "psrlq      $1, "#regb"             \n\t"                   \
    "psrlq      $1, "#regd"             \n\t"                   \
    "paddb "#regb", "#regr"             \n\t"                   \
    "paddb "#regd", "#regp"             \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp)          \
    "movq  "#rega", "#regr"             \n\t"                   \
    "movq  "#regc", "#regp"             \n\t"                   \
    "por   "#regb", "#regr"             \n\t"                   \
    "por   "#regd", "#regp"             \n\t"                   \
    "pxor  "#rega", "#regb"             \n\t"                   \
    "pxor  "#regc", "#regd"             \n\t"                   \
    "pand    %%mm6, "#regb"             \n\t"                   \
    "pand    %%mm6, "#regd"             \n\t"                   \
    "psrlq      $1, "#regd"             \n\t"                   \
    "psrlq      $1, "#regb"             \n\t"                   \
    "psubb "#regb", "#regr"             \n\t"                   \
    "psubb "#regd", "#regp"             \n\t"
/***********************************/
/* MMX no rounding */
#define NO_RND 1
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)       PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef NO_RND

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
#define ff_put_pixels8_mmx ff_put_pixels8_mmxext

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#include "hpeldsp_avg_template.c"
#undef DEF

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext
#include "hpeldsp_avg_template.c"
#undef DEF

#endif /* HAVE_YASM */
#if HAVE_INLINE_ASM
#define put_no_rnd_pixels16_mmx    put_pixels16_mmx
#define put_no_rnd_pixels8_mmx     put_pixels8_mmx
#define put_pixels16_mmxext        put_pixels16_mmx
#define put_pixels8_mmxext         put_pixels8_mmx
#define put_pixels4_mmxext         put_pixels4_mmx
#define put_no_rnd_pixels16_mmxext put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmxext  put_no_rnd_pixels8_mmx
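
/* Plain block copy, 8 pixels wide and h rows tall. The loop is unrolled
 * four rows per iteration (REG_a holds 2 * line_size), so h must be a
 * multiple of 4. */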
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq  %%mm0, (%2)              \n\t"
        "movq  %%mm1, (%2, %3)          \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "movq  (%1    ), %%mm0          \n\t"
        "movq  (%1, %3), %%mm1          \n\t"
        "movq  %%mm0, (%2)              \n\t"
        "movq  %%mm1, (%2, %3)          \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "subl  $4, %0                   \n\t"
        "jnz   1b                       \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
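
/* Same copy loop widened to 16 pixels: each row goes through two mmx
 * registers (offsets 0 and 8). */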
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"      \n\t"
        ".p2align 3                     \n\t"
        "1:                             \n\t"
        "movq   (%1    ), %%mm0         \n\t"
        "movq  8(%1    ), %%mm4         \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq  8(%1, %3), %%mm5         \n\t"
        "movq  %%mm0,  (%2)             \n\t"
        "movq  %%mm4, 8(%2)             \n\t"
        "movq  %%mm1,  (%2, %3)         \n\t"
        "movq  %%mm5, 8(%2, %3)         \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "movq   (%1    ), %%mm0         \n\t"
        "movq  8(%1    ), %%mm4         \n\t"
        "movq   (%1, %3), %%mm1         \n\t"
        "movq  8(%1, %3), %%mm5         \n\t"
        "movq  %%mm0,  (%2)             \n\t"
        "movq  %%mm4, 8(%2)             \n\t"
        "movq  %%mm1,  (%2, %3)         \n\t"
        "movq  %%mm5, 8(%2, %3)         \n\t"
        "add   %%"REG_a", %1            \n\t"
        "add   %%"REG_a", %2            \n\t"
        "subl  $4, %0                   \n\t"
        "jnz   1b                       \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
#endif /* HAVE_INLINE_ASM */
void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)
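
/* Each pixels_tab row is indexed by half-pel position: [0] full-pel,
 * [1] horizontal half-pel (x2), [2] vertical half-pel (y2) and
 * [3] diagonal half-pel (xy2). */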
static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_INLINE_ASM
    SET_HPEL_FUNCS(put,        [0], 16, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
    SET_HPEL_FUNCS(avg,        [0], 16, mmx);
    SET_HPEL_FUNCS(avg_no_rnd,    , 16, mmx);
    SET_HPEL_FUNCS(put,        [1],  8, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
    SET_HPEL_FUNCS(avg,        [1],  8, mmx);
#endif /* HAVE_INLINE_ASM */
}
static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;

    c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
    }
#endif /* HAVE_YASM */

#if HAVE_MMXEXT_EXTERNAL
    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}
static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;

    c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_YASM */
}
static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_SSE2_EXTERNAL
    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
        c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
    }
#endif /* HAVE_SSE2_EXTERNAL */
}
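
/* The init functions below run in order of increasing capability; a later
 * init overwrites any pointers its CPU flag qualifies for, so the most
 * specialized applicable version ends up in the tables. */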
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX)
        hpeldsp_init_mmx(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        hpeldsp_init_mmxext(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        hpeldsp_init_3dnow(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        hpeldsp_init_sse2(c, flags, mm_flags);
}