/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/hpeldsp.h"
#include "dsputil_mmx.h"
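
/* Prototypes for the half-pel functions implemented in external x86
 * assembly; they are only referenced below inside HAVE_YASM blocks. */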
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                                    ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
                                           const uint8_t *pixels,
                                           ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
                          ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h);

#if HAVE_INLINE_ASM

/***********************************/
/* MMX no rounding */
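/* The DEF, SET_RND, PAVGBP and PAVGB macros parameterize
 * hpeldsp_rnd_template.c, which is included twice to generate the
 * no-rounding and rounding MMX variants of the halfpel functions. */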
#define NO_RND 1
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND   MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)       PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef NO_RND

/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND   MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)        PAVGB_MMX(a, b, c, e)

#include "hpeldsp_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

#endif /* HAVE_INLINE_ASM */

#if HAVE_YASM

/***********************************/
/* 3Dnow specific */
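/* hpeldsp_avg_template.c is instantiated once per CPU flavor; DEF appends
 * the CPU-specific suffix to each generated function name. */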
#define DEF(x) x ## _3dnow

#include "hpeldsp_avg_template.c"

#undef DEF

/***********************************/
/* MMXEXT specific */

#define DEF(x) x ## _mmxext

#include "hpeldsp_avg_template.c"

#undef DEF

#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx  put_pixels8_mmx
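
/* Copy an 8-pixel-wide block of h rows from pixels to block; the loop is
 * unrolled to move four rows per iteration using two MMX registers. */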
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"  \n\t"
        ".p2align 3                 \n\t"
        "1:                         \n\t"
        "movq  (%1    ), %%mm0      \n\t"
        "movq  (%1, %3), %%mm1      \n\t"
        "movq  %%mm0,    (%2)       \n\t"
        "movq  %%mm1,    (%2, %3)   \n\t"
        "add   %%"REG_a", %1        \n\t"
        "add   %%"REG_a", %2        \n\t"
        "movq  (%1    ), %%mm0      \n\t"
        "movq  (%1, %3), %%mm1      \n\t"
        "movq  %%mm0,    (%2)       \n\t"
        "movq  %%mm1,    (%2, %3)   \n\t"
        "add   %%"REG_a", %1        \n\t"
        "add   %%"REG_a", %2        \n\t"
        "subl  $4, %0               \n\t"
        "jnz   1b                   \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
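
/* Same as put_pixels8_mmx, but copies 16-pixel-wide rows using two 8-byte
 * movq transfers per row. */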
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
                             ptrdiff_t line_size, int h)
{
    __asm__ volatile (
        "lea   (%3, %3), %%"REG_a"  \n\t"
        ".p2align 3                 \n\t"
        "1:                         \n\t"
        "movq  (%1    ), %%mm0      \n\t"
        "movq 8(%1    ), %%mm4      \n\t"
        "movq  (%1, %3), %%mm1      \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq  %%mm0,    (%2)       \n\t"
        "movq  %%mm4,   8(%2)       \n\t"
        "movq  %%mm1,    (%2, %3)   \n\t"
        "movq  %%mm5,   8(%2, %3)   \n\t"
        "add   %%"REG_a", %1        \n\t"
        "add   %%"REG_a", %2        \n\t"
        "movq  (%1    ), %%mm0      \n\t"
        "movq 8(%1    ), %%mm4      \n\t"
        "movq  (%1, %3), %%mm1      \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq  %%mm0,    (%2)       \n\t"
        "movq  %%mm4,   8(%2)       \n\t"
        "movq  %%mm1,    (%2, %3)   \n\t"
        "movq  %%mm5,   8(%2, %3)   \n\t"
        "add   %%"REG_a", %1        \n\t"
        "add   %%"REG_a", %2        \n\t"
        "subl  $4, %0               \n\t"
        "jnz   1b                   \n\t"
        : "+g"(h), "+r"(pixels), "+r"(block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

#endif /* HAVE_INLINE_ASM */

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU)                                     \
    do {                                                                        \
        c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
    } while (0)
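
/* For example, SET_HPEL_FUNCS(put, [0], 16, mmx) fills
 * c->put_pixels_tab[0][0..3] with put_pixels16_mmx, put_pixels16_x2_mmx,
 * put_pixels16_y2_mmx and put_pixels16_xy2_mmx. */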

static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_INLINE_ASM
    SET_HPEL_FUNCS(put,        [0], 16, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
    SET_HPEL_FUNCS(avg,        [0], 16, mmx);
    SET_HPEL_FUNCS(avg_no_rnd,    , 16, mmx);
    SET_HPEL_FUNCS(put,        [1],  8, mmx);
    SET_HPEL_FUNCS(put_no_rnd, [1],  8, mmx);
    SET_HPEL_FUNCS(avg,        [1],  8, mmx);
#endif /* HAVE_INLINE_ASM */
}

static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
    c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;

    c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;

        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
    }
#endif /* HAVE_YASM */
}

static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
    c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

    c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;

    c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
    c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
    c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;

    if (!(flags & CODEC_FLAG_BITEXACT)) {
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;

        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
        c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
    }

    if (flags & CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
        c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
        c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
    }
#endif /* HAVE_YASM */
}

static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int mm_flags)
{
#if HAVE_YASM
    if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        // these functions are slower than mmx on AMD, but faster on Intel
        c->put_pixels_tab[0][0]        = ff_put_pixels16_sse2;
        c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
        c->avg_pixels_tab[0][0]        = ff_avg_pixels16_sse2;
    }
#endif /* HAVE_YASM */
}
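
/* The per-CPU init helpers are called in sequence, so a later helper
 * overwrites table entries installed by an earlier one whenever the
 * corresponding CPU flag is set. */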
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int mm_flags = av_get_cpu_flags();

    if (HAVE_MMX && mm_flags & AV_CPU_FLAG_MMX)
        hpeldsp_init_mmx(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        hpeldsp_init_mmxext(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_3DNOW)
        hpeldsp_init_3dnow(c, flags, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        hpeldsp_init_sse2(c, flags, mm_flags);
}