/*
 * RV40 decoder motion compensation functions x86-optimised
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder motion compensation functions x86-optimised
 * 2,0 and 0,2 have h264 equivalents.
 * 3,3 is bugged in the rv40 format and maps to _xy2 version
 */

#include "libavcodec/rv34dsp.h"
#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "hpeldsp.h"

#define DEFINE_FN(op, size, insn) \
static void op##_rv40_qpel##size##_mc33_##insn(uint8_t *dst, const uint8_t *src, \
                                               ptrdiff_t stride) \
{ \
    ff_##op##_pixels##size##_xy2_##insn(dst, src, stride, size); \
}
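
/*
 * Illustrative expansion (a sketch, not part of the build): with the macro
 * above, DEFINE_FN(put, 8, mmx) should expand to roughly the following
 * wrapper, which routes the bugged (3,3) qpel position to the half-pel
 * _xy2 routine of the same block size:
 *
 *     static void put_rv40_qpel8_mc33_mmx(uint8_t *dst, const uint8_t *src,
 *                                         ptrdiff_t stride)
 *     {
 *         ff_put_pixels8_xy2_mmx(dst, src, stride, 8);
 *     }
 */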

#if HAVE_X86ASM
void ff_put_rv40_chroma_mc8_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_put_rv40_chroma_mc4_mmx   (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
                                   ptrdiff_t stride, int h, int x, int y);

#define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);

DECLARE_WEIGHT(mmxext)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)

/** @{ */
/**
 * Define one qpel function.
 * LOOPSIZE must be already set to the number of pixels processed per
 * iteration in the inner loop of the called functions.
 * COFF(x) must be already defined so as to provide the offset into any
 * array of coeffs used by the called function for the qpel position x.
 */
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT)                           \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst,  \
                                                         const uint8_t *src, \
                                                         ptrdiff_t stride)   \
{                                                                       \
    int i;                                                              \
    if (PH && PV) {                                                     \
        LOCAL_ALIGNED(16, uint8_t, tmp, [SIZE * (SIZE + 5)]);           \
        uint8_t *tmpptr = tmp + SIZE * 2;                               \
        src -= stride * 2;                                              \
                                                                        \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride,    \
                                     SIZE + 5, HCOFF(PH));              \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i,   \
                                         SIZE, SIZE, VCOFF(PV));        \
    } else if (PV) {                                                    \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, VCOFF(PV));     \
    } else {                                                            \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, HCOFF(PH));     \
    }                                                                   \
}
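
/*
 * Illustrative expansion (a sketch, assuming the SSE2 parameters #defined
 * further down: LOOPSIZE 8, HCOFF(x)/VCOFF(x) = 64 * ((x) - 1)):
 * QPEL_FUNC_DECL(put_, 16, 3, 1, _sse2) should expand to roughly the code
 * below, showing only the branch taken for PH = 3, PV = 1. Since both
 * fractional offsets are nonzero, the horizontal filter first writes
 * SIZE + 5 rows (the block plus the rows above and below that the vertical
 * filter needs, hence src -= stride * 2) into a temporary buffer, and the
 * vertical pass then filters that buffer into dst:
 *
 *     static void put_rv40_qpel16_mc31_sse2(uint8_t *dst, const uint8_t *src,
 *                                           ptrdiff_t stride)
 *     {
 *         int i;
 *         LOCAL_ALIGNED(16, uint8_t, tmp, [16 * (16 + 5)]);
 *         uint8_t *tmpptr = tmp + 16 * 2;
 *         src -= stride * 2;
 *
 *         for (i = 0; i < 16; i += 8)
 *             ff_put_rv40_qpel_h_sse2(tmp + i, 16, src + i, stride,
 *                                     16 + 5, 64 * (3 - 1));
 *         for (i = 0; i < 16; i += 8)
 *             ff_put_rv40_qpel_v_sse2(dst + i, stride, tmpptr + i,
 *                                     16, 16, 64 * (1 - 1));
 *     }
 */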

/** Declare functions for sizes 8 and 16 and given operations
 *  and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
    QPEL_FUNC_DECL(OP,  8, PH, PV, OPT)  \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)

/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT)                                         \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride,  \
                                  const uint8_t *src,                 \
                                  ptrdiff_t srcStride,                \
                                  int len, int m);                    \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride,  \
                                  const uint8_t *src,                 \
                                  ptrdiff_t srcStride,                \
                                  int len, int m);                    \
QPEL_FUNCS_DECL(OP, 0, 1, OPT)                                        \
QPEL_FUNCS_DECL(OP, 0, 3, OPT)                                        \
QPEL_FUNCS_DECL(OP, 1, 0, OPT)                                        \
QPEL_FUNCS_DECL(OP, 1, 1, OPT)                                        \
QPEL_FUNCS_DECL(OP, 1, 2, OPT)                                        \
QPEL_FUNCS_DECL(OP, 1, 3, OPT)                                        \
QPEL_FUNCS_DECL(OP, 2, 1, OPT)                                        \
QPEL_FUNCS_DECL(OP, 2, 2, OPT)                                        \
QPEL_FUNCS_DECL(OP, 2, 3, OPT)                                        \
QPEL_FUNCS_DECL(OP, 3, 0, OPT)                                        \
QPEL_FUNCS_DECL(OP, 3, 1, OPT)                                        \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
/** @} */

#define LOOPSIZE  8
#define HCOFF(x)  (32 * ((x) - 1))
#define VCOFF(x)  (32 * ((x) - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)

#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  8
#define HCOFF(x)  (64 * ((x) - 1))
#define VCOFF(x)  (64 * ((x) - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)

#if ARCH_X86_32
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  4
#define HCOFF(x)  (64 * ((x) - 1))
#define VCOFF(x)  (64 * ((x) - 1))
QPEL_MC_DECL(put_, _mmx)

#define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmxext)

#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
#endif

/** @{ */
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
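
/*
 * Illustrative expansion (a sketch): QPEL_FUNC_SET(put_, 16, 3, 1, _sse2)
 * should expand to
 *
 *     c->put_pixels_tab[0][7] = put_rv40_qpel16_mc31_sse2;
 *
 * i.e. the first table index selects the block size (0 for 16x16, 1 for 8x8)
 * and the second encodes the qpel position as 4 * PV + PH, which is why
 * entry 15 corresponds to the (3,3) position handled by the mc33 wrappers.
 */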

/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
    QPEL_FUNC_SET(OP,  8, PH, PV, OPT)  \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)

/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT)       \
    QPEL_FUNCS_SET (OP, 0, 1, OPT) \
    QPEL_FUNCS_SET (OP, 0, 3, OPT) \
    QPEL_FUNCS_SET (OP, 1, 0, OPT) \
    QPEL_FUNCS_SET (OP, 1, 1, OPT) \
    QPEL_FUNCS_SET (OP, 1, 2, OPT) \
    QPEL_FUNCS_SET (OP, 1, 3, OPT) \
    QPEL_FUNCS_SET (OP, 2, 1, OPT) \
    QPEL_FUNCS_SET (OP, 2, 2, OPT) \
    QPEL_FUNCS_SET (OP, 2, 3, OPT) \
    QPEL_FUNCS_SET (OP, 3, 0, OPT) \
    QPEL_FUNCS_SET (OP, 3, 1, OPT) \
    QPEL_FUNCS_SET (OP, 3, 2, OPT)
/** @} */

DEFINE_FN(put, 8, ssse3)

DEFINE_FN(put, 16, sse2)
DEFINE_FN(put, 16, ssse3)

DEFINE_FN(avg, 8, mmxext)
DEFINE_FN(avg, 8, ssse3)

DEFINE_FN(avg, 16, sse2)
DEFINE_FN(avg, 16, ssse3)
#endif /* HAVE_X86ASM */

#if HAVE_MMX_INLINE
DEFINE_FN(put, 8, mmx)
DEFINE_FN(avg, 8, mmx)
DEFINE_FN(put, 16, mmx)
DEFINE_FN(avg, 16, mmx)
#endif
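
/*
 * The init routine below fills the function pointer tables in layers: the CPU
 * flag checks run in ascending order, so the SSE2 and SSSE3 blocks overwrite
 * entries installed by the earlier MMX/MMXEXT blocks on capable CPUs. The
 * DEFINE_FN wrappers above cover only the (3,3) position; the other qpel
 * positions set up here are installed through QPEL_MC_SET.
 */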

av_cold void ff_rv40dsp_init_x86(RV34DSPContext *c)
{
    av_unused int cpu_flags = av_get_cpu_flags();

#if HAVE_MMX_INLINE
    if (INLINE_MMX(cpu_flags)) {
        c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_mmx;
        c->put_pixels_tab[1][15] = put_rv40_qpel8_mc33_mmx;
        c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_mmx;
        c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_mmx;
    }
#endif /* HAVE_MMX_INLINE */

#if HAVE_X86ASM
    if (EXTERNAL_MMX(cpu_flags)) {
        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
#if ARCH_X86_32
        QPEL_MC_SET(put_, _mmx)
#endif
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _3dnow)
#endif
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_pixels_tab[1][15]        = avg_rv40_qpel8_mc33_mmxext;
        c->avg_chroma_pixels_tab[0]     = ff_avg_rv40_chroma_mc8_mmxext;
        c->avg_chroma_pixels_tab[1]     = ff_avg_rv40_chroma_mc4_mmxext;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmxext;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmxext;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmxext;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmxext;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _mmxext)
#endif
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_pixels_tab[0][15]        = put_rv40_qpel16_mc33_sse2;
        c->avg_pixels_tab[0][15]        = avg_rv40_qpel16_mc33_sse2;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
        QPEL_MC_SET(put_, _sse2)
        QPEL_MC_SET(avg_, _sse2)
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->put_pixels_tab[0][15]        = put_rv40_qpel16_mc33_ssse3;
        c->put_pixels_tab[1][15]        = put_rv40_qpel8_mc33_ssse3;
        c->avg_pixels_tab[0][15]        = avg_rv40_qpel16_mc33_ssse3;
        c->avg_pixels_tab[1][15]        = avg_rv40_qpel8_mc33_ssse3;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
        QPEL_MC_SET(put_, _ssse3)
        QPEL_MC_SET(avg_, _ssse3)
    }
#endif /* HAVE_X86ASM */
}