/*
 * VP8 DSP functions x86-optimized
 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp8dsp.h"

#if HAVE_X86ASM

/*
 * MC functions
 *
 * Prototypes for the assembly motion-compensation kernels.  All share the
 * common VP8 MC signature: dst/src pointers with their strides, the block
 * height, and the mx/my subpel position pair.
 */

/* 4-wide 4/6-tap subpel (epel) filters, MMXEXT */
void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
/* 8-wide epel filters, SSE2 */
void ff_put_vp8_epel8_h4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
/* 4- and 8-wide epel filters, SSSE3 */
void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
/* bilinear subpel filters (horizontal and vertical passes) */
void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
/* plain copies (fullpel); mx/my are unused but keep the common signature */
void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
                             uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
/* Synthesize a width-16 filter from two width-8 calls (left + right half). */
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst,  ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst + 8, dststride, src + 8, srcstride, height, mx, my); \
}
/* Synthesize a width-8 filter from two width-4 calls (left + right half). */
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst,  ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}

/* The MMXEXT wrappers are only instantiated (and used) on 32-bit x86. */
#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
TAP_W8 (mmxext, epel, v4)
TAP_W8 (mmxext, epel, v6)
TAP_W16(mmxext, epel, v6)
TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
#endif
TAP_W16(sse2,   epel, h6)
TAP_W16(sse2,   epel, v6)
TAP_W16(sse2,   bilinear, h)
TAP_W16(sse2,   bilinear, v)
TAP_W16(ssse3,  epel, h6)
TAP_W16(ssse3,  epel, v6)
TAP_W16(ssse3,  bilinear, h)
TAP_W16(ssse3,  bilinear, v)
/*
 * Combined horizontal+vertical subpel filter.  The horizontal pass is run
 * first into an aligned temporary buffer, producing TAPNUMY - 1 extra rows
 * so the vertical filter has its full vertical footprint; the vertical pass
 * then reads from the temporary (offset to the filter's center row) into
 * dst.  src is rewound by (TAPNUMY / 2 - 1) rows to feed the extra rows.
 */
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
    uint8_t *dst,  ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
    src -= srcstride * (TAPNUMY / 2 - 1); \
    ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
        tmp, SIZE,      src,    srcstride, height + TAPNUMY - 1, mx, my); \
    ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
        dst, dststride, tmpptr, SIZE,      height,               mx, my); \
}

#if ARCH_X86_32
/* 32-bit x86 additionally needs the 8- and 16-wide MMXEXT combinations. */
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y,  4,  8) \
HVTAP(mmxext, 8, x, y,  8, 16)

HVTAP(mmxext, 8, 6, 6, 16, 16)
#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y,  4,  8)
#endif

HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)

/* SSE2 and SSSE3 share the same set of h/v tap combinations. */
#define HVTAPSSE2(x, y, w) \
HVTAP(sse2,  16, x, y, w, 16) \
HVTAP(ssse3, 16, x, y, w, 16)

HVTAPSSE2(4, 4, 8)
HVTAPSSE2(4, 6, 8)
HVTAPSSE2(6, 4, 8)
HVTAPSSE2(6, 6, 8)
HVTAPSSE2(6, 6, 16)

/* 4-wide hv combinations exist only for SSSE3. */
HVTAP(ssse3, 16, 4, 4, 4, 8)
HVTAP(ssse3, 16, 4, 6, 4, 8)
HVTAP(ssse3, 16, 6, 4, 4, 8)
HVTAP(ssse3, 16, 6, 6, 4, 8)
/*
 * Combined horizontal+vertical bilinear filter: horizontal pass into an
 * aligned temporary (one extra row for the 2-tap vertical footprint), then
 * the vertical pass from the temporary into dst.
 */
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
    uint8_t *dst,  ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
    ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
        tmp, SIZE,      src, srcstride, height + 1, mx, my); \
    ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
        dst, dststride, tmp, SIZE,      height,     mx, my); \
}

HVBILIN(mmxext,  8,  4,  8)
#if ARCH_X86_32
/* 8- and 16-wide MMXEXT variants are only needed on 32-bit x86. */
HVBILIN(mmxext,  8,  8, 16)
HVBILIN(mmxext,  8, 16, 16)
#endif
HVBILIN(sse2,    8,  8, 16)
HVBILIN(sse2,    8, 16, 16)
HVBILIN(ssse3,   8,  4,  8)
HVBILIN(ssse3,   8,  8, 16)
HVBILIN(ssse3,   8, 16, 16)
/*
 * IDCT / WHT functions
 */
void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
                            ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
                              ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
                               ptrdiff_t stride);
void ff_vp8_luma_dc_wht_mmx(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_idct_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);

/*
 * Loop-filter prototypes, declared once per SIMD flavour.
 * NOTE(review): parameter meanings inferred from their names — flim looks
 * like the simple-filter limit and e/i/hvt like the edge limit, interior
 * limit and high-edge-variance threshold; confirm against the asm.
 */
#define DECLARE_LOOP_FILTER(NAME) \
void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt);

DECLARE_LOOP_FILTER(mmx)
DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)

#endif /* HAVE_X86ASM */
/*
 * Table-filling helpers.  put_vp8_epel_pixels_tab[IDX][my][mx] is indexed by
 * subpel class: 0 = fullpel, 1 = 4-tap, 2 = 6-tap (per visible usage below;
 * bilinear tables use 0 = fullpel, nonzero = bilinear).
 */
/* Luma only ever needs copy or six-tap, so only the 6-tap slots are set. */
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_   ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_   ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT

/* Full epel set: the 4-tap slots plus everything VP8_LUMA_MC_FUNC sets. */
#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_   ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_   ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)

/* Bilinear set: _h for h-only, _v for v-only, _hv for both-direction slots. */
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_  ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_  ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_  ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_  ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
/**
 * Install x86-optimized VP7/VP8 motion-compensation function pointers
 * (epel, bilinear and fullpel copy) into @p c.
 *
 * CPU feature sets are probed from weakest to strongest, so an assignment
 * made under a later EXTERNAL_*() check deliberately overrides any earlier
 * one for the same table slot.  A no-op when built without x86 asm support.
 */
av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
#if ARCH_X86_32
        /* on x86-64 the 16-wide copy is handled by the SSE version below */
        c->put_vp8_epel_pixels_tab[0][0][0]     =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
        c->put_vp8_epel_pixels_tab[1][0][0]     =
        c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        VP8_MC_FUNC(2, 4, mmxext);
        VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
        /* wider MMXEXT variants are only compiled on 32-bit x86 */
        VP8_LUMA_MC_FUNC(0, 16, mmxext);
        VP8_MC_FUNC(1, 8, mmxext);
        VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
        VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
#endif
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->put_vp8_epel_pixels_tab[0][0][0]     =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
    }

    /* SSE2_SLOW is accepted too: these kernels are still a win there */
    if (EXTERNAL_SSE2(cpu_flags) || EXTERNAL_SSE2_SLOW(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, sse2);
        VP8_MC_FUNC(1, 8, sse2);
        VP8_BILINEAR_MC_FUNC(0, 16, sse2);
        VP8_BILINEAR_MC_FUNC(1, 8, sse2);
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, ssse3);
        VP8_MC_FUNC(1, 8, ssse3);
        VP8_MC_FUNC(2, 4, ssse3);
        VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
        VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
        VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
    }
#endif /* HAVE_X86ASM */
}
/**
 * Install x86-optimized VP8 IDCT, WHT and loop-filter function pointers
 * into @p c.
 *
 * Like ff_vp78dsp_init_x86(), checks run from weakest to strongest CPU
 * feature set so later assignments override earlier ones.  Note the split
 * SSE2 handling: vertical filters are enabled even on SSE2_SLOW CPUs,
 * while the horizontal ones (and the IDCTs) require full-speed SSE2.
 * A no-op when built without x86 asm support.
 */
av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
#if ARCH_X86_32
        /* the remaining MMX kernels are only built on 32-bit x86 */
        c->vp8_idct_dc_add            = ff_vp8_idct_dc_add_mmx;
        c->vp8_idct_dc_add4y          = ff_vp8_idct_dc_add4y_mmx;
        c->vp8_idct_add               = ff_vp8_idct_add_mmx;
        c->vp8_luma_dc_wht            = ff_vp8_luma_dc_wht_mmx;
        c->vp8_v_loop_filter_simple   = ff_vp8_v_loop_filter_simple_mmx;
        c->vp8_h_loop_filter_simple   = ff_vp8_h_loop_filter_simple_mmx;
        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;
        c->vp8_v_loop_filter16y       = ff_vp8_v_loop_filter16y_mbedge_mmx;
        c->vp8_h_loop_filter16y       = ff_vp8_h_loop_filter16y_mbedge_mmx;
        c->vp8_v_loop_filter8uv       = ff_vp8_v_loop_filter8uv_mbedge_mmx;
        c->vp8_h_loop_filter8uv       = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (EXTERNAL_MMXEXT(cpu_flags)) {
#if ARCH_X86_32
        c->vp8_v_loop_filter_simple   = ff_vp8_v_loop_filter_simple_mmxext;
        c->vp8_h_loop_filter_simple   = ff_vp8_h_loop_filter_simple_mmxext;
        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;
        c->vp8_v_loop_filter16y       = ff_vp8_v_loop_filter16y_mbedge_mmxext;
        c->vp8_h_loop_filter16y       = ff_vp8_h_loop_filter16y_mbedge_mmxext;
        c->vp8_v_loop_filter8uv       = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
        c->vp8_h_loop_filter8uv       = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->vp8_idct_add    = ff_vp8_idct_add_sse;
        c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
    }

    /* vertical filters: fine even on SSE2_SLOW CPUs */
    if (EXTERNAL_SSE2(cpu_flags) || EXTERNAL_SSE2_SLOW(cpu_flags)) {
        c->vp8_v_loop_filter_simple   = ff_vp8_v_loop_filter_simple_sse2;
        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
        c->vp8_v_loop_filter16y       = ff_vp8_v_loop_filter16y_mbedge_sse2;
        c->vp8_v_loop_filter8uv       = ff_vp8_v_loop_filter8uv_mbedge_sse2;
    }

    /* horizontal filters and IDCTs: require full-speed SSE2 */
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->vp8_idct_dc_add            = ff_vp8_idct_dc_add_sse2;
        c->vp8_idct_dc_add4y          = ff_vp8_idct_dc_add4y_sse2;
        c->vp8_h_loop_filter_simple   = ff_vp8_h_loop_filter_simple_sse2;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
        c->vp8_h_loop_filter16y       = ff_vp8_h_loop_filter16y_mbedge_sse2;
        c->vp8_h_loop_filter8uv       = ff_vp8_h_loop_filter8uv_mbedge_sse2;
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->vp8_v_loop_filter_simple   = ff_vp8_v_loop_filter_simple_ssse3;
        c->vp8_h_loop_filter_simple   = ff_vp8_h_loop_filter_simple_ssse3;
        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;
        c->vp8_v_loop_filter16y       = ff_vp8_v_loop_filter16y_mbedge_ssse3;
        c->vp8_h_loop_filter16y       = ff_vp8_h_loop_filter16y_mbedge_ssse3;
        c->vp8_v_loop_filter8uv       = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
        c->vp8_h_loop_filter8uv       = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
    }

    if (EXTERNAL_SSE4(cpu_flags)) {
        c->vp8_idct_dc_add          = ff_vp8_idct_dc_add_sse4;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
        c->vp8_h_loop_filter16y     = ff_vp8_h_loop_filter16y_mbedge_sse4;
        c->vp8_h_loop_filter8uv     = ff_vp8_h_loop_filter8uv_mbedge_sse4;
    }
#endif /* HAVE_X86ASM */
}
  411. }