/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"
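/* Packed bytes {3,1,3,1,...}. A reading of the constant (not documented in
 * the original): when filtering field macroblocks it replaces ff_pb_3 as the
 * per-component mv-difference bias, so the x component keeps the
 * |dmv| >= 4 threshold while the y threshold tightens to |dmv| >= 2
 * (field lines are vertically half-resolution); see the paddb/psubusb pair
 * in the deblocking macro below. */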
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1) = 0x0103010301030103ULL;
/***********************************/
/* IDCT */

void ff_h264_idct_add_mmx     (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_mmx    (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_sse2   (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);

void ff_h264_idct_add16_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_sse2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx       (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx2      (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

void ff_h264_idct_add16_sse2     (uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_sse2      (uint8_t **dest, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);

/***********************************/
/* deblocking */
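/* Scalar sketch of the strength rule each iteration of the macro below
 * applies to a 4-pixel edge segment (intra edges, bS = 3/4, are handled by
 * the caller; b is the current 4x4 block, bn its neighbour at offset d_idx;
 * the ref/mv tests repeat for both reference lists in the bidir case):
 *
 *     if (nnz[b] || nnz[bn])
 *         bS = 2;
 *     else if (ref[b] != ref[bn] ||
 *              FFABS(mv[b][0] - mv[bn][0]) >= 4 ||
 *              FFABS(mv[b][1] - mv[bn][1]) >= 4)  // y limit is 2 for field MBs
 *         bS = 1;
 *     else
 *         bS = 0;
 *
 * The MMX code evaluates four segments per loop iteration. */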
#define h264_loop_filter_strength_iteration_mmx2(bS, nz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
                __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
                ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd      %a3(%0,%2), %%mm2 \n" \
                        "punpckldq %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb     %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb     %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
                        \
                        "por       %%mm1, %%mm0 \n" \
                        "movq      %a5(%1,%2,4), %%mm1 \n" \
                        "movq      %a6(%1,%2,4), %%mm2 \n" \
                        "movq      %%mm1, %%mm3 \n" \
                        "movq      %%mm2, %%mm4 \n" \
                        "psubw      48(%1,%2,4), %%mm1 \n" \
                        "psubw      56(%1,%2,4), %%mm2 \n" \
                        "psubw     208(%1,%2,4), %%mm3 \n" \
                        "psubw     216(%1,%2,4), %%mm4 \n" \
                        "packsswb  %%mm2, %%mm1 \n" \
                        "packsswb  %%mm4, %%mm3 \n" \
                        "paddb     %%mm6, %%mm1 \n" \
                        "paddb     %%mm6, %%mm3 \n" \
                        "psubusb   %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb   %%mm5, %%mm3 \n" \
                        "packsswb  %%mm3, %%mm1 \n" \
                        \
                        "por       %%mm1, %%mm0 \n" \
                        "movq      %a7(%1,%2,4), %%mm1 \n" \
                        "movq      %a8(%1,%2,4), %%mm2 \n" \
                        "movq      %%mm1, %%mm3 \n" \
                        "movq      %%mm2, %%mm4 \n" \
                        "psubw      48(%1,%2,4), %%mm1 \n" \
                        "psubw      56(%1,%2,4), %%mm2 \n" \
                        "psubw     208(%1,%2,4), %%mm3 \n" \
                        "psubw     216(%1,%2,4), %%mm4 \n" \
                        "packsswb  %%mm2, %%mm1 \n" \
                        "packsswb  %%mm4, %%mm3 \n" \
                        "paddb     %%mm6, %%mm1 \n" \
                        "paddb     %%mm6, %%mm3 \n" \
                        "psubusb   %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb   %%mm5, %%mm3 \n" \
                        "packsswb  %%mm3, %%mm1 \n" \
                        \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por       %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub    %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd     12(%0,%2), %%mm0 \n" \
                        "psubb   %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "paddb    %%mm6, %%mm1 \n" \
                        "psubusb  %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb %%mm1, %%mm1 \n" \
                        "por      %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)
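/* Computes the full bS[2][4][4] strength table for one macroblock: the first
 * iteration call handles dir == 1 (horizontal edges, neighbour one block row
 * up, d_idx = -8), the second dir == 0 (vertical edges, neighbour one block
 * to the left, d_idx = -1). mm5-mm7 are preloaded with the comparison
 * constants and stay live across both calls. */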
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;

    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

    // bS[0] (vertical edges) was written one segment row at a time, i.e.
    // transposed; restore the bS[0][edge][segment] layout
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
#define LF_FUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                                       int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, OPT) \
void ff_x264_deblock_ ## DIR ## _ ## TYPE ## _ ## OPT (uint8_t *pix, int stride, \
                                                       int alpha, int beta);

LF_FUNC (h,  chroma,       mmxext)
LF_IFUNC(h,  chroma_intra, mmxext)
LF_FUNC (v,  chroma,       mmxext)
LF_IFUNC(v,  chroma_intra, mmxext)

LF_FUNC (h,  luma,         mmxext)
LF_IFUNC(h,  luma_intra,   mmxext)
#if HAVE_YASM && ARCH_X86_32
LF_FUNC (v8, luma,         mmxext)
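/* The v8 kernel filters an 8-pixel-wide strip, so the 16-pixel luma edge is
 * split into two halves. tc0 entries are -1 for segments that must not be
 * filtered; (tc0[0] & tc0[1]) >= 0 is false only when both entries are
 * negative (the sign bit survives the AND only if set in both), which lets
 * us skip a half whose two segments are both disabled. */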
static void ff_x264_deblock_v_luma_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_x264_deblock_v8_luma_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra, mmxext)
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif

LF_FUNC (h, luma,       sse2)
LF_IFUNC(h, luma_intra, sse2)
LF_FUNC (v, luma,       sse2)
LF_IFUNC(v, luma_intra, sse2)

/***********************************/
/* weighted prediction */
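/* Prototypes for the explicit weighted-prediction kernels (external asm;
 * their use below is guarded by HAVE_YASM). As a scalar sketch, the
 * unidirectional variant applies the H.264 formula to every pixel of the
 * WxH block (the rounding term vanishes when log2_denom == 0):
 *
 *     p = av_clip_uint8(((p * weight + (1 << log2_denom >> 1))
 *                        >> log2_denom) + offset);
 *
 * The biweight variants blend dst and src with two weights and a combined
 * offset before the analogous shift, rounding and clip. */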
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)
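/* Runtime dispatch: start from the MMX implementations and override each
 * function pointer with the MMX2/SSE2/SSSE3 version when the corresponding
 * CPU flag is set, so the fastest supported variant wins. */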
void ff_h264dsp_init_x86(H264DSPContext *c)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
    }
#if HAVE_YASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_idct_dc_add    = ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add   = ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
            c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
            c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
            c->h264_h_loop_filter_chroma_intra= ff_x264_deblock_h_chroma_intra_mmxext;
#if ARCH_X86_32
            c->h264_v_loop_filter_luma      = ff_x264_deblock_v_luma_mmxext;
            c->h264_h_loop_filter_luma      = ff_x264_deblock_h_luma_mmxext;
            c->h264_v_loop_filter_luma_intra= ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra= ff_x264_deblock_h_luma_intra_mmxext;
#endif
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (mm_flags&AV_CPU_FLAG_SSE2) {
                c->h264_idct8_add = ff_h264_idct8_add_sse2;
                c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
                c->h264_v_loop_filter_luma      = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma      = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra= ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra= ff_x264_deblock_h_luma_intra_sse2;
#endif

                c->h264_idct_add16     = ff_h264_idct_add16_sse2;
                c->h264_idct_add8      = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_sse2;
            }
            if (mm_flags&AV_CPU_FLAG_SSSE3) {
                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
            }
        }
    }
#endif
}