fate failures on Win64.

Originally committed as revision 24989 to svn://svn.ffmpeg.org/ffmpeg/trunk
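All of the kernels touched by this patch compute the same 2-D bilinear chroma interpolation; the H.264, VC-1 and RV40 variants differ only in the rounding bias added before the final shift. A minimal scalar sketch of what the put mc8 case computes (hypothetical reference helper, not part of the patch):

    #include <stdint.h>

    /* Scalar reference for the put_*_chroma_mc8 kernels below; x and y
     * are the fractional motion vector components, each in [0,7]. */
    static void ref_chroma_mc8(uint8_t *dst, const uint8_t *src,
                               int stride, int h, int x, int y)
    {
        const int A = (8 - x) * (8 - y);
        const int B =      x  * (8 - y);
        const int C = (8 - x) *      y;
        const int D =      x  *      y;   /* A + B + C + D == 64 */

        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                dst[j] = (A * src[j]          + B * src[j + 1] +
                          C * src[j + stride] + D * src[j + stride + 1] +
                          32) >> 6;          /* 32 is the H.264 2-D bias */
            src += stride;
            dst += stride;
        }
    }

The assembly below special-cases x == 0 && y == 0 (plain copy), x == 0 or y == 0 (one-dimensional two-tap filter with a 3-bit shift), and implements the avg variants by averaging the result with dst.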
@@ -36,6 +36,7 @@ YASM-OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp.o
MMX-OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp-init.o
MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \
                         x86/deinterlace.o \
                         x86/h264_chromamc.o \
                         $(YASM-OBJS-yes)
MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
@@ -1,304 +0,0 @@
/*
 * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
 *                    Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * MMX optimized version of (put|avg)_h264_chroma_mc8.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name
 * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg
 * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
 */
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg)
{
    DECLARE_ALIGNED(8, uint64_t, AA);
    DECLARE_ALIGNED(8, uint64_t, DD);
    int i;

    if(y==0 && x==0) {
        /* no filter needed */
        H264_CHROMA_MC8_MV0(dst, src, stride, h);
        return;
    }

    assert(x<8 && y<8 && x>=0 && y>=0);

    if(y==0 || x==0)
    {
        /* 1 dimensional filter only */
        const int dxy = x ? 1 : stride;

        __asm__ volatile(
            "movd %0, %%mm5\n\t"
            "movq %1, %%mm4\n\t"
            "movq %2, %%mm6\n\t"         /* mm6 = rnd >> 3 */
            "punpcklwd %%mm5, %%mm5\n\t"
            "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
            "pxor %%mm7, %%mm7\n\t"
            "psubw %%mm5, %%mm4\n\t"     /* mm4 = A = 8-x */
            :: "rm"(x+y), "m"(ff_pw_8), "m"(*(rnd_reg+1)));

        for(i=0; i<h; i++) {
            __asm__ volatile(
                /* mm0 = src[0..7], mm1 = src[1..8] */
                "movq %0, %%mm0\n\t"
                "movq %1, %%mm2\n\t"
                :: "m"(src[0]), "m"(src[dxy]));

            __asm__ volatile(
                /* [mm0,mm1] = A * src[0..7] */
                /* [mm2,mm3] = B * src[1..8] */
                "movq %%mm0, %%mm1\n\t"
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm0\n\t"
                "punpckhbw %%mm7, %%mm1\n\t"
                "punpcklbw %%mm7, %%mm2\n\t"
                "punpckhbw %%mm7, %%mm3\n\t"
                "pmullw %%mm4, %%mm0\n\t"
                "pmullw %%mm4, %%mm1\n\t"
                "pmullw %%mm5, %%mm2\n\t"
                "pmullw %%mm5, %%mm3\n\t"

                /* dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3 */
                "paddw %%mm6, %%mm0\n\t"
                "paddw %%mm6, %%mm1\n\t"
                "paddw %%mm2, %%mm0\n\t"
                "paddw %%mm3, %%mm1\n\t"
                "psrlw $3, %%mm0\n\t"
                "psrlw $3, %%mm1\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                H264_CHROMA_OP(%0, %%mm0)
                "movq %%mm0, %0\n\t"
                : "=m" (dst[0]));

            src += stride;
            dst += stride;
        }
        return;
    }

    /* general case, bilinear */
    __asm__ volatile(
        "movd %2, %%mm4\n\t"
        "movd %3, %%mm6\n\t"
        "punpcklwd %%mm4, %%mm4\n\t"
        "punpcklwd %%mm6, %%mm6\n\t"
        "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
        "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
        "movq %%mm4, %%mm5\n\t"
        "pmullw %%mm6, %%mm4\n\t"    /* mm4 = x * y */
        "psllw $3, %%mm5\n\t"
        "psllw $3, %%mm6\n\t"
        "movq %%mm5, %%mm7\n\t"
        "paddw %%mm6, %%mm7\n\t"
        "movq %%mm4, %1\n\t"         /* DD = x * y */
        "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
        "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
        "paddw %4, %%mm4\n\t"
        "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
        "pxor %%mm7, %%mm7\n\t"
        "movq %%mm4, %0\n\t"
        : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    __asm__ volatile(
        /* mm0 = src[0..7], mm1 = src[1..8] */
        "movq %0, %%mm0\n\t"
        "movq %1, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[1]));

    for(i=0; i<h; i++) {
        src += stride;

        __asm__ volatile(
            /* mm2 = A * src[0..3] + B * src[1..4] */
            /* mm3 = A * src[4..7] + B * src[5..8] */
            "movq %%mm0, %%mm2\n\t"
            "movq %%mm1, %%mm3\n\t"
            "punpckhbw %%mm7, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm2\n\t"
            "punpckhbw %%mm7, %%mm3\n\t"
            "pmullw %0, %%mm0\n\t"
            "pmullw %0, %%mm2\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "pmullw %%mm5, %%mm3\n\t"
            "paddw %%mm1, %%mm2\n\t"
            "paddw %%mm0, %%mm3\n\t"
            : : "m" (AA));

        __asm__ volatile(
            /* [mm2,mm3] += C * src[0..7] */
            "movq %0, %%mm0\n\t"
            "movq %%mm0, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm1\n\t"
            "pmullw %%mm6, %%mm0\n\t"
            "pmullw %%mm6, %%mm1\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm1, %%mm3\n\t"
            : : "m" (src[0]));

        __asm__ volatile(
            /* [mm2,mm3] += D * src[1..8] */
            "movq %1, %%mm1\n\t"
            "movq %%mm1, %%mm0\n\t"
            "movq %%mm1, %%mm4\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm4\n\t"
            "pmullw %2, %%mm0\n\t"
            "pmullw %2, %%mm4\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm4, %%mm3\n\t"
            "movq %0, %%mm0\n\t"
            : : "m" (src[0]), "m" (src[1]), "m" (DD));

        __asm__ volatile(
            /* dst[0..7] = ([mm2,mm3] + rnd) >> 6 */
            "paddw %1, %%mm2\n\t"
            "paddw %1, %%mm3\n\t"
            "psrlw $6, %%mm2\n\t"
            "psrlw $6, %%mm3\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            H264_CHROMA_OP(%0, %%mm2)
            "movq %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (*rnd_reg));

        dst += stride;
    }
}
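The 1-D branch of the function above reduces to a two-tap filter along one axis; a scalar equivalent (hypothetical helper; rnd3 stands for the value loaded from rnd_reg[1], i.e. the 2-D bias divided by 8):

    #include <stdint.h>

    /* Scalar equivalent of the y==0 || x==0 branch of the MMX code above. */
    static void ref_chroma_mc8_1d(uint8_t *dst, const uint8_t *src,
                                  int stride, int h, int x, int y, int rnd3)
    {
        const int dxy = x ? 1 : stride; /* filter horizontally or vertically */
        const int B   = x + y;          /* exactly one of x, y is non-zero */
        const int A   = 8 - B;

        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 8; j++)
                dst[j] = (A * src[j] + B * src[j + dxy] + rnd3) >> 3;
            src += stride;
            dst += stride;
        }
    }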
static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, const uint64_t *rnd_reg)
{
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movd %5, %%mm2 \n\t"
        "movd %6, %%mm3 \n\t"
        "movq "MANGLE(ff_pw_8)", %%mm4\n\t"
        "movq "MANGLE(ff_pw_8)", %%mm5\n\t"
        "punpcklwd %%mm2, %%mm2 \n\t"
        "punpcklwd %%mm3, %%mm3 \n\t"
        "punpcklwd %%mm2, %%mm2 \n\t"
        "punpcklwd %%mm3, %%mm3 \n\t"
        "psubw %%mm2, %%mm4 \n\t"
        "psubw %%mm3, %%mm5 \n\t"

        "movd (%1), %%mm0 \n\t"
        "movd 1(%1), %%mm6 \n\t"
        "add %3, %1 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm6 \n\t"
        "pmullw %%mm4, %%mm0 \n\t"
        "pmullw %%mm2, %%mm6 \n\t"
        "paddw %%mm0, %%mm6 \n\t"

        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 1(%1), %%mm1 \n\t"
        "add %3, %1 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "pmullw %%mm4, %%mm0 \n\t"
        "pmullw %%mm2, %%mm1 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "movq %%mm1, %%mm0 \n\t"
        "pmullw %%mm5, %%mm6 \n\t"
        "pmullw %%mm3, %%mm1 \n\t"
        "paddw %4, %%mm6 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "psrlw $6, %%mm1 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm6)
        "movd %%mm1, (%0) \n\t"
        "add %3, %0 \n\t"

        "movd (%1), %%mm6 \n\t"
        "movd 1(%1), %%mm1 \n\t"
        "add %3, %1 \n\t"
        "punpcklbw %%mm7, %%mm6 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "pmullw %%mm4, %%mm6 \n\t"
        "pmullw %%mm2, %%mm1 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "pmullw %%mm5, %%mm0 \n\t"
        "pmullw %%mm3, %%mm1 \n\t"
        "paddw %4, %%mm0 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psrlw $6, %%mm1 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm0)
        "movd %%mm1, (%0) \n\t"
        "add %3, %0 \n\t"
        "sub $2, %2 \n\t"
        "jnz 1b \n\t"
        : "+r"(dst), "+r"(src), "+r"(h)
        : "r"((x86_reg)stride), "m"(*rnd_reg), "m"(x), "m"(y)
    );
}
#ifdef H264_CHROMA_MC2_TMPL
static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    int tmp = ((1<<16)-1)*x + 8;
    int CD= tmp*y;
    int AB= (tmp<<3) - CD;

    __asm__ volatile(
        /* mm5 = {A,B,A,B} */
        /* mm6 = {C,D,C,D} */
        "movd %0, %%mm5\n\t"
        "movd %1, %%mm6\n\t"
        "punpckldq %%mm5, %%mm5\n\t"
        "punpckldq %%mm6, %%mm6\n\t"
        "pxor %%mm7, %%mm7\n\t"
        /* mm0 = src[0,1,1,2] */
        "movd %2, %%mm2\n\t"
        "punpcklbw %%mm7, %%mm2\n\t"
        "pshufw $0x94, %%mm2, %%mm2\n\t"
        :: "r"(AB), "r"(CD), "m"(src[0]));

    __asm__ volatile(
        "1:\n\t"
        "add %4, %1\n\t"
        /* mm1 = A * src[0,1] + B * src[1,2] */
        "movq %%mm2, %%mm1\n\t"
        "pmaddwd %%mm5, %%mm1\n\t"
        /* mm0 = src[0,1,1,2] */
        "movd (%1), %%mm0\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "pshufw $0x94, %%mm0, %%mm0\n\t"
        /* mm1 += C * src[0,1] + D * src[1,2] */
        "movq %%mm0, %%mm2\n\t"
        "pmaddwd %%mm6, %%mm0\n\t"
        "paddw %3, %%mm1\n\t"
        "paddw %%mm0, %%mm1\n\t"
        /* dst[0,1] = pack((mm1 + 32) >> 6) */
        "psrlw $6, %%mm1\n\t"
        "packssdw %%mm7, %%mm1\n\t"
        "packuswb %%mm7, %%mm1\n\t"
        H264_CHROMA_OP4((%0), %%mm1, %%mm3)
        "movd %%mm1, %%esi\n\t"
        "movw %%si, (%0)\n\t"
        "add %4, %0\n\t"
        "sub $1, %2\n\t"
        "jnz 1b\n\t"
        : "+r" (dst), "+r"(src), "+r"(h)
        : "m" (ff_pw_32), "r"((x86_reg)stride)
        : "%esi");
}
#endif
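The mc2 version packs two filter coefficients per 32-bit dword so that a single pmaddwd evaluates both taps of a pair at once. The arithmetic behind the AB/CD setup above can be verified in plain C (standalone check, not part of the patch):

    #include <assert.h>

    /* tmp packs x<<16 | (8-x); then AB packs {A,B} and CD packs {C,D},
     * low word first, with A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy,
     * for x, y in [0,7]. */
    static void check_mc2_packing(int x, int y)
    {
        int tmp = ((1 << 16) - 1) * x + 8;
        int CD  = tmp * y;
        int AB  = (tmp << 3) - CD;

        assert((AB & 0xffff) == (8 - x) * (8 - y)); /* A */
        assert((AB >> 16)    ==      x  * (8 - y)); /* B */
        assert((CD & 0xffff) == (8 - x) *      y ); /* C */
        assert((CD >> 16)    ==      x  *      y ); /* D */
    }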
@@ -1,208 +0,0 @@
/*
 * Copyright (c) 2008 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * SSSE3 optimized version of (put|avg)_h264_chroma_mc8.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name
 * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
 * AVG_OP must be defined to empty for put and the identity for avg
 */
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
{
    if(y==0 && x==0) {
        /* no filter needed */
        H264_CHROMA_MC8_MV0(dst, src, stride, h);
        return;
    }

    assert(x<8 && y<8 && x>=0 && y>=0);

    if(y==0 || x==0)
    {
        /* 1 dimensional filter only */
        __asm__ volatile(
            "movd %0, %%xmm7 \n\t"
            "movq %1, %%xmm6 \n\t"
            "pshuflw $0, %%xmm7, %%xmm7 \n\t"
            "movlhps %%xmm6, %%xmm6 \n\t"
            "movlhps %%xmm7, %%xmm7 \n\t"
            :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4.a:&ff_pw_3))
        );

        if(x) {
            __asm__ volatile(
                "1: \n\t"
                "movq (%1), %%xmm0 \n\t"
                "movq 1(%1), %%xmm1 \n\t"
                "movq (%1,%3), %%xmm2 \n\t"
                "movq 1(%1,%3), %%xmm3 \n\t"
                "punpcklbw %%xmm1, %%xmm0 \n\t"
                "punpcklbw %%xmm3, %%xmm2 \n\t"
                "pmaddubsw %%xmm7, %%xmm0 \n\t"
                "pmaddubsw %%xmm7, %%xmm2 \n\t"
                AVG_OP("movq (%0), %%xmm4 \n\t")
                AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
                "paddw %%xmm6, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm2 \n\t"
                "psrlw $3, %%xmm0 \n\t"
                "psrlw $3, %%xmm2 \n\t"
                "packuswb %%xmm2, %%xmm0 \n\t"
                AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
                "movq %%xmm0, (%0) \n\t"
                "movhps %%xmm0, (%0,%3) \n\t"
                "sub $2, %2 \n\t"
                "lea (%1,%3,2), %1 \n\t"
                "lea (%0,%3,2), %0 \n\t"
                "jg 1b \n\t"
                :"+r"(dst), "+r"(src), "+r"(h)
                :"r"((x86_reg)stride)
            );
        } else {
            __asm__ volatile(
                "1: \n\t"
                "movq (%1), %%xmm0 \n\t"
                "movq (%1,%3), %%xmm1 \n\t"
                "movdqa %%xmm1, %%xmm2 \n\t"
                "movq (%1,%3,2), %%xmm3 \n\t"
                "punpcklbw %%xmm1, %%xmm0 \n\t"
                "punpcklbw %%xmm3, %%xmm2 \n\t"
                "pmaddubsw %%xmm7, %%xmm0 \n\t"
                "pmaddubsw %%xmm7, %%xmm2 \n\t"
                AVG_OP("movq (%0), %%xmm4 \n\t")
                AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
                "paddw %%xmm6, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm2 \n\t"
                "psrlw $3, %%xmm0 \n\t"
                "psrlw $3, %%xmm2 \n\t"
                "packuswb %%xmm2, %%xmm0 \n\t"
                AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
                "movq %%xmm0, (%0) \n\t"
                "movhps %%xmm0, (%0,%3) \n\t"
                "sub $2, %2 \n\t"
                "lea (%1,%3,2), %1 \n\t"
                "lea (%0,%3,2), %0 \n\t"
                "jg 1b \n\t"
                :"+r"(dst), "+r"(src), "+r"(h)
                :"r"((x86_reg)stride)
            );
        }
        return;
    }

    /* general case, bilinear */
    __asm__ volatile(
        "movd %0, %%xmm7 \n\t"
        "movd %1, %%xmm6 \n\t"
        "movdqa %2, %%xmm5 \n\t"
        "pshuflw $0, %%xmm7, %%xmm7 \n\t"
        "pshuflw $0, %%xmm6, %%xmm6 \n\t"
        "movlhps %%xmm7, %%xmm7 \n\t"
        "movlhps %%xmm6, %%xmm6 \n\t"
        :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(*(rnd?&ff_pw_32:&ff_pw_28))
    );

    __asm__ volatile(
        "movq (%1), %%xmm0 \n\t"
        "movq 1(%1), %%xmm1 \n\t"
        "punpcklbw %%xmm1, %%xmm0 \n\t"
        "add %3, %1 \n\t"
        "1: \n\t"
        "movq (%1), %%xmm1 \n\t"
        "movq 1(%1), %%xmm2 \n\t"
        "movq (%1,%3), %%xmm3 \n\t"
        "movq 1(%1,%3), %%xmm4 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "punpcklbw %%xmm2, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm1, %%xmm2 \n\t"
        "movdqa %%xmm3, %%xmm4 \n\t"
        "pmaddubsw %%xmm7, %%xmm0 \n\t"
        "pmaddubsw %%xmm6, %%xmm1 \n\t"
        "pmaddubsw %%xmm7, %%xmm2 \n\t"
        "pmaddubsw %%xmm6, %%xmm3 \n\t"
        "paddw %%xmm5, %%xmm0 \n\t"
        "paddw %%xmm5, %%xmm2 \n\t"
        "paddw %%xmm0, %%xmm1 \n\t"
        "paddw %%xmm2, %%xmm3 \n\t"
        "movdqa %%xmm4, %%xmm0 \n\t"
        "psrlw $6, %%xmm1 \n\t"
        "psrlw $6, %%xmm3 \n\t"
        AVG_OP("movq (%0), %%xmm2 \n\t")
        AVG_OP("movhps (%0,%3), %%xmm2 \n\t")
        "packuswb %%xmm3, %%xmm1 \n\t"
        AVG_OP("pavgb %%xmm2, %%xmm1 \n\t")
        "movq %%xmm1, (%0)\n\t"
        "movhps %%xmm1, (%0,%3)\n\t"
        "sub $2, %2 \n\t"
        "lea (%0,%3,2), %0 \n\t"
        "jg 1b \n\t"
        :"+r"(dst), "+r"(src), "+r"(h)
        :"r"((x86_reg)stride)
    );
}

static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    __asm__ volatile(
        "movd %0, %%mm7 \n\t"
        "movd %1, %%mm6 \n\t"
        "movq %2, %%mm5 \n\t"
        "pshufw $0, %%mm7, %%mm7 \n\t"
        "pshufw $0, %%mm6, %%mm6 \n\t"
        :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
    );

    __asm__ volatile(
        "movd (%1), %%mm0 \n\t"
        "punpcklbw 1(%1), %%mm0 \n\t"
        "add %3, %1 \n\t"
        "1: \n\t"
        "movd (%1), %%mm1 \n\t"
        "movd (%1,%3), %%mm3 \n\t"
        "punpcklbw 1(%1), %%mm1 \n\t"
        "punpcklbw 1(%1,%3), %%mm3 \n\t"
        "lea (%1,%3,2), %1 \n\t"
        "movq %%mm1, %%mm2 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pmaddubsw %%mm7, %%mm0 \n\t"
        "pmaddubsw %%mm6, %%mm1 \n\t"
        "pmaddubsw %%mm7, %%mm2 \n\t"
        "pmaddubsw %%mm6, %%mm3 \n\t"
        "paddw %%mm5, %%mm0 \n\t"
        "paddw %%mm5, %%mm2 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "psrlw $6, %%mm1 \n\t"
        "psrlw $6, %%mm3 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "packuswb %%mm3, %%mm3 \n\t"
        AVG_OP("pavgb (%0), %%mm1 \n\t")
        AVG_OP("pavgb (%0,%3), %%mm3 \n\t")
        "movd %%mm1, (%0)\n\t"
        "movd %%mm3, (%0,%3)\n\t"
        "sub $2, %2 \n\t"
        "lea (%0,%3,2), %0 \n\t"
        "jg 1b \n\t"
        :"+r"(dst), "+r"(src), "+r"(h)
        :"r"((x86_reg)stride)
    );
}
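The SSSE3 paths above do the two-tap work with one pmaddubsw per register: source bytes are interleaved as {src[j], src[j+1]} and the coefficient register holds the byte pair {8-x, x}, built as 255*x + 8 = x<<8 | (8-x) and replicated, so each 16-bit lane comes out as (8-x)*src[j] + x*src[j+1]. A scalar model of one lane (illustration only, not part of the patch):

    #include <stdint.h>

    /* What one word lane of "pmaddubsw %%xmm7, %%xmm0" computes above:
     * xmm0 holds unsigned pixel pairs, xmm7 small signed coefficients. */
    static int16_t maddubs_lane(uint8_t s0, uint8_t s1, int x)
    {
        uint16_t coef = (uint16_t)(255 * x + 8); /* x<<8 | (8-x), x in [0,8] */
        int8_t   c0   = (int8_t)(coef & 0xff);   /* 8-x, pairs with src[j]   */
        int8_t   c1   = (int8_t)(coef >> 8);     /* x,   pairs with src[j+1] */
        return (int16_t)(s0 * c0 + s1 * c1);     /* coefficients sum to 8,
                                                    so no saturation here    */
    }

The bilinear case uses two such registers, (x*255+8)*(8-y) and (x*255+8)*y, which yield the A/B tap pair for the current row and the C/D pair for the next row respectively.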
@@ -1819,7 +1819,58 @@ PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

#include "h264dsp_mmx.c"
#include "rv40dsp_mmx.c"

void ff_put_h264_chroma_mc8_mmx_rnd   (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_mmx_nornd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc8_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_mmx2_rnd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {

@@ -2628,12 +2679,15 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;
        c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
#if HAVE_YASM
        c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;
        c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif
        if (mm_flags & FF_MM_MMX2) {
            c->prefetch = prefetch_mmx2;

@@ -2712,17 +2766,17 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
#if HAVE_YASM
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
#if HAVE_YASM
            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS

@@ -2785,11 +2839,15 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
#if HAVE_YASM
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
#endif
        }

@@ -2832,14 +2890,14 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
@@ -0,0 +1,671 @@
;******************************************************************************
;* MMX/SSSE3-optimized functions for H264 chroma MC
;* Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
;*               2005-2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

rnd_rv40_2d_tbl: times 4 dw  0
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw  0
                 times 4 dw 32
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw 32
                 times 4 dw 28
rnd_rv40_1d_tbl: times 4 dw  0
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  0
                 times 4 dw  4
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  4
                 times 4 dw  3

cextern pw_3
cextern pw_4
cextern pw_8
cextern pw_28
cextern pw_32
cextern pw_64

SECTION .text

%macro mv0_pixels_mc8 0
    lea          r4, [r2*2]
.next4rows
    movq         mm0, [r1]
    movq         mm1, [r1+r2]
    CHROMAMC_AVG mm0, [r0]
    CHROMAMC_AVG mm1, [r0+r2]
    movq         [r0], mm0
    movq         [r0+r2], mm1
    add          r0, r4
    add          r1, r4
    movq         mm0, [r1]
    movq         mm1, [r1+r2]
    CHROMAMC_AVG mm0, [r0]
    CHROMAMC_AVG mm1, [r0+r2]
    add          r1, r4
    movq         [r0], mm0
    movq         [r0+r2], mm1
    add          r0, r4
    sub          r3d, 4
    jne .next4rows
%endmacro
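mv0_pixels_mc8 is the fully-aligned fast path: with mx == my == 0 no filtering is needed, so it copies (or, through CHROMAMC_AVG, averages) 8 bytes per row, four rows per loop iteration. A scalar sketch of the put flavour (illustration only, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* Scalar model of mv0_pixels_mc8 in its put configuration; the avg
     * configuration would average each row with dst instead of copying. */
    static void ref_mv0_pixels_mc8(uint8_t *dst, const uint8_t *src,
                                   int stride, int h)
    {
        for (int i = 0; i < h; i++) {
            memcpy(dst, src, 8);
            dst += stride;
            src += stride;
        }
    }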
%macro chroma_mc8_mmx_func 3
; put/avg_h264_chroma_mc8_mmx_*(uint8_t *dst /*align 8*/, uint8_t *src /*align 1*/,
;                               int stride, int h, int mx, int my)
cglobal %1_%2_chroma_mc8_%3, 6, 7, 0
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif
    mov r6d, r5d
    or r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    mv0_pixels_mc8
    REP_RET

.at_least_one_non_zero
%ifidn %2, rv40
%ifdef PIC
%define rnd_1d_rv40 r11
%define rnd_2d_rv40 r11
%else ; no-PIC
%define rnd_1d_rv40 rnd_rv40_1d_tbl
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%endif
%ifdef ARCH_X86_64
    mov r10, r5
    and r10, 6          ; &~1 for mx/my=[0,7]
    lea r10, [r10*4+r4]
    sar r10d, 1
%define rnd_bias r10
%define dest_reg r0
%else ; x86-32
    mov r0, r5
    and r0, 6           ; &~1 for mx/my=[0,7]
    lea r0, [r0*4+r4]
    sar r0d, 1
%define rnd_bias r0
%define dest_reg r5
%endif
%else ; vc1, h264
%define rnd_bias 0
%define dest_reg r0
%endif

    test r5d, r5d
    mov r6, 1
    je .my_is_zero
    test r4d, r4d
    mov r6, r2          ; dxy = x ? 1 : stride
    jne .both_non_zero

.my_is_zero
    ; mx == 0 XOR my == 0 - 1 dimensional filter only
    or r4d, r5d         ; x + y
%ifidn %2, rv40
%ifdef PIC
    lea r11, [rnd_rv40_1d_tbl]
%endif
%ifndef ARCH_X86_64
    mov r5, r0m
%endif
%endif

    movd m5, r4d
    movq m4, [pw_8]
    movq m6, [rnd_1d_%2+rnd_bias*8] ; mm6 = rnd >> 3
    punpcklwd m5, m5
    punpckldq m5, m5    ; mm5 = B = x
    pxor m7, m7
    psubw m4, m5        ; mm4 = A = 8-x

.next1drow
    movq m0, [r1]       ; mm0 = src[0..7]
    movq m2, [r1+r6]    ; mm1 = src[1..8]

    movq m1, m0
    movq m3, m2
    punpcklbw m0, m7
    punpckhbw m1, m7
    punpcklbw m2, m7
    punpckhbw m3, m7
    pmullw m0, m4       ; [mm0,mm1] = A * src[0..7]
    pmullw m1, m4
    pmullw m2, m5       ; [mm2,mm3] = B * src[1..8]
    pmullw m3, m5

    paddw m0, m6
    paddw m1, m6
    paddw m0, m2
    paddw m1, m3
    psrlw m0, 3
    psrlw m1, 3
    packuswb m0, m1
    CHROMAMC_AVG m0, [dest_reg]
    movq [dest_reg], m0 ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3

    add dest_reg, r2
    add r1, r2
    dec r3d
    jne .next1drow
    REP_RET

.both_non_zero ; general case, bilinear
    movd m4, r4d        ; x
    movd m6, r5d        ; y
%ifidn %2, rv40
%ifdef PIC
    lea r11, [rnd_rv40_2d_tbl]
%endif
%ifndef ARCH_X86_64
    mov r5, r0m
%endif
%endif

    mov r6, rsp          ; backup stack pointer
    and rsp, ~(mmsize-1) ; align stack
    sub rsp, 16          ; AA and DD

    punpcklwd m4, m4
    punpcklwd m6, m6
    punpckldq m4, m4    ; mm4 = x words
    punpckldq m6, m6    ; mm6 = y words
    movq m5, m4
    pmullw m4, m6       ; mm4 = x * y
    psllw m5, 3
    psllw m6, 3
    movq m7, m5
    paddw m7, m6
    movq [rsp+8], m4    ; DD = x * y
    psubw m5, m4        ; mm5 = B = 8x - xy
    psubw m6, m4        ; mm6 = C = 8y - xy
    paddw m4, [pw_64]
    psubw m4, m7        ; mm4 = A = xy - (8x+8y) + 64
    pxor m7, m7
    movq [rsp], m4

    movq m0, [r1]       ; mm0 = src[0..7]
    movq m1, [r1+1]     ; mm1 = src[1..8]

.next2drow
    add r1, r2

    movq m2, m0
    movq m3, m1
    punpckhbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpckhbw m3, m7
    pmullw m0, [rsp]
    pmullw m2, [rsp]
    pmullw m1, m5
    pmullw m3, m5
    paddw m2, m1        ; mm2 = A * src[0..3] + B * src[1..4]
    paddw m3, m0        ; mm3 = A * src[4..7] + B * src[5..8]

    movq m0, [r1]
    movq m1, m0
    punpcklbw m0, m7
    punpckhbw m1, m7
    pmullw m0, m6
    pmullw m1, m6
    paddw m2, m0
    paddw m3, m1        ; [mm2,mm3] += C * src[0..7]

    movq m1, [r1+1]
    movq m0, m1
    movq m4, m1
    punpcklbw m0, m7
    punpckhbw m4, m7
    pmullw m0, [rsp+8]
    pmullw m4, [rsp+8]
    paddw m2, m0
    paddw m3, m4        ; [mm2,mm3] += D * src[1..8]

    movq m0, [r1]
    paddw m2, [rnd_2d_%2+rnd_bias*8]
    paddw m3, [rnd_2d_%2+rnd_bias*8]
    psrlw m2, 6
    psrlw m3, 6
    packuswb m2, m3
    CHROMAMC_AVG m2, [dest_reg]
    movq [dest_reg], m2 ; dst[0..7] = ([mm2,mm3] + rnd) >> 6

    add dest_reg, r2
    dec r3d
    jne .next2drow
    mov rsp, r6         ; restore stack pointer
    RET
%endmacro

%macro chroma_mc4_mmx_func 3
cglobal %1_%2_chroma_mc4_%3, 6, 6, 0
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif
    pxor m7, m7
    movd m2, r4d        ; x
    movd m3, r5d        ; y
    movq m4, [pw_8]
    movq m5, [pw_8]
    punpcklwd m2, m2
    punpcklwd m3, m3
    punpcklwd m2, m2
    punpcklwd m3, m3
    psubw m4, m2
    psubw m5, m3

%ifidn %2, rv40
%ifdef PIC
    lea r11, [rnd_rv40_2d_tbl]
%define rnd_2d_rv40 r11
%else
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%endif
    and r5, 6           ; &~1 for mx/my=[0,7]
    lea r5, [r5*4+r4]
    sar r5d, 1
%define rnd_bias r5
%else ; vc1, h264
%define rnd_bias 0
%endif

    movd m0, [r1]
    movd m6, [r1+1]
    add r1, r2
    punpcklbw m0, m7
    punpcklbw m6, m7
    pmullw m0, m4
    pmullw m6, m2
    paddw m6, m0

.next2rows
    movd m0, [r1]
    movd m1, [r1+1]
    add r1, r2
    punpcklbw m0, m7
    punpcklbw m1, m7
    pmullw m0, m4
    pmullw m1, m2
    paddw m1, m0
    movq m0, m1
    pmullw m6, m5
    pmullw m1, m3
    paddw m6, [rnd_2d_%2+rnd_bias*8]
    paddw m1, m6
    psrlw m1, 6
    packuswb m1, m1
    CHROMAMC_AVG4 m1, m6, [r0]
    movd [r0], m1
    add r0, r2

    movd m6, [r1]
    movd m1, [r1+1]
    add r1, r2
    punpcklbw m6, m7
    punpcklbw m1, m7
    pmullw m6, m4
    pmullw m1, m2
    paddw m1, m6
    movq m6, m1
    pmullw m0, m5
    pmullw m1, m3
    paddw m0, [rnd_2d_%2+rnd_bias*8]
    paddw m1, m0
    psrlw m1, 6
    packuswb m1, m1
    CHROMAMC_AVG4 m1, m0, [r0]
    movd [r0], m1
    add r0, r2
    sub r3d, 2
    jnz .next2rows
    REP_RET
%endmacro

%macro chroma_mc2_mmx_func 3
cglobal %1_%2_chroma_mc2_%3, 6, 7, 0
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif
    mov r6d, r4d
    shl r4d, 16
    sub r4d, r6d
    add r4d, 8
    imul r5d, r4d       ; x*y<<16 | y*(8-x)
    shl r4d, 3
    sub r4d, r5d        ; x*(8-y)<<16 | (8-x)*(8-y)

    movd m5, r4d
    movd m6, r5d
    punpckldq m5, m5    ; mm5 = {A,B,A,B}
    punpckldq m6, m6    ; mm6 = {C,D,C,D}
    pxor m7, m7
    movd m2, [r1]
    punpcklbw m2, m7
    pshufw m2, m2, 0x94 ; mm0 = src[0,1,1,2]

.nextrow
    add r1, r2
    movq m1, m2
    pmaddwd m1, m5      ; mm1 = A * src[0,1] + B * src[1,2]
    movd m0, [r1]
    punpcklbw m0, m7
    pshufw m0, m0, 0x94 ; mm0 = src[0,1,1,2]
    movq m2, m0
    pmaddwd m0, m6
    paddw m1, [rnd_2d_%2]
    paddw m1, m0        ; mm1 += C * src[0,1] + D * src[1,2]
    psrlw m1, 6
    packssdw m1, m7
    packuswb m1, m7
    CHROMAMC_AVG4 m1, m3, [r0]
    movd r5d, m1
    mov [r0], r5w
    add r0, r2
    sub r3d, 1
    jnz .nextrow
    REP_RET
%endmacro

%define rnd_1d_h264 pw_4
%define rnd_2d_h264 pw_32
%define rnd_1d_vc1  pw_3
%define rnd_2d_vc1  pw_28

%macro NOTHING 2-3
%endmacro
%macro DIRECT_AVG 2
    PAVG %1, %2
%endmacro
%macro COPY_AVG 3
    movd %2, %3
    PAVG %1, %2
%endmacro

INIT_MMX
%define CHROMAMC_AVG  NOTHING
%define CHROMAMC_AVG4 NOTHING
chroma_mc8_mmx_func put, h264, mmx_rnd
chroma_mc8_mmx_func put, vc1,  mmx_nornd
chroma_mc8_mmx_func put, rv40, mmx
chroma_mc4_mmx_func put, h264, mmx
chroma_mc4_mmx_func put, rv40, mmx
chroma_mc2_mmx_func put, h264, mmx2

%define CHROMAMC_AVG  DIRECT_AVG
%define CHROMAMC_AVG4 COPY_AVG
%define PAVG pavgb
chroma_mc8_mmx_func avg, h264, mmx2_rnd
chroma_mc8_mmx_func avg, vc1,  mmx2_nornd
chroma_mc8_mmx_func avg, rv40, mmx2
chroma_mc4_mmx_func avg, h264, mmx2
chroma_mc4_mmx_func avg, rv40, mmx2
chroma_mc2_mmx_func avg, h264, mmx2

%define PAVG pavgusb
chroma_mc8_mmx_func avg, h264, 3dnow_rnd
chroma_mc8_mmx_func avg, vc1,  3dnow_nornd
chroma_mc8_mmx_func avg, rv40, 3dnow
chroma_mc4_mmx_func avg, h264, 3dnow
chroma_mc4_mmx_func avg, rv40, 3dnow

%macro chroma_mc8_ssse3_func 3
cglobal %1_%2_chroma_mc8_%3, 6, 7, 8
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif
    mov r6d, r5d
    or r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    mv0_pixels_mc8
    REP_RET

.at_least_one_non_zero
    test r5d, r5d
    je .my_is_zero
    test r4d, r4d
    je .mx_is_zero

    ; general case, bilinear
    mov r6d, r4d
    shl r4d, 8
    sub r4, r6
    add r4, 8           ; x*255+8 = x<<8 | (8-x)
    mov r6, 8
    sub r6d, r5d
    imul r6, r4         ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul r4d, r5d       ;    y *(x*255+8) =    y *x<<8 |    y *(8-x)

    movd m7, r6d
    movd m6, r4d
    movdqa m5, [rnd_2d_%2]
    pshuflw m7, m7, 0
    pshuflw m6, m6, 0
    movlhps m7, m7
    movlhps m6, m6

    movq m0, [r1]
    movq m1, [r1+1]
    punpcklbw m0, m1
    add r1, r2
.next2rows
    movq m1, [r1]
    movq m2, [r1+1]
    movq m3, [r1+r2]
    movq m4, [r1+r2+1]
    lea r1, [r1+r2*2]
    punpcklbw m1, m2
    punpcklbw m3, m4
    movdqa m2, m1
    movdqa m4, m3
    pmaddubsw m0, m7
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    pmaddubsw m3, m6
    paddw m0, m5
    paddw m2, m5
    paddw m1, m0
    paddw m3, m2
    movdqa m0, m4
    psrlw m1, 6
    psrlw m3, 6
%ifidn %1, avg
    movq m2, [r0]
    movhps m2, [r0+r2]
%endif
    packuswb m1, m3
    CHROMAMC_AVG m1, m2
    movq [r0], m1
    movhps [r0+r2], m1
    sub r3d, 2
    lea r0, [r0+r2*2]
    jg .next2rows
    REP_RET

.my_is_zero
    mov r5d, r4d
    shl r4d, 8
    add r4, 8
    sub r4, r5          ; 255*x+8 = x<<8 | (8-x)

    movd m7, r4d
    movq m6, [rnd_1d_%2]
    pshuflw m7, m7, 0
    movlhps m6, m6
    movlhps m7, m7

.next2xrows
    movq m0, [r1]
    movq m1, [r1+1]
    movq m2, [r1+r2]
    movq m3, [r1+r2+1]
    punpcklbw m0, m1
    punpcklbw m2, m3
    pmaddubsw m0, m7
    pmaddubsw m2, m7
%ifidn %1, avg
    movq m4, [r0]
    movhps m4, [r0+r2]
%endif
    paddw m0, m6
    paddw m2, m6
    psrlw m0, 3
    psrlw m2, 3
    packuswb m0, m2
    CHROMAMC_AVG m0, m4
    movq [r0], m0
    movhps [r0+r2], m0
    sub r3d, 2
    lea r0, [r0+r2*2]
    lea r1, [r1+r2*2]
    jg .next2xrows
    REP_RET

.mx_is_zero
    mov r4d, r5d
    shl r5d, 8
    add r5, 8
    sub r5, r4          ; 255*y+8 = y<<8 | (8-y)

    movd m7, r5d
    movq m6, [rnd_1d_%2]
    pshuflw m7, m7, 0
    movlhps m6, m6
    movlhps m7, m7

.next2yrows
    movq m0, [r1]
    movq m1, [r1+r2]
    movdqa m2, m1
    movq m3, [r1+r2*2]
    punpcklbw m0, m1
    punpcklbw m2, m3
    pmaddubsw m0, m7
    pmaddubsw m2, m7
%ifidn %1, avg
    movq m4, [r0]
    movhps m4, [r0+r2]
%endif
    paddw m0, m6
    paddw m2, m6
    psrlw m0, 3
    psrlw m2, 3
    packuswb m0, m2
    CHROMAMC_AVG m0, m4
    movq [r0], m0
    movhps [r0+r2], m0
    sub r3d, 2
    lea r0, [r0+r2*2]
    lea r1, [r1+r2*2]
    jg .next2yrows
    REP_RET
%endmacro

%macro chroma_mc4_ssse3_func 3
cglobal %1_%2_chroma_mc4_%3, 6, 7, 0
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif
    mov r6, r4
    shl r4d, 8
    sub r4d, r6d
    add r4d, 8          ; x*255+8 = x<<8 | (8-x)
    mov r6, 8
    sub r6d, r5d
    imul r6d, r4d       ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul r4d, r5d       ;    y *(x*255+8) =    y *x<<8 |    y *(8-x)

    movd m7, r6d
    movd m6, r4d
    movq m5, [pw_32]
    pshufw m7, m7, 0
    pshufw m6, m6, 0

    movd m0, [r1]
    punpcklbw m0, [r1+1]
    add r1, r2
.next2rows
    movd m1, [r1]
    movd m3, [r1+r2]
    punpcklbw m1, [r1+1]
    punpcklbw m3, [r1+r2+1]
    lea r1, [r1+r2*2]
    movq m2, m1
    movq m4, m3
    pmaddubsw m0, m7
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    pmaddubsw m3, m6
    paddw m0, m5
    paddw m2, m5
    paddw m1, m0
    paddw m3, m2
    movq m0, m4
    psrlw m1, 6
    psrlw m3, 6
    packuswb m1, m1
    packuswb m3, m3
    CHROMAMC_AVG m1, [r0]
    CHROMAMC_AVG m3, [r0+r2]
    movd [r0], m1
    movd [r0+r2], m3
    sub r3d, 2
    lea r0, [r0+r2*2]
    jg .next2rows
    REP_RET
%endmacro

%define CHROMAMC_AVG NOTHING
INIT_XMM
chroma_mc8_ssse3_func put, h264, ssse3_rnd
chroma_mc8_ssse3_func put, vc1,  ssse3_nornd
INIT_MMX
chroma_mc4_ssse3_func put, h264, ssse3

%define CHROMAMC_AVG DIRECT_AVG
%define PAVG pavgb
INIT_XMM
chroma_mc8_ssse3_func avg, h264, ssse3_rnd
chroma_mc8_ssse3_func avg, vc1,  ssse3_nornd
INIT_MMX
chroma_mc4_ssse3_func avg, h264, ssse3
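The RV40 variants pick their rounding bias from rnd_rv40_1d_tbl/rnd_rv40_2d_tbl; the index built above with and/lea/sar addresses a 4x4 table of `times 4 dw` entries (8 bytes each, hence the rnd_bias*8 scaling) by (my>>1, mx>>1). A standalone check of that equivalence (not part of the patch):

    #include <assert.h>

    /* rnd_bias as computed in chroma_mc8_mmx_func / chroma_mc4_mmx_func:
     * and reg, 6 ; lea reg, [reg*4+mx] ; sar reg, 1 */
    static void check_rv40_bias_index(int mx, int my) /* mx, my in [0,7] */
    {
        int rnd_bias = ((my & 6) * 4 + mx) >> 1;
        assert(rnd_bias == (my >> 1) * 4 + (mx >> 1)); /* row-major 4x4 */
    }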
@@ -2105,127 +2105,6 @@ H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif

/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = {
    0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};
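As the comment says, each 2-D bias is interleaved with its 1-D counterpart, so the template reaches the 1-D value through rnd_reg+1 and the VC-1 no-rounding pair through h264_rnd_reg+2. A tiny sanity check of that layout (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Even entries hold the 2-D bias per 16-bit lane (32 or 28), and the
     * following entry holds the 1-D bias (bias >> 3, i.e. 4 or 3). */
    static void check_rnd_reg_layout(const uint64_t *rnd_reg)
    {
        uint16_t rnd_2d = (uint16_t)rnd_reg[0];
        uint16_t rnd_1d = (uint16_t)rnd_reg[1];
        assert(rnd_1d == rnd_2d >> 3);
    }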
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#if HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"

static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"

static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif

/***********************************/
/* weighted prediction */
@@ -1,61 +0,0 @@
/*
 * Copyright (c) 2008 Konstantin Shishkov, Mathieu Velten
 *
 * MMX-optimized DSP functions for RV40, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

/* bias interleaved with bias div 8, use p+1 to access bias div 8 */
DECLARE_ALIGNED(8, static const uint64_t, rv40_bias_reg)[4][8] = {
    { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0010001000100010ULL, 0x0002000200020002ULL,
      0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL },
    { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
      0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL },
    { 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0020002000200020ULL, 0x0004000400040004ULL,
      0x0010001000100010ULL, 0x0002000200020002ULL, 0x0020002000200020ULL, 0x0004000400040004ULL },
    { 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
      0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL }
};

static void put_rv40_chroma_mc8_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}
static void put_rv40_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}
static void avg_rv40_chroma_mc8_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}
static void avg_rv40_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}
static void avg_rv40_chroma_mc8_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}
static void avg_rv40_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, &rv40_bias_reg[y>>1][x&(~1)]);
}