Browse Source

bgr24toY in MMX

Originally committed as revision 4613 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
tags/v0.5
Michael Niedermayer 24 years ago
parent
commit
ac6a2e4550
2 changed files with 80 additions and 1 deletion
  1. +10
    -0
      postproc/swscale.c
  2. +70
    -1
      postproc/swscale_template.c

+ 10
- 0
postproc/swscale.c View File

@@ -68,6 +68,8 @@ untested special converters
//#undef ARCH_X86
#define DITHER1XBPP

#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit

#define RET 0xC3 //near return opcode for X86

#ifdef MP_DEBUG
@@ -178,6 +180,14 @@ static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;

#ifdef FAST_BGR2YV12
/* Packed 16-bit luma coefficients for pmaddwd: low-to-high words are
   0x000D, 0x0041, 0x0021, 0x0000 (13, 65, 33 in Q7 fixed point).
   NOTE(review): values match the BT.601 limited-range Y weights
   (~0.098 B, ~0.504 G, ~0.257 R) at 7-bit precision — confirm
   channel-to-word mapping against the bgr24ToY loader order. */
static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000000210041000DULL;
#else
/* Same weights at 15-bit precision (0x0C8B, 0x4083, 0x20E5 =
   3211, 16515, 8421 in Q15); the extra precision is paid for with the
   psrad $8 shifts in the MMX code when FAST_BGR2YV12 is not defined. */
static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000020E540830C8BULL;
#endif
/* Eight packed bytes of 0x10: the +16 luma offset of limited-range
   (16..235) Y, added with paddusb after packing to bytes. */
static const uint64_t bgr2YOffset __attribute__((aligned(8))) = 0x1010101010101010ULL;
/* Four packed words of 1 — used with pmaddwd to horizontally add
   adjacent 16-bit partial sums. */
static const uint64_t w1111 __attribute__((aligned(8))) = 0x0001000100010001ULL;

// FIXME remove
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;


+ 70
- 1
postproc/swscale_template.c View File

@@ -1635,7 +1635,76 @@ static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1

static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMXFIXME
#ifdef HAVE_MMX
asm volatile(
"movl %2, %%eax \n\t"
"movq bgr2YCoeff, %%mm6 \n\t"
"movq w1111, %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"leal (%%eax, %%eax, 2), %%ebx \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%ebx) \n\t"
"movd (%0, %%ebx), %%mm0 \n\t"
"movd 3(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 6(%0, %%ebx), %%mm2 \n\t"
"movd 9(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm0 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"

"movd 12(%0, %%ebx), %%mm4 \n\t"
"movd 15(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 18(%0, %%ebx), %%mm2 \n\t"
"movd 21(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm4 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"addl $24, %%ebx \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"

"packuswb %%mm4, %%mm0 \n\t"
"paddusb bgr2YOffset, %%mm0 \n\t"

MOVNTQ(%%mm0, (%1, %%eax))
"addl $8, %%eax \n\t"
" js 1b \n\t"
: : "r" (src+width*3), "r" (dst+width), "g" (-width)
: "%eax", "%ebx"
);
#else
int i;
for(i=0; i<width; i++)


Loading…
Cancel
Save