The new split-radix FFT C code is 1.9x faster than the previous C implementation (on various x86 CPUs); the SSE version is 1.6x faster than the previous SSE version.
Originally committed as revision 14698 to svn://svn.ffmpeg.org/ffmpeg/trunk
@@ -388,6 +388,8 @@ OBJS += i386/fdct_mmx.o \
i386/simple_idct_mmx.o \
i386/idct_mmx_xvid.o \
i386/idct_sse2_xvid.o \
OBJS-$(HAVE_YASM) += i386/fft_mmx.o \
i386/fft_sse.o \
i386/fft_3dn.o \
i386/fft_3dn2.o \
@@ -639,6 +639,8 @@ typedef struct FFTContext {
    uint16_t *revtab;
    FFTComplex *exptab;
    FFTComplex *exptab1; /* only used by SSE code */
    FFTComplex *tmp_buf;
    void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
    void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
    void (*imdct_calc)(struct MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp);
@@ -647,13 +649,18 @@ typedef struct FFTContext {
} FFTContext;
int ff_fft_init(FFTContext *s, int nbits, int inverse);
void ff_fft_permute(FFTContext *s, FFTComplex *z);
void ff_fft_permute_c(FFTContext *s, FFTComplex *z);
void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
static inline void ff_fft_permute(FFTContext *s, FFTComplex *z)
{
    s->fft_permute(s, z);
}
static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
{
    s->fft_calc(s, z);
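For orientation, a minimal sketch of how this API is used by a caller, based only on the declarations shown above (hypothetical caller code, not part of the patch; error handling trimmed):

#include "dsputil.h"   /* FFTContext, FFTComplex, ff_fft_init(), ... */

/* In-place forward FFT of 1<<nbits complex samples. */
static void run_fft(FFTComplex *z, int nbits)
{
    FFTContext fft;
    if (ff_fft_init(&fft, nbits, 0) < 0)   /* 0 = forward transform */
        return;
    ff_fft_permute(&fft, z);   /* reorder input through s->fft_permute / revtab */
    ff_fft_calc(&fft, z);      /* runs whichever fft_calc the CPU dispatch picked */
    ff_fft_end(&fft);
}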
@@ -1,6 +1,8 @@
/*
 * FFT/IFFT transforms
 * Copyright (c) 2008 Loren Merritt
 * Copyright (c) 2002 Fabrice Bellard.
 * Partly based on libdjbfft by D. J. Bernstein
 *
 * This file is part of FFmpeg.
 *
@@ -26,6 +28,36 @@
#include "dsputil.h"
/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */ | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_16[8]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_32[16]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_64[32]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_128[64]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_256[128]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_512[256]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_1024[512]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_2048[1024]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_4096[2048]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_8192[4096]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_16384[8192]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_32768[16384]); | |||||
DECLARE_ALIGNED_16(FFTSample, ff_cos_65536[32768]); | |||||
static FFTSample *ff_cos_tabs[] = { | |||||
ff_cos_16, ff_cos_32, ff_cos_64, ff_cos_128, ff_cos_256, ff_cos_512, ff_cos_1024, | |||||
ff_cos_2048, ff_cos_4096, ff_cos_8192, ff_cos_16384, ff_cos_32768, ff_cos_65536, | |||||
}; | |||||
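As a standalone illustration of the layout the comment above describes (hypothetical size m = 16, not part of the patch): each table holds cos(2*pi*i/m) for 0 <= i <= m/4 and is then mirrored, so a pass can read cosines forward from tab[0] and sines backward from tab[m/4], because sin(2*pi*i/m) == cos(2*pi*(m/4-i)/m).

#include <math.h>
#include <stdio.h>

int main(void)
{
    enum { m = 16 };
    float tab[m/2];
    int i;
    for (i = 0; i <= m/4; i++)       /* cos(2*pi*i/m) for 0 <= i <= m/4 */
        tab[i] = cos(2*M_PI*i/m);
    for (i = 1; i < m/4; i++)        /* ... followed by its reverse */
        tab[m/2-i] = tab[i];
    for (i = 0; i < m/4; i++)        /* tab[m/4-i] equals sin(2*pi*i/m) */
        printf("cos=%f sin=%f\n", tab[i], tab[m/4-i]);
    return 0;
}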
static int split_radix_permutation(int i, int n, int inverse) | |||||
{ | |||||
int m; | |||||
if(n <= 2) return i&1; | |||||
m = n >> 1; | |||||
if(!(i&m)) return split_radix_permutation(i, m, inverse)*2; | |||||
m >>= 1; | |||||
if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1; | |||||
else return split_radix_permutation(i, m, inverse)*4 - 1; | |||||
} | |||||
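split_radix_permutation() assigns each input index a signed position, and ff_fft_init() below folds that into the reverse table with revtab[-split_radix_permutation(i, n, inverse) & (n-1)] = i, where the -perm & (n-1) step maps the signed offsets onto indices mod n. A small hypothetical harness (not part of the patch) that prints the resulting table for n = 16:

#include <stdio.h>
#include <stdint.h>

/* same recursion as above */
static int split_radix_permutation(int i, int n, int inverse)
{
    int m;
    if (n <= 2) return i & 1;
    m = n >> 1;
    if (!(i & m)) return split_radix_permutation(i, m, inverse) * 2;
    m >>= 1;
    if (inverse == !(i & m)) return split_radix_permutation(i, m, inverse) * 4 + 1;
    else                     return split_radix_permutation(i, m, inverse) * 4 - 1;
}

int main(void)
{
    int n = 16, i;
    uint16_t revtab[16];
    for (i = 0; i < n; i++)   /* as in ff_fft_init() below */
        revtab[-split_radix_permutation(i, n, 0) & (n - 1)] = i;
    for (i = 0; i < n; i++)   /* revtab[i]: where input element i ends up after ff_fft_permute() */
        printf("%d ", revtab[i]);
    printf("\n");
    return 0;
}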
/**
 * The size of the FFT is 2^nbits. If inverse is TRUE, inverse FFT is
 * done
@@ -34,12 +66,15 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
{
    int i, j, m, n;
    float alpha, c1, s1, s2;
    int shuffle = 0;
    int split_radix = 1;
    int av_unused has_vectors;
    if (nbits < 2 || nbits > 16)
        goto fail;
    s->nbits = nbits;
    n = 1 << nbits;
    s->tmp_buf = NULL;
    s->exptab = av_malloc((n / 2) * sizeof(FFTComplex));
    if (!s->exptab)
        goto fail;
@@ -50,50 +85,62 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
    s2 = inverse ? 1.0 : -1.0;
    for(i=0;i<(n/2);i++) {
        alpha = 2 * M_PI * (float)i / (float)n;
        c1 = cos(alpha);
        s1 = sin(alpha) * s2;
        s->exptab[i].re = c1;
        s->exptab[i].im = s1;
    }
    s->fft_permute = ff_fft_permute_c;
    s->fft_calc = ff_fft_calc_c;
    s->imdct_calc = ff_imdct_calc;
    s->imdct_half = ff_imdct_half;
    s->exptab1 = NULL;
#ifdef HAVE_MMX
#if defined HAVE_MMX && defined HAVE_YASM
    has_vectors = mm_support();
    shuffle = 1;
    if (has_vectors & MM_3DNOWEXT) {
        /* 3DNowEx for K7/K8 */
    if (has_vectors & MM_SSE) {
        /* SSE for P3/P4/K8 */
        s->imdct_calc = ff_imdct_calc_sse;
        s->imdct_half = ff_imdct_half_sse;
        s->fft_permute = ff_fft_permute_sse;
        s->fft_calc = ff_fft_calc_sse;
    } else if (has_vectors & MM_3DNOWEXT) {
        /* 3DNowEx for K7 */
        s->imdct_calc = ff_imdct_calc_3dn2;
        s->imdct_half = ff_imdct_half_3dn2;
        s->fft_calc = ff_fft_calc_3dn2;
    } else if (has_vectors & MM_3DNOW) {
        /* 3DNow! for K6-2/3 */
        s->fft_calc = ff_fft_calc_3dn;
    } else if (has_vectors & MM_SSE) {
        /* SSE for P3/P4 */
        s->imdct_calc = ff_imdct_calc_sse;
        s->imdct_half = ff_imdct_half_sse;
        s->fft_calc = ff_fft_calc_sse;
    } else {
        shuffle = 0;
    }
#elif defined HAVE_ALTIVEC && !defined ALTIVEC_USE_REFERENCE_C_CODE
    has_vectors = mm_support();
    if (has_vectors & MM_ALTIVEC) {
        s->fft_calc = ff_fft_calc_altivec;
        shuffle = 1;
        split_radix = 0;
    }
#endif
    /* compute constant table for HAVE_SSE version */
    if (shuffle) {
    if (split_radix) {
        for(j=4; j<=nbits; j++) {
            int m = 1<<j;
            double freq = 2*M_PI/m;
            FFTSample *tab = ff_cos_tabs[j-4];
            for(i=0; i<=m/4; i++)
                tab[i] = cos(i*freq);
            for(i=1; i<m/4; i++)
                tab[m/2-i] = tab[i];
        }
        for(i=0; i<n; i++)
            s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
        s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
    } else {
        int np, nblocks, np2, l;
        FFTComplex *q;
        for(i=0; i<(n/2); i++) {
            alpha = 2 * M_PI * (float)i / (float)n;
            c1 = cos(alpha);
            s1 = sin(alpha) * s2;
            s->exptab[i].re = c1;
            s->exptab[i].im = s1;
        }
        np = 1 << nbits;
        nblocks = np >> 3;
        np2 = np >> 1;
@@ -116,7 +163,6 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
            nblocks = nblocks >> 1;
        } while (nblocks != 0);
        av_freep(&s->exptab);
    }
    /* compute bit reverse table */
@@ -127,126 +173,35 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
        }
        s->revtab[i]=m;
    }
    }
    return 0;
 fail:
    av_freep(&s->revtab);
    av_freep(&s->exptab);
    av_freep(&s->exptab1);
    av_freep(&s->tmp_buf);
    return -1;
}
/* butterfly op */
#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \ | |||||
{\ | |||||
FFTSample ax, ay, bx, by;\ | |||||
bx=pre1;\ | |||||
by=pim1;\ | |||||
ax=qre1;\ | |||||
ay=qim1;\ | |||||
pre = (bx + ax);\ | |||||
pim = (by + ay);\ | |||||
qre = (bx - ax);\ | |||||
qim = (by - ay);\ | |||||
} | |||||
#define MUL16(a,b) ((a) * (b)) | |||||
#define CMUL(pre, pim, are, aim, bre, bim) \ | |||||
{\ | |||||
pre = (MUL16(are, bre) - MUL16(aim, bim));\ | |||||
pim = (MUL16(are, bim) + MUL16(bre, aim));\ | |||||
} | |||||
/** | |||||
* Do a complex FFT with the parameters defined in ff_fft_init(). The | |||||
* input data must be permuted before with s->revtab table. No | |||||
* 1.0/sqrt(n) normalization is done. | |||||
*/ | |||||
void ff_fft_calc_c(FFTContext *s, FFTComplex *z) | |||||
{ | |||||
int ln = s->nbits; | |||||
int j, np, np2; | |||||
int nblocks, nloops; | |||||
register FFTComplex *p, *q; | |||||
FFTComplex *exptab = s->exptab; | |||||
int l; | |||||
FFTSample tmp_re, tmp_im; | |||||
np = 1 << ln; | |||||
/* pass 0 */ | |||||
p=&z[0]; | |||||
j=(np >> 1); | |||||
do { | |||||
BF(p[0].re, p[0].im, p[1].re, p[1].im, | |||||
p[0].re, p[0].im, p[1].re, p[1].im); | |||||
p+=2; | |||||
} while (--j != 0); | |||||
/* pass 1 */ | |||||
p=&z[0]; | |||||
j=np >> 2; | |||||
if (s->inverse) { | |||||
do { | |||||
BF(p[0].re, p[0].im, p[2].re, p[2].im, | |||||
p[0].re, p[0].im, p[2].re, p[2].im); | |||||
BF(p[1].re, p[1].im, p[3].re, p[3].im, | |||||
p[1].re, p[1].im, -p[3].im, p[3].re); | |||||
p+=4; | |||||
} while (--j != 0); | |||||
} else { | |||||
do { | |||||
BF(p[0].re, p[0].im, p[2].re, p[2].im, | |||||
p[0].re, p[0].im, p[2].re, p[2].im); | |||||
BF(p[1].re, p[1].im, p[3].re, p[3].im, | |||||
p[1].re, p[1].im, p[3].im, -p[3].re); | |||||
p+=4; | |||||
} while (--j != 0); | |||||
} | |||||
/* pass 2 .. ln-1 */ | |||||
nblocks = np >> 3; | |||||
nloops = 1 << 2; | |||||
np2 = np >> 1; | |||||
do { | |||||
p = z; | |||||
q = z + nloops; | |||||
for (j = 0; j < nblocks; ++j) { | |||||
BF(p->re, p->im, q->re, q->im, | |||||
p->re, p->im, q->re, q->im); | |||||
p++; | |||||
q++; | |||||
for(l = nblocks; l < np2; l += nblocks) { | |||||
CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im); | |||||
BF(p->re, p->im, q->re, q->im, | |||||
p->re, p->im, tmp_re, tmp_im); | |||||
p++; | |||||
q++; | |||||
} | |||||
p += nloops; | |||||
q += nloops; | |||||
} | |||||
nblocks = nblocks >> 1; | |||||
nloops = nloops << 1; | |||||
} while (nblocks != 0); | |||||
} | |||||
/**
 * Do the permutation needed BEFORE calling ff_fft_calc()
 */
void ff_fft_permute(FFTContext *s, FFTComplex *z)
void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
{
    int j, k, np;
    FFTComplex tmp;
    const uint16_t *revtab = s->revtab;
    np = 1 << s->nbits;
    if (s->tmp_buf) {
        /* TODO: handle split-radix permute in a more optimal way, probably in-place */
        for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
        memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
        return;
    }
    /* reverse */
    np = 1 << s->nbits;
    for(j=0;j<np;j++) {
        k = revtab[j];
        if (k < j) {
@@ -262,5 +217,169 @@ void ff_fft_end(FFTContext *s)
    av_freep(&s->revtab);
    av_freep(&s->exptab);
    av_freep(&s->exptab1);
    av_freep(&s->tmp_buf);
} | |||||
#define sqrthalf (float)M_SQRT1_2 | |||||
#define BF(x,y,a,b) {\ | |||||
x = a - b;\ | |||||
y = a + b;\ | |||||
} | |||||
#define BUTTERFLIES(a0,a1,a2,a3) {\ | |||||
BF(t3, t5, t5, t1);\ | |||||
BF(a2.re, a0.re, a0.re, t5);\ | |||||
BF(a3.im, a1.im, a1.im, t3);\ | |||||
BF(t4, t6, t2, t6);\ | |||||
BF(a3.re, a1.re, a1.re, t4);\ | |||||
BF(a2.im, a0.im, a0.im, t6);\ | |||||
} | |||||
// force loading all the inputs before storing any. | |||||
// this is slightly slower for small data, but avoids store->load aliasing | |||||
// for addresses separated by large powers of 2. | |||||
#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\ | |||||
FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\ | |||||
BF(t3, t5, t5, t1);\ | |||||
BF(a2.re, a0.re, r0, t5);\ | |||||
BF(a3.im, a1.im, i1, t3);\ | |||||
BF(t4, t6, t2, t6);\ | |||||
BF(a3.re, a1.re, r1, t4);\ | |||||
BF(a2.im, a0.im, i0, t6);\ | |||||
} | |||||
#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\ | |||||
t1 = a2.re * wre + a2.im * wim;\ | |||||
t2 = a2.im * wre - a2.re * wim;\ | |||||
t5 = a3.re * wre - a3.im * wim;\ | |||||
t6 = a3.im * wre + a3.re * wim;\ | |||||
BUTTERFLIES(a0,a1,a2,a3)\ | |||||
} | |||||
#define TRANSFORM_ZERO(a0,a1,a2,a3) {\ | |||||
t1 = a2.re;\ | |||||
t2 = a2.im;\ | |||||
t5 = a3.re;\ | |||||
t6 = a3.im;\ | |||||
BUTTERFLIES(a0,a1,a2,a3)\ | |||||
} | |||||
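In scalar terms, TRANSFORM multiplies one input by the conjugate of the twiddle factor and the other by the twiddle factor itself before the butterflies. A plain-C restatement of the same step (illustration only; it reuses the FFTComplex/FFTSample types and the BF() macro defined above, and corresponds to the BUTTERFLIES variant, BUTTERFLIES_BIG only reorders the loads and stores):

/* Equivalent of TRANSFORM(a0,a1,a2,a3, wre,wim):
 * t1+i*t2 = a2 * conj(w), t5+i*t6 = a3 * w, with w = wre + i*wim,
 * then BF() add/subtract pairs combine them into the four outputs. */
static void transform_c(FFTComplex *a0, FFTComplex *a1,
                        FFTComplex *a2, FFTComplex *a3,
                        FFTSample wre, FFTSample wim)
{
    FFTSample t1 = a2->re * wre + a2->im * wim;   /* Re(a2 * conj(w)) */
    FFTSample t2 = a2->im * wre - a2->re * wim;   /* Im(a2 * conj(w)) */
    FFTSample t5 = a3->re * wre - a3->im * wim;   /* Re(a3 * w) */
    FFTSample t6 = a3->im * wre + a3->re * wim;   /* Im(a3 * w) */
    FFTSample t3, t4;
    /* BUTTERFLIES(a0,a1,a2,a3), with BF(x,y,a,b) = { x = a-b; y = a+b; } */
    BF(t3, t5, t5, t1);
    BF(a2->re, a0->re, a0->re, t5);
    BF(a3->im, a1->im, a1->im, t3);
    BF(t4, t6, t2, t6);
    BF(a3->re, a1->re, a1->re, t4);
    BF(a2->im, a0->im, a0->im, t6);
}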
/* z[0...8n-1], w[1...2n-1] */ | |||||
#define PASS(name)\ | |||||
static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\ | |||||
{\ | |||||
FFTSample t1, t2, t3, t4, t5, t6;\ | |||||
int o1 = 2*n;\ | |||||
int o2 = 4*n;\ | |||||
int o3 = 6*n;\ | |||||
const FFTSample *wim = wre+o1;\ | |||||
n--;\ | |||||
\ | |||||
TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\ | |||||
TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\ | |||||
do {\ | |||||
z += 2;\ | |||||
wre += 2;\ | |||||
wim -= 2;\ | |||||
TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\ | |||||
TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\ | |||||
} while(--n);\ | |||||
} | |||||
PASS(pass) | |||||
#undef BUTTERFLIES | |||||
#define BUTTERFLIES BUTTERFLIES_BIG | |||||
PASS(pass_big) | |||||
#define DECL_FFT(n,n2,n4)\ | |||||
static void fft##n(FFTComplex *z)\ | |||||
{\ | |||||
fft##n2(z);\ | |||||
fft##n4(z+n4*2);\ | |||||
fft##n4(z+n4*3);\ | |||||
pass(z,ff_cos_##n,n4/2);\ | |||||
} | |||||
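Each DECL_FFT(n,n2,n4) instance is one level of the split-radix recursion: a size-n transform is one size-n/2 transform on the first half plus two size-n/4 transforms on the remaining quarters, merged by pass(). For example, DECL_FFT(32,16,8) expands (up to formatting) to:

static void fft32(FFTComplex *z)
{
    fft16(z);               /* size n/2 FFT on z[0..15]  */
    fft8(z + 16);           /* size n/4 FFT on z[16..23] */
    fft8(z + 24);           /* size n/4 FFT on z[24..31] */
    pass(z, ff_cos_32, 4);  /* combine with twiddles from ff_cos_32 (n4/2 == 4) */
}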
static void fft4(FFTComplex *z) | |||||
{ | |||||
FFTSample t1, t2, t3, t4, t5, t6, t7, t8; | |||||
BF(t3, t1, z[0].re, z[1].re); | |||||
BF(t8, t6, z[3].re, z[2].re); | |||||
BF(z[2].re, z[0].re, t1, t6); | |||||
BF(t4, t2, z[0].im, z[1].im); | |||||
BF(t7, t5, z[2].im, z[3].im); | |||||
BF(z[3].im, z[1].im, t4, t8); | |||||
BF(z[3].re, z[1].re, t3, t7); | |||||
BF(z[2].im, z[0].im, t2, t5); | |||||
} | |||||
static void fft8(FFTComplex *z) | |||||
{ | |||||
FFTSample t1, t2, t3, t4, t5, t6, t7, t8; | |||||
fft4(z); | |||||
BF(t1, z[5].re, z[4].re, -z[5].re); | |||||
BF(t2, z[5].im, z[4].im, -z[5].im); | |||||
BF(t3, z[7].re, z[6].re, -z[7].re); | |||||
BF(t4, z[7].im, z[6].im, -z[7].im); | |||||
BF(t8, t1, t3, t1); | |||||
BF(t7, t2, t2, t4); | |||||
BF(z[4].re, z[0].re, z[0].re, t1); | |||||
BF(z[4].im, z[0].im, z[0].im, t2); | |||||
BF(z[6].re, z[2].re, z[2].re, t7); | |||||
BF(z[6].im, z[2].im, z[2].im, t8); | |||||
TRANSFORM(z[1],z[3],z[5],z[7],sqrthalf,sqrthalf); | |||||
} | |||||
#ifndef CONFIG_SMALL | |||||
static void fft16(FFTComplex *z) | |||||
{ | |||||
FFTSample t1, t2, t3, t4, t5, t6; | |||||
fft8(z); | |||||
fft4(z+8); | |||||
fft4(z+12); | |||||
TRANSFORM_ZERO(z[0],z[4],z[8],z[12]); | |||||
TRANSFORM(z[2],z[6],z[10],z[14],sqrthalf,sqrthalf); | |||||
TRANSFORM(z[1],z[5],z[9],z[13],ff_cos_16[1],ff_cos_16[3]); | |||||
TRANSFORM(z[3],z[7],z[11],z[15],ff_cos_16[3],ff_cos_16[1]); | |||||
} | |||||
#else | |||||
DECL_FFT(16,8,4) | |||||
#endif | |||||
DECL_FFT(32,16,8) | |||||
DECL_FFT(64,32,16) | |||||
DECL_FFT(128,64,32) | |||||
DECL_FFT(256,128,64) | |||||
DECL_FFT(512,256,128) | |||||
#ifndef CONFIG_SMALL | |||||
#define pass pass_big | |||||
#endif | |||||
DECL_FFT(1024,512,256) | |||||
DECL_FFT(2048,1024,512) | |||||
DECL_FFT(4096,2048,1024) | |||||
DECL_FFT(8192,4096,2048) | |||||
DECL_FFT(16384,8192,4096) | |||||
DECL_FFT(32768,16384,8192) | |||||
DECL_FFT(65536,32768,16384) | |||||
static void (*fft_dispatch[])(FFTComplex*) = { | |||||
fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024, | |||||
fft2048, fft4096, fft8192, fft16384, fft32768, fft65536, | |||||
}; | |||||
/** | |||||
* Do a complex FFT with the parameters defined in ff_fft_init(). The | |||||
* input data must be permuted before with s->revtab table. No | |||||
* 1.0/sqrt(n) normalization is done. | |||||
*/ | |||||
void ff_fft_calc_c(FFTContext *s, FFTComplex *z) | |||||
{ | |||||
fft_dispatch[s->nbits-2](z); | |||||
}
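Since no 1.0/sqrt(n) scaling is applied, the permute+calc pair can be checked directly against a naive, equally unnormalized O(n^2) DFT. A hypothetical test sketch (not part of the patch):

#include <math.h>
#include "dsputil.h"   /* FFTContext, FFTComplex, ff_fft_init(), ... */

/* Returns the mean absolute error between the library FFT and a naive DFT. */
static float fft_error(int nbits)
{
    int n = 1 << nbits, i, k;
    FFTComplex in[1 << 8], out[1 << 8];   /* sketch assumes nbits <= 8 */
    FFTContext fft;
    float err = 0;
    for (i = 0; i < n; i++) {
        in[i].re = sin(0.37 * i);          /* arbitrary test signal */
        in[i].im = cos(0.19 * i * i);
        out[i] = in[i];
    }
    if (ff_fft_init(&fft, nbits, 0) < 0)   /* forward transform */
        return -1;
    ff_fft_permute(&fft, out);
    ff_fft_calc(&fft, out);
    for (k = 0; k < n; k++) {
        double re = 0, im = 0;
        for (i = 0; i < n; i++) {
            double phi = -2 * M_PI * i * k / n;   /* forward sign convention */
            re += in[i].re * cos(phi) - in[i].im * sin(phi);
            im += in[i].re * sin(phi) + in[i].im * cos(phi);
        }
        err += fabs(re - out[k].re) + fabs(im - out[k].im);
    }
    ff_fft_end(&fft);
    return err / n;
}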
@@ -1,7 +1,6 @@
/*
 * FFT/MDCT transform with 3DNow! optimizations
 * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
 * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
 * Copyright (c) 2008 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
@@ -20,109 +19,5 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86_cpu.h" | |||||
#include "libavcodec/dsputil.h" | |||||
static const int p1m1[2] __attribute__((aligned(8))) = | |||||
{ 0, 1 << 31 }; | |||||
static const int m1p1[2] __attribute__((aligned(8))) = | |||||
{ 1 << 31, 0 }; | |||||
void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z) | |||||
{ | |||||
int ln = s->nbits; | |||||
long j; | |||||
x86_reg i; | |||||
long nblocks, nloops; | |||||
FFTComplex *p, *cptr; | |||||
asm volatile( | |||||
/* FEMMS is not a must here but recommended by AMD */ | |||||
"femms \n\t" | |||||
"movq %0, %%mm7 \n\t" | |||||
::"m"(*(s->inverse ? m1p1 : p1m1)) | |||||
); | |||||
i = 8 << ln; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $32, %0 \n\t" | |||||
"movq (%0,%1), %%mm0 \n\t" | |||||
"movq 16(%0,%1), %%mm1 \n\t" | |||||
"movq 8(%0,%1), %%mm2 \n\t" | |||||
"movq 24(%0,%1), %%mm3 \n\t" | |||||
"movq %%mm0, %%mm4 \n\t" | |||||
"movq %%mm1, %%mm5 \n\t" | |||||
"pfadd %%mm2, %%mm0 \n\t" | |||||
"pfadd %%mm3, %%mm1 \n\t" | |||||
"pfsub %%mm2, %%mm4 \n\t" | |||||
"pfsub %%mm3, %%mm5 \n\t" | |||||
"movq %%mm0, %%mm2 \n\t" | |||||
"punpckldq %%mm5, %%mm6 \n\t" | |||||
"punpckhdq %%mm6, %%mm5 \n\t" | |||||
"movq %%mm4, %%mm3 \n\t" | |||||
"pxor %%mm7, %%mm5 \n\t" | |||||
"pfadd %%mm1, %%mm0 \n\t" | |||||
"pfadd %%mm5, %%mm4 \n\t" | |||||
"pfsub %%mm1, %%mm2 \n\t" | |||||
"pfsub %%mm5, %%mm3 \n\t" | |||||
"movq %%mm0, (%0,%1) \n\t" | |||||
"movq %%mm4, 8(%0,%1) \n\t" | |||||
"movq %%mm2, 16(%0,%1) \n\t" | |||||
"movq %%mm3, 24(%0,%1) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(z) | |||||
); | |||||
/* pass 2 .. ln-1 */ | |||||
nblocks = 1 << (ln-3); | |||||
nloops = 1 << 2; | |||||
cptr = s->exptab1; | |||||
do { | |||||
p = z; | |||||
j = nblocks; | |||||
do { | |||||
i = nloops*8; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $16, %0 \n\t" | |||||
"movq (%1,%0), %%mm0 \n\t" | |||||
"movq 8(%1,%0), %%mm1 \n\t" | |||||
"movq (%2,%0), %%mm2 \n\t" | |||||
"movq 8(%2,%0), %%mm3 \n\t" | |||||
"movq %%mm2, %%mm4 \n\t" | |||||
"movq %%mm3, %%mm5 \n\t" | |||||
"punpckldq %%mm2, %%mm2 \n\t" | |||||
"punpckldq %%mm3, %%mm3 \n\t" | |||||
"punpckhdq %%mm4, %%mm4 \n\t" | |||||
"punpckhdq %%mm5, %%mm5 \n\t" | |||||
"pfmul (%3,%0,2), %%mm2 \n\t" // cre*re cim*re | |||||
"pfmul 8(%3,%0,2), %%mm3 \n\t" | |||||
"pfmul 16(%3,%0,2), %%mm4 \n\t" // -cim*im cre*im | |||||
"pfmul 24(%3,%0,2), %%mm5 \n\t" | |||||
"pfadd %%mm2, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im | |||||
"pfadd %%mm3, %%mm5 \n\t" | |||||
"movq %%mm0, %%mm2 \n\t" | |||||
"movq %%mm1, %%mm3 \n\t" | |||||
"pfadd %%mm4, %%mm0 \n\t" | |||||
"pfadd %%mm5, %%mm1 \n\t" | |||||
"pfsub %%mm4, %%mm2 \n\t" | |||||
"pfsub %%mm5, %%mm3 \n\t" | |||||
"movq %%mm0, (%1,%0) \n\t" | |||||
"movq %%mm1, 8(%1,%0) \n\t" | |||||
"movq %%mm2, (%2,%0) \n\t" | |||||
"movq %%mm3, 8(%2,%0) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(p), "r"(p + nloops), "r"(cptr) | |||||
); | |||||
p += nloops*2; | |||||
} while (--j); | |||||
cptr += nloops*2; | |||||
nblocks >>= 1; | |||||
nloops <<= 1; | |||||
} while (nblocks != 0); | |||||
asm volatile("femms"); | |||||
} | |||||
#define EMULATE_3DNOWEXT | |||||
#include "fft_3dn2.c" |
@@ -23,105 +23,26 @@
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
static const int p1m1[2] __attribute__((aligned(8))) =
    { 0, 1 << 31 };
#ifdef EMULATE_3DNOWEXT
#define ff_fft_calc_3dn2 ff_fft_calc_3dn
#define ff_fft_dispatch_3dn2 ff_fft_dispatch_3dn
#define ff_fft_dispatch_interleave_3dn2 ff_fft_dispatch_interleave_3dn
#define ff_imdct_calc_3dn2 ff_imdct_calc_3dn
#define ff_imdct_half_3dn2 ff_imdct_half_3dn
#endif
static const int m1p1[2] __attribute__((aligned(8))) =
    { 1 << 31, 0 };
void ff_fft_dispatch_3dn2(FFTComplex *z, int nbits);
void ff_fft_dispatch_interleave_3dn2(FFTComplex *z, int nbits);
void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
{
int ln = s->nbits; | |||||
long j; | |||||
x86_reg i; | |||||
long nblocks, nloops; | |||||
FFTComplex *p, *cptr; | |||||
asm volatile( | |||||
/* FEMMS is not a must here but recommended by AMD */ | |||||
"femms \n\t" | |||||
"movq %0, %%mm7 \n\t" | |||||
::"m"(*(s->inverse ? m1p1 : p1m1)) | |||||
); | |||||
i = 8 << ln; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $32, %0 \n\t" | |||||
"movq (%0,%1), %%mm0 \n\t" | |||||
"movq 16(%0,%1), %%mm1 \n\t" | |||||
"movq 8(%0,%1), %%mm2 \n\t" | |||||
"movq 24(%0,%1), %%mm3 \n\t" | |||||
"movq %%mm0, %%mm4 \n\t" | |||||
"movq %%mm1, %%mm5 \n\t" | |||||
"pfadd %%mm2, %%mm0 \n\t" | |||||
"pfadd %%mm3, %%mm1 \n\t" | |||||
"pfsub %%mm2, %%mm4 \n\t" | |||||
"pfsub %%mm3, %%mm5 \n\t" | |||||
"movq %%mm0, %%mm2 \n\t" | |||||
"pswapd %%mm5, %%mm5 \n\t" | |||||
"movq %%mm4, %%mm3 \n\t" | |||||
"pxor %%mm7, %%mm5 \n\t" | |||||
"pfadd %%mm1, %%mm0 \n\t" | |||||
"pfadd %%mm5, %%mm4 \n\t" | |||||
"pfsub %%mm1, %%mm2 \n\t" | |||||
"pfsub %%mm5, %%mm3 \n\t" | |||||
"movq %%mm0, (%0,%1) \n\t" | |||||
"movq %%mm4, 8(%0,%1) \n\t" | |||||
"movq %%mm2, 16(%0,%1) \n\t" | |||||
"movq %%mm3, 24(%0,%1) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(z) | |||||
); | |||||
/* pass 2 .. ln-1 */ | |||||
nblocks = 1 << (ln-3); | |||||
nloops = 1 << 2; | |||||
cptr = s->exptab1; | |||||
do { | |||||
p = z; | |||||
j = nblocks; | |||||
do { | |||||
i = nloops*8; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $16, %0 \n\t" | |||||
"movq (%1,%0), %%mm0 \n\t" | |||||
"movq 8(%1,%0), %%mm1 \n\t" | |||||
"movq (%2,%0), %%mm2 \n\t" | |||||
"movq 8(%2,%0), %%mm3 \n\t" | |||||
"movq (%3,%0,2), %%mm4 \n\t" | |||||
"movq 8(%3,%0,2), %%mm5 \n\t" | |||||
"pswapd %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3] | |||||
"pswapd %%mm5, %%mm7 \n\t" | |||||
"pfmul %%mm2, %%mm4 \n\t" // cre*re cim*im | |||||
"pfmul %%mm3, %%mm5 \n\t" | |||||
"pfmul %%mm2, %%mm6 \n\t" // cim*re cre*im | |||||
"pfmul %%mm3, %%mm7 \n\t" | |||||
"pfpnacc %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im | |||||
"pfpnacc %%mm7, %%mm5 \n\t" | |||||
"movq %%mm0, %%mm2 \n\t" | |||||
"movq %%mm1, %%mm3 \n\t" | |||||
"pfadd %%mm4, %%mm0 \n\t" | |||||
"pfadd %%mm5, %%mm1 \n\t" | |||||
"pfsub %%mm4, %%mm2 \n\t" | |||||
"pfsub %%mm5, %%mm3 \n\t" | |||||
"movq %%mm0, (%1,%0) \n\t" | |||||
"movq %%mm1, 8(%1,%0) \n\t" | |||||
"movq %%mm2, (%2,%0) \n\t" | |||||
"movq %%mm3, 8(%2,%0) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(p), "r"(p + nloops), "r"(cptr) | |||||
); | |||||
p += nloops*2; | |||||
} while (--j); | |||||
cptr += nloops*2; | |||||
nblocks >>= 1; | |||||
nloops <<= 1; | |||||
} while (nblocks != 0); | |||||
    int n = 1<<s->nbits;
    int i;
    ff_fft_dispatch_interleave_3dn2(z, s->nbits);
    asm volatile("femms");
    if(n <= 8)
        for(i=0; i<n; i+=2)
            FFSWAP(FFTSample, z[i].im, z[i+1].re);
}
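The swap in the n <= 8 case converts from the kernel's per-pair block order back to the normal interleaved FFTComplex order; spelled out for one pair (illustration only):

/* Viewing z[i], z[i+1] as four consecutive FFTSamples f[0..3]:
 *   before: f[0]=re0, f[1]=re1, f[2]=im0, f[3]=im1   (block order from the asm)
 *   FFSWAP(FFTSample, z[i].im, z[i+1].re)            (z[i].im is f[1], z[i+1].re is f[2])
 *   after:  f[0]=re0, f[1]=im0, f[2]=re1, f[3]=im1   (normal FFTComplex order)
 */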
static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
@@ -162,7 +83,7 @@ static void imdct_3dn2(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
        );
    }
    ff_fft_calc(&s->fft, z);
    ff_fft_calc_3dn2(&s->fft, z);
    /* post rotation + reordering */
    for(k = 0; k < n4; k++) {
@@ -0,0 +1,467 @@ | |||||
;****************************************************************************** | |||||
;* FFT transform with SSE/3DNow optimizations | |||||
;* Copyright (c) 2008 Loren Merritt | |||||
;* | |||||
;* This file is part of FFmpeg. | |||||
;* | |||||
;* FFmpeg is free software; you can redistribute it and/or | |||||
;* modify it under the terms of the GNU Lesser General Public | |||||
;* License as published by the Free Software Foundation; either | |||||
;* version 2.1 of the License, or (at your option) any later version. | |||||
;* | |||||
;* FFmpeg is distributed in the hope that it will be useful, | |||||
;* but WITHOUT ANY WARRANTY; without even the implied warranty of | |||||
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |||||
;* Lesser General Public License for more details. | |||||
;* | |||||
;* You should have received a copy of the GNU Lesser General Public | |||||
;* License along with FFmpeg; if not, write to the Free Software | |||||
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;****************************************************************************** | |||||
; These functions are not individually interchangeable with the C versions.
; While C takes arrays of FFTComplex, SSE/3DNow leave intermediate results
; in blocks as convenient to the vector size.
; i.e. {4x real, 4x imaginary, 4x real, ...} (or 2x respectively)
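Concretely, for the SSE code one block is four complex values stored as four reals followed by four imaginaries. A hypothetical C converter, included only to pin down the ordering described above (not part of the patch):

/* Convert n complex values (n a multiple of 4) from the usual
 * {re,im,re,im,...} array into the block layout the SSE kernels use
 * internally: {4x re, 4x im, 4x re, 4x im, ...}. */
static void aos_to_sse_blocks(float *dst, const float *src, int n)
{
    int b, k;
    for (b = 0; b < n; b += 4)                     /* one block = 4 complex values */
        for (k = 0; k < 4; k++) {
            dst[2*b + k]     = src[2*(b + k)];     /* 4 reals */
            dst[2*b + 4 + k] = src[2*(b + k) + 1]; /* 4 imaginaries */
        }
}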
%include "x86inc.asm" | |||||
SECTION_RODATA | |||||
%define M_SQRT1_2 0.70710678118654752440 | |||||
ps_root2: times 4 dd M_SQRT1_2 | |||||
ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2 | |||||
ps_m1p1: dd 1<<31, 0 | |||||
%assign i 16 | |||||
%rep 13 | |||||
cextern ff_cos_ %+ i | |||||
%assign i i<<1 | |||||
%endrep | |||||
%ifdef ARCH_X86_64 | |||||
%define pointer dq | |||||
%else | |||||
%define pointer dd | |||||
%endif | |||||
%macro IF0 1+ | |||||
%endmacro | |||||
%macro IF1 1+ | |||||
%1 | |||||
%endmacro | |||||
section .text align=16 | |||||
%macro T2_3DN 4 ; z0, z1, mem0, mem1 | |||||
mova %1, %3 | |||||
mova %2, %1 | |||||
pfadd %1, %4 | |||||
pfsub %2, %4 | |||||
%endmacro | |||||
%macro T4_3DN 6 ; z0, z1, z2, z3, tmp0, tmp1 | |||||
mova %5, %3 | |||||
pfsub %3, %4 | |||||
pfadd %5, %4 ; {t6,t5} | |||||
pxor %3, [ps_m1p1 GLOBAL] ; {t8,t7} | |||||
mova %6, %1 | |||||
pswapd %3, %3 | |||||
pfadd %1, %5 ; {r0,i0} | |||||
pfsub %6, %5 ; {r2,i2} | |||||
mova %4, %2 | |||||
pfadd %2, %3 ; {r1,i1} | |||||
pfsub %4, %3 ; {r3,i3} | |||||
SWAP %3, %6 | |||||
%endmacro | |||||
; in: %1={r0,i0,r1,i1} %2={r2,i2,r3,i3} | |||||
; out: %1={r0,r1,r2,r3} %2={i0,i1,i2,i3} | |||||
%macro T4_SSE 3 | |||||
mova %3, %1 | |||||
shufps %1, %2, 0x64 ; {r0,i0,r3,i2} | |||||
shufps %3, %2, 0xce ; {r1,i1,r2,i3} | |||||
mova %2, %1 | |||||
addps %1, %3 ; {t1,t2,t6,t5} | |||||
subps %2, %3 ; {t3,t4,t8,t7} | |||||
mova %3, %1 | |||||
shufps %1, %2, 0x44 ; {t1,t2,t3,t4} | |||||
shufps %3, %2, 0xbe ; {t6,t5,t7,t8} | |||||
mova %2, %1 | |||||
addps %1, %3 ; {r0,i0,r1,i1} | |||||
subps %2, %3 ; {r2,i2,r3,i3} | |||||
mova %3, %1 | |||||
shufps %1, %2, 0x88 ; {r0,r1,r2,r3} | |||||
shufps %3, %2, 0xdd ; {i0,i1,i2,i3} | |||||
SWAP %2, %3 | |||||
%endmacro | |||||
%macro T8_SSE 6 ; r0,i0,r1,i1,t0,t1 | |||||
mova %5, %3 | |||||
shufps %3, %4, 0x44 ; {r4,i4,r6,i6} | |||||
shufps %5, %4, 0xee ; {r5,i5,r7,i7} | |||||
mova %6, %3 | |||||
subps %3, %5 ; {r5,i5,r7,i7} | |||||
addps %6, %5 ; {t1,t2,t3,t4} | |||||
mova %5, %3 | |||||
shufps %5, %5, 0xb1 ; {i5,r5,i7,r7} | |||||
mulps %3, [ps_root2mppm GLOBAL] ; {-r5,i5,r7,-i7} | |||||
mulps %5, [ps_root2 GLOBAL] | |||||
addps %3, %5 ; {t8,t7,ta,t9} | |||||
mova %5, %6 | |||||
shufps %6, %3, 0x36 ; {t3,t2,t9,t8} | |||||
shufps %5, %3, 0x9c ; {t1,t4,t7,ta} | |||||
mova %3, %6 | |||||
addps %6, %5 ; {t1,t2,t9,ta} | |||||
subps %3, %5 ; {t6,t5,tc,tb} | |||||
mova %5, %6 | |||||
shufps %6, %3, 0xd8 ; {t1,t9,t5,tb} | |||||
shufps %5, %3, 0x8d ; {t2,ta,t6,tc} | |||||
mova %3, %1 | |||||
mova %4, %2 | |||||
addps %1, %6 ; {r0,r1,r2,r3} | |||||
addps %2, %5 ; {i0,i1,i2,i3} | |||||
subps %3, %6 ; {r4,r5,r6,r7} | |||||
subps %4, %5 ; {i4,i5,i6,i7} | |||||
%endmacro | |||||
; scheduled for cpu-bound sizes | |||||
%macro PASS_SMALL 3 ; (to load m4-m7), wre, wim | |||||
IF%1 mova m4, Z(4) | |||||
IF%1 mova m5, Z(5) | |||||
mova m0, %2 ; wre | |||||
mova m2, m4 | |||||
mova m1, %3 ; wim | |||||
mova m3, m5 | |||||
mulps m2, m0 ; r2*wre | |||||
IF%1 mova m6, Z(6) | |||||
mulps m3, m1 ; i2*wim | |||||
IF%1 mova m7, Z(7) | |||||
mulps m4, m1 ; r2*wim | |||||
mulps m5, m0 ; i2*wre | |||||
addps m2, m3 ; r2*wre + i2*wim | |||||
mova m3, m1 | |||||
mulps m1, m6 ; r3*wim | |||||
subps m5, m4 ; i2*wre - r2*wim | |||||
mova m4, m0 | |||||
mulps m3, m7 ; i3*wim | |||||
mulps m4, m6 ; r3*wre | |||||
mulps m0, m7 ; i3*wre | |||||
subps m4, m3 ; r3*wre - i3*wim | |||||
mova m3, Z(0) | |||||
addps m0, m1 ; i3*wre + r3*wim | |||||
mova m1, m4 | |||||
addps m4, m2 ; t5 | |||||
subps m1, m2 ; t3 | |||||
subps m3, m4 ; r2 | |||||
addps m4, Z(0) ; r0 | |||||
mova m6, Z(2) | |||||
mova Z(4), m3 | |||||
mova Z(0), m4 | |||||
mova m3, m5 | |||||
subps m5, m0 ; t4 | |||||
mova m4, m6 | |||||
subps m6, m5 ; r3 | |||||
addps m5, m4 ; r1 | |||||
mova Z(6), m6 | |||||
mova Z(2), m5 | |||||
mova m2, Z(3) | |||||
addps m3, m0 ; t6 | |||||
subps m2, m1 ; i3 | |||||
mova m7, Z(1) | |||||
addps m1, Z(3) ; i1 | |||||
mova Z(7), m2 | |||||
mova Z(3), m1 | |||||
mova m4, m7 | |||||
subps m7, m3 ; i2 | |||||
addps m3, m4 ; i0 | |||||
mova Z(5), m7 | |||||
mova Z(1), m3 | |||||
%endmacro | |||||
; scheduled to avoid store->load aliasing | |||||
%macro PASS_BIG 1 ; (!interleave) | |||||
mova m4, Z(4) ; r2 | |||||
mova m5, Z(5) ; i2 | |||||
mova m2, m4 | |||||
mova m0, [wq] ; wre | |||||
mova m3, m5 | |||||
mova m1, [wq+o1q] ; wim | |||||
mulps m2, m0 ; r2*wre | |||||
mova m6, Z(6) ; r3 | |||||
mulps m3, m1 ; i2*wim | |||||
mova m7, Z(7) ; i3 | |||||
mulps m4, m1 ; r2*wim | |||||
mulps m5, m0 ; i2*wre | |||||
addps m2, m3 ; r2*wre + i2*wim | |||||
mova m3, m1 | |||||
mulps m1, m6 ; r3*wim | |||||
subps m5, m4 ; i2*wre - r2*wim | |||||
mova m4, m0 | |||||
mulps m3, m7 ; i3*wim | |||||
mulps m4, m6 ; r3*wre | |||||
mulps m0, m7 ; i3*wre | |||||
subps m4, m3 ; r3*wre - i3*wim | |||||
mova m3, Z(0) | |||||
addps m0, m1 ; i3*wre + r3*wim | |||||
mova m1, m4 | |||||
addps m4, m2 ; t5 | |||||
subps m1, m2 ; t3 | |||||
subps m3, m4 ; r2 | |||||
addps m4, Z(0) ; r0 | |||||
mova m6, Z(2) | |||||
mova Z(4), m3 | |||||
mova Z(0), m4 | |||||
mova m3, m5 | |||||
subps m5, m0 ; t4 | |||||
mova m4, m6 | |||||
subps m6, m5 ; r3 | |||||
addps m5, m4 ; r1 | |||||
IF%1 mova Z(6), m6 | |||||
IF%1 mova Z(2), m5 | |||||
mova m2, Z(3) | |||||
addps m3, m0 ; t6 | |||||
subps m2, m1 ; i3 | |||||
mova m7, Z(1) | |||||
addps m1, Z(3) ; i1 | |||||
IF%1 mova Z(7), m2 | |||||
IF%1 mova Z(3), m1 | |||||
mova m4, m7 | |||||
subps m7, m3 ; i2 | |||||
addps m3, m4 ; i0 | |||||
IF%1 mova Z(5), m7 | |||||
IF%1 mova Z(1), m3 | |||||
%if %1==0 | |||||
mova m4, m5 ; r1 | |||||
mova m0, m6 ; r3 | |||||
unpcklps m5, m1 | |||||
unpckhps m4, m1 | |||||
unpcklps m6, m2 | |||||
unpckhps m0, m2 | |||||
mova m1, Z(0) | |||||
mova m2, Z(4) | |||||
mova Z(2), m5 | |||||
mova Z(3), m4 | |||||
mova Z(6), m6 | |||||
mova Z(7), m0 | |||||
mova m5, m1 ; r0 | |||||
mova m4, m2 ; r2 | |||||
unpcklps m1, m3 | |||||
unpckhps m5, m3 | |||||
unpcklps m2, m7 | |||||
unpckhps m4, m7 | |||||
mova Z(0), m1 | |||||
mova Z(1), m5 | |||||
mova Z(4), m2 | |||||
mova Z(5), m4 | |||||
%endif | |||||
%endmacro | |||||
%macro PUNPCK 3 | |||||
mova %3, %1 | |||||
punpckldq %1, %2 | |||||
punpckhdq %3, %2 | |||||
%endmacro | |||||
INIT_XMM | |||||
%define Z(x) [r0+mmsize*x] | |||||
align 16 | |||||
fft4_sse: | |||||
mova m0, Z(0) | |||||
mova m1, Z(1) | |||||
T4_SSE m0, m1, m2 | |||||
mova Z(0), m0 | |||||
mova Z(1), m1 | |||||
ret | |||||
align 16 | |||||
fft8_sse: | |||||
mova m0, Z(0) | |||||
mova m1, Z(1) | |||||
T4_SSE m0, m1, m2 | |||||
mova m2, Z(2) | |||||
mova m3, Z(3) | |||||
T8_SSE m0, m1, m2, m3, m4, m5 | |||||
mova Z(0), m0 | |||||
mova Z(1), m1 | |||||
mova Z(2), m2 | |||||
mova Z(3), m3 | |||||
ret | |||||
align 16 | |||||
fft16_sse: | |||||
mova m0, Z(0) | |||||
mova m1, Z(1) | |||||
T4_SSE m0, m1, m2 | |||||
mova m2, Z(2) | |||||
mova m3, Z(3) | |||||
T8_SSE m0, m1, m2, m3, m4, m5 | |||||
mova m4, Z(4) | |||||
mova m5, Z(5) | |||||
mova Z(0), m0 | |||||
mova Z(1), m1 | |||||
mova Z(2), m2 | |||||
mova Z(3), m3 | |||||
T4_SSE m4, m5, m6 | |||||
mova m6, Z(6) | |||||
mova m7, Z(7) | |||||
T4_SSE m6, m7, m0 | |||||
PASS_SMALL 0, [ff_cos_16 GLOBAL], [ff_cos_16+16 GLOBAL] | |||||
ret | |||||
INIT_MMX | |||||
%macro FFT48_3DN 1 | |||||
align 16 | |||||
fft4%1: | |||||
T2_3DN m0, m1, Z(0), Z(1) | |||||
mova m2, Z(2) | |||||
mova m3, Z(3) | |||||
T4_3DN m0, m1, m2, m3, m4, m5 | |||||
PUNPCK m0, m1, m4 | |||||
PUNPCK m2, m3, m5 | |||||
mova Z(0), m0 | |||||
mova Z(1), m4 | |||||
mova Z(2), m2 | |||||
mova Z(3), m5 | |||||
ret | |||||
align 16 | |||||
fft8%1: | |||||
T2_3DN m0, m1, Z(0), Z(1) | |||||
mova m2, Z(2) | |||||
mova m3, Z(3) | |||||
T4_3DN m0, m1, m2, m3, m4, m5 | |||||
mova Z(0), m0 | |||||
mova Z(2), m2 | |||||
T2_3DN m4, m5, Z(4), Z(5) | |||||
T2_3DN m6, m7, Z(6), Z(7) | |||||
pswapd m0, m5 | |||||
pswapd m2, m7 | |||||
pxor m0, [ps_m1p1 GLOBAL] | |||||
pxor m2, [ps_m1p1 GLOBAL] | |||||
pfsub m5, m0 | |||||
pfadd m7, m2 | |||||
pfmul m5, [ps_root2 GLOBAL] | |||||
pfmul m7, [ps_root2 GLOBAL] | |||||
T4_3DN m1, m3, m5, m7, m0, m2 | |||||
mova Z(5), m5 | |||||
mova Z(7), m7 | |||||
mova m0, Z(0) | |||||
mova m2, Z(2) | |||||
T4_3DN m0, m2, m4, m6, m5, m7 | |||||
PUNPCK m0, m1, m5 | |||||
PUNPCK m2, m3, m7 | |||||
mova Z(0), m0 | |||||
mova Z(1), m5 | |||||
mova Z(2), m2 | |||||
mova Z(3), m7 | |||||
PUNPCK m4, Z(5), m5 | |||||
PUNPCK m6, Z(7), m7 | |||||
mova Z(4), m4 | |||||
mova Z(5), m5 | |||||
mova Z(6), m6 | |||||
mova Z(7), m7 | |||||
ret | |||||
%endmacro | |||||
FFT48_3DN _3dn2 | |||||
%macro pswapd 2 | |||||
%ifidn %1, %2 | |||||
movd [r0+12], %1 | |||||
punpckhdq %1, [r0+8] | |||||
%else | |||||
movq %1, %2 | |||||
psrlq %1, 32 | |||||
punpckldq %1, %2 | |||||
%endif | |||||
%endmacro | |||||
FFT48_3DN _3dn | |||||
%define Z(x) [zq + o1q*(x&6)*((x/6)^1) + o3q*(x/6) + mmsize*(x&1)] | |||||
%macro DECL_PASS 2+ ; name, payload | |||||
align 16 | |||||
%1: | |||||
DEFINE_ARGS z, w, n, o1, o3 | |||||
lea o3q, [nq*3] | |||||
lea o1q, [nq*8] | |||||
shl o3q, 4 | |||||
.loop: | |||||
%2 | |||||
add zq, mmsize*2 | |||||
add wq, mmsize | |||||
sub nd, mmsize/8 | |||||
jg .loop | |||||
rep ret | |||||
%endmacro | |||||
INIT_XMM | |||||
DECL_PASS pass_sse, PASS_BIG 1 | |||||
DECL_PASS pass_interleave_sse, PASS_BIG 0 | |||||
INIT_MMX | |||||
%define mulps pfmul | |||||
%define addps pfadd | |||||
%define subps pfsub | |||||
%define unpcklps punpckldq | |||||
%define unpckhps punpckhdq | |||||
DECL_PASS pass_3dn, PASS_SMALL 1, [wq], [wq+o1q] | |||||
DECL_PASS pass_interleave_3dn, PASS_BIG 0 | |||||
%define pass_3dn2 pass_3dn | |||||
%define pass_interleave_3dn2 pass_interleave_3dn | |||||
%macro DECL_FFT 2-3 ; nbits, cpu, suffix | |||||
%xdefine list_of_fft fft4%2, fft8%2 | |||||
%if %1==5 | |||||
%xdefine list_of_fft list_of_fft, fft16%2 | |||||
%endif | |||||
%assign n 1<<%1 | |||||
%rep 17-%1 | |||||
%assign n2 n/2 | |||||
%assign n4 n/4 | |||||
%xdefine list_of_fft list_of_fft, fft %+ n %+ %3%2 | |||||
align 16 | |||||
fft %+ n %+ %3%2: | |||||
call fft %+ n2 %+ %2 | |||||
add r0, n*4 - (n&(-2<<%1)) | |||||
call fft %+ n4 %+ %2 | |||||
add r0, n*2 - (n2&(-2<<%1)) | |||||
call fft %+ n4 %+ %2 | |||||
sub r0, n*6 + (n2&(-2<<%1)) | |||||
lea r1, [ff_cos_ %+ n GLOBAL] | |||||
mov r2d, n4/2 | |||||
jmp pass%3%2 | |||||
%assign n n*2 | |||||
%endrep | |||||
%undef n | |||||
align 8 | |||||
dispatch_tab%3%2: pointer list_of_fft | |||||
; On x86_32, this function does the register saving and restoring for all of fft. | |||||
; The others pass args in registers and don't spill anything. | |||||
cglobal ff_fft_dispatch%3%2, 2,5,0, z, nbits | |||||
lea r2, [dispatch_tab%3%2 GLOBAL] | |||||
mov r2, [r2 + (nbitsq-2)*gprsize] | |||||
call r2 | |||||
RET | |||||
%endmacro ; DECL_FFT | |||||
DECL_FFT 5, _sse | |||||
DECL_FFT 5, _sse, _interleave | |||||
DECL_FFT 4, _3dn | |||||
DECL_FFT 4, _3dn, _interleave | |||||
DECL_FFT 4, _3dn2 | |||||
DECL_FFT 4, _3dn2, _interleave | |||||
@@ -22,124 +22,55 @@
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
static const int p1p1p1m1[4] __attribute__((aligned(16))) =
    { 0, 0, 0, 1 << 31 };
static const int p1p1m1p1[4] __attribute__((aligned(16))) =
    { 0, 0, 1 << 31, 0 };
static const int p1p1m1m1[4] __attribute__((aligned(16))) =
    { 0, 0, 1 << 31, 1 << 31 };
static const int p1m1p1m1[4] __attribute__((aligned(16))) =
    { 0, 1 << 31, 0, 1 << 31 };
static const int m1m1m1m1[4] __attribute__((aligned(16))) =
    { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
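These constants are IEEE-754 sign-bit masks: XORing a float vector with 1<<31 in selected lanes negates exactly those lanes, without a multiply. A small intrinsics sketch of the same trick (standalone illustration, not part of the patch; build with SSE enabled):

#include <stdio.h>
#include <xmmintrin.h>   /* SSE intrinsics: __m128, _mm_xor_ps, ... */

int main(void)
{
    /* -0.0f is just the sign bit, i.e. the same 1<<31 pattern as above;
     * this mask matches p1m1p1m1 (flip lanes 1 and 3 only). */
    __m128 mask = _mm_set_ps(-0.0f, 0.0f, -0.0f, 0.0f);
    __m128 v    = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* lanes 0..3 = 1,2,3,4 */
    float out[4];
    _mm_storeu_ps(out, _mm_xor_ps(v, mask));
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);   /* 1 -2 3 -4 */
    return 0;
}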
#if 0
static void print_v4sf(const char *str, __m128 a)
{
    float *p = (float *)&a;
    printf("%s: %f %f %f %f\n",
           str, p[0], p[1], p[2], p[3]);
}
#endif
void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
/* XXX: handle reverse case */
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
int ln = s->nbits; | |||||
x86_reg i; | |||||
long j; | |||||
long nblocks, nloops; | |||||
FFTComplex *p, *cptr; | |||||
int n = 1 << s->nbits; | |||||
asm volatile( | |||||
"movaps %0, %%xmm4 \n\t" | |||||
"movaps %1, %%xmm5 \n\t" | |||||
::"m"(*p1p1m1m1), | |||||
"m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1)) | |||||
); | |||||
ff_fft_dispatch_interleave_sse(z, s->nbits); | |||||
i = 8 << ln; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $32, %0 \n\t" | |||||
/* do the pass 0 butterfly */ | |||||
"movaps (%0,%1), %%xmm0 \n\t" | |||||
"movaps %%xmm0, %%xmm1 \n\t" | |||||
"shufps $0x4E, %%xmm0, %%xmm0 \n\t" | |||||
"xorps %%xmm4, %%xmm1 \n\t" | |||||
"addps %%xmm1, %%xmm0 \n\t" | |||||
"movaps 16(%0,%1), %%xmm2 \n\t" | |||||
"movaps %%xmm2, %%xmm3 \n\t" | |||||
"shufps $0x4E, %%xmm2, %%xmm2 \n\t" | |||||
"xorps %%xmm4, %%xmm3 \n\t" | |||||
"addps %%xmm3, %%xmm2 \n\t" | |||||
/* multiply third by -i */ | |||||
/* by toggling the sign bit */ | |||||
"shufps $0xB4, %%xmm2, %%xmm2 \n\t" | |||||
"xorps %%xmm5, %%xmm2 \n\t" | |||||
/* do the pass 1 butterfly */ | |||||
"movaps %%xmm0, %%xmm1 \n\t" | |||||
"addps %%xmm2, %%xmm0 \n\t" | |||||
"subps %%xmm2, %%xmm1 \n\t" | |||||
"movaps %%xmm0, (%0,%1) \n\t" | |||||
"movaps %%xmm1, 16(%0,%1) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(z) | |||||
); | |||||
/* pass 2 .. ln-1 */ | |||||
if(n <= 16) { | |||||
x86_reg i = -8*n; | |||||
asm volatile( | |||||
"1: \n" | |||||
"movaps (%0,%1), %%xmm0 \n" | |||||
"movaps %%xmm0, %%xmm1 \n" | |||||
"unpcklps 16(%0,%1), %%xmm0 \n" | |||||
"unpckhps 16(%0,%1), %%xmm1 \n" | |||||
"movaps %%xmm0, (%0,%1) \n" | |||||
"movaps %%xmm1, 16(%0,%1) \n" | |||||
"add $32, %0 \n" | |||||
"jl 1b \n" | |||||
:"+r"(i) | |||||
:"r"(z+n) | |||||
:"memory" | |||||
); | |||||
} | |||||
} | |||||
nblocks = 1 << (ln-3); | |||||
nloops = 1 << 2; | |||||
cptr = s->exptab1; | |||||
do { | |||||
p = z; | |||||
j = nblocks; | |||||
do { | |||||
i = nloops*8; | |||||
asm volatile( | |||||
"1: \n\t" | |||||
"sub $32, %0 \n\t" | |||||
"movaps (%2,%0), %%xmm1 \n\t" | |||||
"movaps (%1,%0), %%xmm0 \n\t" | |||||
"movaps 16(%2,%0), %%xmm5 \n\t" | |||||
"movaps 16(%1,%0), %%xmm4 \n\t" | |||||
"movaps %%xmm1, %%xmm2 \n\t" | |||||
"movaps %%xmm5, %%xmm6 \n\t" | |||||
"shufps $0xA0, %%xmm1, %%xmm1 \n\t" | |||||
"shufps $0xF5, %%xmm2, %%xmm2 \n\t" | |||||
"shufps $0xA0, %%xmm5, %%xmm5 \n\t" | |||||
"shufps $0xF5, %%xmm6, %%xmm6 \n\t" | |||||
"mulps (%3,%0,2), %%xmm1 \n\t" // cre*re cim*re | |||||
"mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im | |||||
"mulps 32(%3,%0,2), %%xmm5 \n\t" // cre*re cim*re | |||||
"mulps 48(%3,%0,2), %%xmm6 \n\t" // -cim*im cre*im | |||||
"addps %%xmm2, %%xmm1 \n\t" | |||||
"addps %%xmm6, %%xmm5 \n\t" | |||||
"movaps %%xmm0, %%xmm3 \n\t" | |||||
"movaps %%xmm4, %%xmm7 \n\t" | |||||
"addps %%xmm1, %%xmm0 \n\t" | |||||
"subps %%xmm1, %%xmm3 \n\t" | |||||
"addps %%xmm5, %%xmm4 \n\t" | |||||
"subps %%xmm5, %%xmm7 \n\t" | |||||
"movaps %%xmm0, (%1,%0) \n\t" | |||||
"movaps %%xmm3, (%2,%0) \n\t" | |||||
"movaps %%xmm4, 16(%1,%0) \n\t" | |||||
"movaps %%xmm7, 16(%2,%0) \n\t" | |||||
"jg 1b \n\t" | |||||
:"+r"(i) | |||||
:"r"(p), "r"(p + nloops), "r"(cptr) | |||||
); | |||||
p += nloops*2; | |||||
} while (--j); | |||||
cptr += nloops*2; | |||||
nblocks >>= 1; | |||||
nloops <<= 1; | |||||
} while (nblocks != 0); | |||||
void ff_fft_permute_sse(FFTContext *s, FFTComplex *z) | |||||
{ | |||||
int n = 1 << s->nbits; | |||||
int i; | |||||
for(i=0; i<n; i+=2) { | |||||
asm volatile( | |||||
"movaps %2, %%xmm0 \n" | |||||
"movlps %%xmm0, %0 \n" | |||||
"movhps %%xmm0, %1 \n" | |||||
:"=m"(s->tmp_buf[s->revtab[i]]), | |||||
"=m"(s->tmp_buf[s->revtab[i+1]]) | |||||
:"m"(z[i]) | |||||
); | |||||
} | |||||
memcpy(z, s->tmp_buf, n*sizeof(FFTComplex)); | |||||
}
static void imdct_sse(MDCTContext *s, const FFTSample *input, FFTSample *tmp)