@@ -35,6 +35,34 @@

#if HAVE_ALTIVEC
#if HAVE_VSX
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
                               ptrdiff_t line_size)
{
    int i;
    vector unsigned char perm =
        (vector unsigned char) {0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
                                0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17};
    const vector unsigned char zero =
        (const vector unsigned char) vec_splat_u8(0);

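    /* Illustrative note, added for clarity and not part of the patch: in the
     * permute mask above, indices 0x00-0x07 select the eight pixel bytes from
     * the first vec_perm operand and indices 0x10-0x17 select zero bytes from
     * the second, so each pixel byte ends up paired with a zero byte, i.e.
     * zero-extended into a 16-bit lane. */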
    for (i = 0; i < 8; i++) {
        /* Read potentially unaligned pixels.
         * We're reading 16 pixels but only want 8;
         * the extras are simply ignored. */
        vector unsigned char bytes = vec_vsx_ld(0, pixels);

        // Convert the bytes into shorts (the commented-out line is the
        // alternative vec_perm operand order, kept for reference).
        // vector signed short shorts = (vector signed short) vec_perm(zero, bytes, perm);
        vector signed short shorts = (vector signed short) vec_perm(bytes, zero, perm);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_vsx_st(shorts, i * 16, (vector signed short *) block);

        pixels += line_size;
    }
}
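/* For reference only, not part of this patch: a minimal scalar equivalent of
 * the VSX get_pixels_altivec() above, handy as a mental model or as a checker
 * in a test harness. The name get_pixels_ref is hypothetical. */
#if 0
static void get_pixels_ref(int16_t *restrict block, const uint8_t *pixels,
                           ptrdiff_t line_size)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];   /* widen u8 pixel to s16 */
        pixels += line_size;
    }
}
#endif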
#else
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
                               ptrdiff_t line_size)
{
@@ -62,6 +90,71 @@ static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
    }
}

#endif /* HAVE_VSX */

#if HAVE_VSX
static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
    int i;
    const vector unsigned char zero =
        (const vector unsigned char) vec_splat_u8(0);
    vector signed short shorts1, shorts2;

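    /* Illustrative note, not part of the patch: vec_mergeh(bytes, zero)
     * interleaves the first eight bytes of `bytes` with zero bytes, which
     * zero-extends eight u8 pixels to 16-bit lanes in a single instruction;
     * it is the merge-based counterpart of the vec_perm trick used in
     * get_pixels_altivec() above. */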
    for (i = 0; i < 4; i++) {
        /* Read potentially unaligned pixels.
         * We're reading 16 pixels but only want 8;
         * the extras are simply ignored. */
        vector unsigned char bytes = vec_vsx_ld(0, s1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short) vec_mergeh(bytes, zero);

        // Do the same for the second block of pixels.
        bytes = vec_vsx_ld(0, s2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short) vec_mergeh(bytes, zero);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_vsx_st(shorts1, 0, (vector signed short *) block);

        s1 += stride;
        s2 += stride;
        block += 8;

        /* The code below is a copy of the code above;
         * this is a manual two-row unroll. */

        /* Read potentially unaligned pixels.
         * We're reading 16 pixels but only want 8;
         * the extras are simply ignored. */
        bytes = vec_vsx_ld(0, s1);

        // Convert the bytes into shorts.
        shorts1 = (vector signed short) vec_mergeh(bytes, zero);

        // Do the same for the second block of pixels.
        bytes = vec_vsx_ld(0, s2);

        // Convert the bytes into shorts.
        shorts2 = (vector signed short) vec_mergeh(bytes, zero);

        // Do the subtraction.
        shorts1 = vec_sub(shorts1, shorts2);

        // Save the data to the block; we assume the block is 16-byte aligned.
        vec_vsx_st(shorts1, 0, (vector signed short *) block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
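/* For reference only, not part of this patch: a minimal scalar equivalent of
 * the VSX diff_pixels_altivec() above. The name diff_pixels_ref is
 * hypothetical. */
#if 0
static void diff_pixels_ref(int16_t *restrict block, const uint8_t *s1,
                            const uint8_t *s2, int stride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = s1[j] - s2[j];   /* per-pixel difference, widened to s16 */
        s1 += stride;
        s2 += stride;
    }
}
#endif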
#else
static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
                                const uint8_t *s2, int stride)
{
@@ -134,6 +227,8 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
    }
}

#endif /* HAVE_VSX */

#endif /* HAVE_ALTIVEC */

av_cold void ff_pixblockdsp_init_ppc(PixblockDSPContext *c, |