@@ -40,7 +40,7 @@ DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;
 
 static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    x86_reg len= -(stride*h);
+    x86_reg len= -(x86_reg)stride*h;
     __asm__ volatile(
         ".p2align 4                     \n\t"
         "1:                             \n\t"
@@ -199,7 +199,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 
 static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
 {
-    x86_reg len= -(stride*h);
+    x86_reg len= -(x86_reg)stride*h;
     __asm__ volatile(
         ".p2align 4                     \n\t"
         "1:                             \n\t"
@@ -237,7 +237,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 
 static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    x86_reg len= -(stride*h);
+    x86_reg len= -(x86_reg)stride*h;
     __asm__ volatile(
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
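
The same one-line fix is applied in all three SAD routines: in the old expression "-(stride*h)" the product is evaluated in plain int and only then widened to x86_reg, so on x86_64 (where x86_reg is 64 bits) it can overflow before the negation ever happens; casting stride to x86_reg first keeps the multiply and the negation in the wider type. A minimal standalone sketch of the difference, assuming a 64-bit x86_reg and deliberately oversized, purely hypothetical stride/h values:

    /* Illustrative sketch only, not part of the patch. */
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t x86_reg;                /* the 64-bit case */

    int main(void)
    {
        int stride = 1 << 20;               /* hypothetical large stride      */
        int h      = 4096;                  /* stride*h == 2^32 > INT_MAX     */

        /* New form: stride is widened first, so the whole expression is 64-bit. */
        x86_reg good = -(x86_reg)stride * h;

        /* Old form, modelled here with well-defined wrapping arithmetic
         * (writing "-(stride*h)" directly would be signed-overflow UB). */
        x86_reg bad = -(x86_reg)(int32_t)((uint32_t)stride * (uint32_t)h);

        printf("widened first:  %lld\n", (long long)good);  /* -4294967296 */
        printf("32-bit product: %lld\n", (long long)bad);   /* 0 (wrapped) */
        return 0;
    }

With realistic strides and block heights the two forms agree; the cast only changes the result once stride*h no longer fits in an int.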