@@ -1,5 +1,5 @@
;*****************************************************************************
;* x86inc.asm
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
|
|
@@ -112,7 +112,7 @@
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for Athlons
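
; Illustrative sketch, not part of the original patch: a minimal function built
; on these macros, assuming x86inc.asm is %include'd and both pointers are
; 16-byte aligned. The extra cglobal arguments invoke PROLOGUE, which loads the
; two pointer arguments into r0/r1; RET then undoes whatever PROLOGUE pushed.
INIT_XMM
cglobal copy16, 2,2,1 ; hypothetical name: 2 args, 2 gprs, 1 xmm register
    mova  m0, [r1]    ; r1 = second argument (source)
    mova  [r0], m0    ; r0 = first argument (destination)
    RET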
|
|
@@ -297,6 +297,9 @@ DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56] |
|
|
|
|
|
|
|
%macro WIN64_SPILL_XMM 1 |
|
|
|
%assign xmm_regs_used %1 |
|
|
|
%if mmsize == 8 |
|
|
|
%assign xmm_regs_used 0 |
|
|
|
%endif |
|
|
|
ASSERT xmm_regs_used <= 16 |
|
|
|
%if xmm_regs_used > 6 |
|
|
|
sub rsp, (xmm_regs_used-6)*16+16 |
|
|
@@ -459,10 +462,24 @@ DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28] |
|
|
|
|
|
|
|
%assign function_align 16 |
|
|
|
|
|
|
|
; Symbol prefix for C linkage
%macro cglobal 1-2+
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    %xdefine %1.skip_prologue %1 %+ .skip_prologue
; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
%macro cglobal 1-2+ ; name, [PROLOGUE args]
    %if %0 == 1
        cglobal_internal %1 %+ SUFFIX
    %else
        cglobal_internal %1 %+ SUFFIX, %2
    %endif
%endmacro
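
; Illustrative sketch, not part of the original patch: how the new cglobal is
; meant to be used once cpuflags are in play. "clear16" is a hypothetical name;
; after INIT_XMM sse2 the symbol below assembles as the mangled, suffixed form
; (e.g. x264_clear16_sse2, depending on program_name).
INIT_XMM sse2
cglobal clear16, 1,1,1 ; 1 arg, 1 gpr, 1 xmm register
    pxor  m0, m0
    mova  [r0], m0
    RET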
|
|
|
%macro cglobal_internal 1-2+ |
|
|
|
%ifndef cglobaled_%1 |
|
|
|
%xdefine %1 mangle(program_name %+ _ %+ %1) |
|
|
|
%xdefine %1.skip_prologue %1 %+ .skip_prologue |
|
|
|
CAT_XDEFINE cglobaled_, %1, 1 |
|
|
|
%endif |
|
|
|
%xdefine current_function %1 |
|
|
|
%ifidn __OUTPUT_FORMAT__,elf |
|
|
|
global %1:function hidden |
|
|
|
%else |
|
|
@@ -479,12 +496,14 @@ DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28] |
|
|
|
|
|
|
|
%macro cextern 1 |
|
|
|
%xdefine %1 mangle(program_name %+ _ %+ %1) |
|
|
|
CAT_XDEFINE cglobaled_, %1, 1 |
|
|
|
extern %1 |
|
|
|
%endmacro |
|
|
|
|
|
|
|
;like cextern, but without the prefix |
|
|
|
; like cextern, but without the prefix |
|
|
|
%macro cextern_naked 1 |
|
|
|
%xdefine %1 mangle(%1) |
|
|
|
CAT_XDEFINE cglobaled_, %1, 1 |
|
|
|
extern %1 |
|
|
|
%endmacro |
|
|
|
|
|
|
@@ -500,6 +519,61 @@ DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28] |
|
|
|
SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
%endif |
|
|
|
|
|
|
|
; cpuflags |
|
|
|
|
|
|
|
%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnow2   (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)

%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
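
; Illustrative sketch, not part of the original patch: once an INIT_* macro has
; set "cpuflags" (e.g. INIT_XMM sse4), these tests drive conditional assembly
; inside a function body. m0/m1 and the surrounding function are assumed.
%if cpuflag(sse4)
    pminud m0, m1 ; SSE4.1 provides an unsigned dword minimum directly
%else
    ; an SSE2 emulation sequence would go here for older cpus
%endif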
|
|
|
|
|
|
|
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX & co.
|
|
|
%macro INIT_CPUFLAGS 0-2 |
|
|
|
%if %0 >= 1 |
|
|
|
%xdefine cpuname %1 |
|
|
|
%assign cpuflags cpuflags_%1 |
|
|
|
%if %0 >= 2 |
|
|
|
%xdefine cpuname %1_%2 |
|
|
|
%assign cpuflags cpuflags | cpuflags_%2 |
|
|
|
%endif |
|
|
|
%xdefine SUFFIX _ %+ cpuname |
|
|
|
%if cpuflag(avx) |
|
|
|
%assign avx_enabled 1 |
|
|
|
%endif |
|
|
|
%if cpuflag(aligned) |
|
|
|
%define movu mova |
|
|
|
%elifidn %1, sse3 |
|
|
|
%define movu lddqu |
|
|
|
%endif |
|
|
|
%else |
|
|
|
%xdefine SUFFIX |
|
|
|
%undef cpuname |
|
|
|
%undef cpuflags |
|
|
|
%endif |
|
|
|
%endmacro |
|
|
|
|
|
|
|
; merge mmx and sse* |
|
|
|
|
|
|
|
%macro CAT_XDEFINE 3 |
|
|
@@ -510,9 +584,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
%undef %1%2 |
|
|
|
%endmacro |
|
|
|
|
|
|
|
%macro INIT_MMX 0 |
|
|
|
%macro INIT_MMX 0-1+ |
|
|
|
%assign avx_enabled 0 |
|
|
|
%define RESET_MM_PERMUTATION INIT_MMX |
|
|
|
%define RESET_MM_PERMUTATION INIT_MMX %1 |
|
|
|
%define mmsize 8 |
|
|
|
%define num_mmregs 8 |
|
|
|
%define mova movq |
|
|
@@ -530,11 +604,12 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
CAT_UNDEF nmm, %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
INIT_CPUFLAGS %1 |
|
|
|
%endmacro |
|
|
|
|
|
|
|
%macro INIT_XMM 0 |
|
|
|
%macro INIT_XMM 0-1+ |
|
|
|
%assign avx_enabled 0 |
|
|
|
%define RESET_MM_PERMUTATION INIT_XMM |
|
|
|
%define RESET_MM_PERMUTATION INIT_XMM %1 |
|
|
|
%define mmsize 16 |
|
|
|
%define num_mmregs 8 |
|
|
|
%ifdef ARCH_X86_64 |
|
|
@@ -550,8 +625,10 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
CAT_XDEFINE nxmm, %%i, %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
INIT_CPUFLAGS %1 |
|
|
|
%endmacro |
|
|
|
|
|
|
|
; FIXME: INIT_AVX can be replaced by INIT_XMM avx |
|
|
|
%macro INIT_AVX 0 |
|
|
|
INIT_XMM |
|
|
|
%assign avx_enabled 1 |
|
|
@@ -559,9 +636,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
%define RESET_MM_PERMUTATION INIT_AVX |
|
|
|
%endmacro |
|
|
|
|
|
|
|
%macro INIT_YMM 0 |
|
|
|
%macro INIT_YMM 0-1+ |
|
|
|
%assign avx_enabled 1 |
|
|
|
%define RESET_MM_PERMUTATION INIT_YMM |
|
|
|
%define RESET_MM_PERMUTATION INIT_YMM %1 |
|
|
|
%define mmsize 32 |
|
|
|
%define num_mmregs 8 |
|
|
|
%ifdef ARCH_X86_64 |
|
|
@@ -569,15 +646,18 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits |
|
|
|
%endif |
|
|
|
%define mova vmovaps |
|
|
|
%define movu vmovups |
|
|
|
%undef movh |
|
|
|
%define movnta vmovntps |
|
|
|
%assign %%i 0 |
|
|
|
%rep num_mmregs |
|
|
|
CAT_XDEFINE m, %%i, ymm %+ %%i |
|
|
|
CAT_XDEFINE nymm, %%i, %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
INIT_CPUFLAGS %1 |
|
|
|
%endmacro |
|
|
|
|
|
|
|
INIT_MMX |
|
|
|
INIT_XMM |
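
; Illustrative sketch, not part of the original patch: with the new optional
; argument to INIT_*, one hypothetical body can be assembled for several
; instruction sets, each version getting its own suffix and encodings.
%macro NEGATE_WORDS 0
cglobal negate_words, 2,2,2 ; dst and src pointers arrive in r0/r1
    movu  m0, [r1]
    pxor  m1, m1
    psubw m1, m0
    movu  [r0], m1
    RET
%endmacro

INIT_XMM sse2
NEGATE_WORDS ; emits negate_words_sse2
INIT_XMM avx
NEGATE_WORDS ; emits negate_words_avx; pxor/psubw use VEX 3-operand forms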
|
|
|
|
|
|
|
; I often want to use macros that permute their arguments. e.g. there's no |
|
|
|
; efficient way to implement butterfly or transpose or dct without swapping some |
|
|
@@ -633,31 +713,46 @@ INIT_MMX |
|
|
|
%endrep |
|
|
|
%endmacro |
|
|
|
|
|
|
|
; If SAVE_MM_PERMUTATION is placed at the end of a function and given the |
|
|
|
; function name, then any later calls to that function will automatically |
|
|
|
; load the permutation, so values can be returned in mmregs. |
|
|
|
%macro SAVE_MM_PERMUTATION 1 ; name to save as |
|
|
|
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later |
|
|
|
; calls to that function will automatically load the permutation, so values can |
|
|
|
; be returned in mmregs. |
|
|
|
%macro SAVE_MM_PERMUTATION 0-1 |
|
|
|
%if %0 |
|
|
|
%xdefine %%f %1_m |
|
|
|
%else |
|
|
|
%xdefine %%f current_function %+ _m |
|
|
|
%endif |
|
|
|
%assign %%i 0 |
|
|
|
%rep num_mmregs |
|
|
|
CAT_XDEFINE %1_m, %%i, m %+ %%i |
|
|
|
CAT_XDEFINE %%f, %%i, m %+ %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
%endmacro |
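
; Illustrative sketch, not part of the original patch: a hypothetical internal
; helper (declared after some INIT_*) that leaves results in permuted registers
; and records the permutation for its callers; SWAP is defined elsewhere in
; this file, and the names below are made up.
cglobal transpose_helper ; no PROLOGUE arguments, so nothing is pushed
    SWAP 0, 1 ; pretend the body permuted its registers
    SAVE_MM_PERMUTATION ; with no argument, saved under current_function
    ret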
|
|
|
|
|
|
|
%macro LOAD_MM_PERMUTATION 1 ; name to load from |
|
|
|
%assign %%i 0 |
|
|
|
%rep num_mmregs |
|
|
|
CAT_XDEFINE m, %%i, %1_m %+ %%i |
|
|
|
CAT_XDEFINE n, m %+ %%i, %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
%ifdef %1_m0 |
|
|
|
%assign %%i 0 |
|
|
|
%rep num_mmregs |
|
|
|
CAT_XDEFINE m, %%i, %1_m %+ %%i |
|
|
|
CAT_XDEFINE n, m %+ %%i, %%i |
|
|
|
%assign %%i %%i+1 |
|
|
|
%endrep |
|
|
|
%endif |
|
|
|
%endmacro |
|
|
|
|
|
|
|
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't |
|
|
|
%macro call 1 |
|
|
|
call %1 |
|
|
|
%ifdef %1_m0 |
|
|
|
LOAD_MM_PERMUTATION %1 |
|
|
|
call_internal %1, %1 %+ SUFFIX |
|
|
|
%endmacro |
|
|
|
%macro call_internal 2 |
|
|
|
%xdefine %%i %1 |
|
|
|
%ifndef cglobaled_%1 |
|
|
|
%ifdef cglobaled_%2 |
|
|
|
%xdefine %%i %2 |
|
|
|
%endif |
|
|
|
%endif |
|
|
|
call %%i |
|
|
|
LOAD_MM_PERMUTATION %%i |
|
|
|
%endmacro |
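
; Illustrative sketch, not part of the original patch: at some call site, the
; wrapped "call" appends the current SUFFIX when only the suffixed variant was
; declared with cglobal, then reloads that callee's saved permutation.
    call transpose_helper ; may resolve to e.g. transpose_helper_sse2;
                          ; LOAD_MM_PERMUTATION then restores the m0..m7 naming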
|
|
|
|
|
|
|
; Substitutions that reduce instruction size but are functionally equivalent |
|
|
@@ -702,14 +797,19 @@ INIT_MMX |
|
|
|
|
|
|
|
;%1 == instruction |
|
|
|
;%2 == 1 if float, 0 if int |
|
|
|
;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm) |
|
|
|
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm) |
|
|
|
;%4 == number of operands given |
|
|
|
;%5+: operands |
|
|
|
%macro RUN_AVX_INSTR 6-7+ |
|
|
|
%if sizeof%5==32 |
|
|
|
%ifid %5 |
|
|
|
%define %%size sizeof%5 |
|
|
|
%else |
|
|
|
%define %%size mmsize |
|
|
|
%endif |
|
|
|
%if %%size==32 |
|
|
|
v%1 %5, %6, %7 |
|
|
|
%else |
|
|
|
%if sizeof%5==8 |
|
|
|
%if %%size==8 |
|
|
|
%define %%regmov movq |
|
|
|
%elif %2 |
|
|
|
%define %%regmov movaps |
|
|
@@ -736,15 +836,37 @@ INIT_MMX |
|
|
|
%endif |
|
|
|
%endmacro |
|
|
|
|
|
|
|
; 3-arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3-arg ops prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
|
|
|
%macro RUN_AVX_INSTR1 8 |
|
|
|
%assign %%swap 0 |
|
|
|
%if avx_enabled |
|
|
|
%ifnid %6 |
|
|
|
%assign %%swap 1 |
|
|
|
%endif |
|
|
|
%elifnidn %5, %6 |
|
|
|
%ifnid %7 |
|
|
|
%assign %%swap 1 |
|
|
|
%endif |
|
|
|
%endif |
|
|
|
%if %%swap && %3 == 0 && %8 == 1 |
|
|
|
RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6 |
|
|
|
%else |
|
|
|
RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7 |
|
|
|
%endif |
|
|
|
%endmacro |
|
|
|
|
|
|
|
;%1 == instruction |
|
|
|
;%2 == 1 if float, 0 if int |
|
|
|
;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm) |
|
|
|
%macro AVX_INSTR 3 |
|
|
|
%macro %1 2-8 fnord, fnord, fnord, %1, %2, %3 |
|
|
|
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm) |
|
|
|
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not |
|
|
|
%macro AVX_INSTR 4 |
|
|
|
%macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4 |
|
|
|
%ifidn %3, fnord |
|
|
|
RUN_AVX_INSTR %6, %7, %8, 2, %1, %2 |
|
|
|
%elifidn %4, fnord |
|
|
|
RUN_AVX_INSTR %6, %7, %8, 3, %1, %2, %3 |
|
|
|
RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9 |
|
|
|
%elifidn %5, fnord |
|
|
|
RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4 |
|
|
|
%else |
|
|
@@ -753,153 +875,188 @@ INIT_MMX |
|
|
|
%endmacro |
|
|
|
%endmacro |
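
; Illustrative sketch, not part of the original patch: with the wrappers declared
; below, a single source line assembles both ways. Under avx_enabled the first
; line becomes "vpaddw m0, m1, m2"; otherwise it expands to "movdqa m0, m1"
; followed by "paddw m0, m2". Register choices are arbitrary.
    paddw m0, m1, m2 ; 3-operand form, emulated on pre-AVX cpus
    paddw m0, m5     ; plain 2-operand form still assembles everywhere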
|
|
|
|
|
|
|
AVX_INSTR addpd, 1, 0 |
|
|
|
AVX_INSTR addps, 1, 0 |
|
|
|
AVX_INSTR addsd, 1, 0 |
|
|
|
AVX_INSTR addss, 1, 0 |
|
|
|
AVX_INSTR addsubpd, 1, 0 |
|
|
|
AVX_INSTR addsubps, 1, 0 |
|
|
|
AVX_INSTR andpd, 1, 0 |
|
|
|
AVX_INSTR andps, 1, 0 |
|
|
|
AVX_INSTR andnpd, 1, 0 |
|
|
|
AVX_INSTR andnps, 1, 0 |
|
|
|
AVX_INSTR blendpd, 1, 0 |
|
|
|
AVX_INSTR blendps, 1, 0 |
|
|
|
AVX_INSTR blendvpd, 1, 0 |
|
|
|
AVX_INSTR blendvps, 1, 0 |
|
|
|
AVX_INSTR cmppd, 1, 0 |
|
|
|
AVX_INSTR cmpps, 1, 0 |
|
|
|
AVX_INSTR cmpsd, 1, 0 |
|
|
|
AVX_INSTR cmpss, 1, 0 |
|
|
|
AVX_INSTR divpd, 1, 0 |
|
|
|
AVX_INSTR divps, 1, 0 |
|
|
|
AVX_INSTR divsd, 1, 0 |
|
|
|
AVX_INSTR divss, 1, 0 |
|
|
|
AVX_INSTR dppd, 1, 0 |
|
|
|
AVX_INSTR dpps, 1, 0 |
|
|
|
AVX_INSTR haddpd, 1, 0 |
|
|
|
AVX_INSTR haddps, 1, 0 |
|
|
|
AVX_INSTR hsubpd, 1, 0 |
|
|
|
AVX_INSTR hsubps, 1, 0 |
|
|
|
AVX_INSTR maxpd, 1, 0 |
|
|
|
AVX_INSTR maxps, 1, 0 |
|
|
|
AVX_INSTR maxsd, 1, 0 |
|
|
|
AVX_INSTR maxss, 1, 0 |
|
|
|
AVX_INSTR minpd, 1, 0 |
|
|
|
AVX_INSTR minps, 1, 0 |
|
|
|
AVX_INSTR minsd, 1, 0 |
|
|
|
AVX_INSTR minss, 1, 0 |
|
|
|
AVX_INSTR mpsadbw, 0, 1 |
|
|
|
AVX_INSTR mulpd, 1, 0 |
|
|
|
AVX_INSTR mulps, 1, 0 |
|
|
|
AVX_INSTR mulsd, 1, 0 |
|
|
|
AVX_INSTR mulss, 1, 0 |
|
|
|
AVX_INSTR orpd, 1, 0 |
|
|
|
AVX_INSTR orps, 1, 0 |
|
|
|
AVX_INSTR packsswb, 0, 0 |
|
|
|
AVX_INSTR packssdw, 0, 0 |
|
|
|
AVX_INSTR packuswb, 0, 0 |
|
|
|
AVX_INSTR packusdw, 0, 0 |
|
|
|
AVX_INSTR paddb, 0, 0 |
|
|
|
AVX_INSTR paddw, 0, 0 |
|
|
|
AVX_INSTR paddd, 0, 0 |
|
|
|
AVX_INSTR paddq, 0, 0 |
|
|
|
AVX_INSTR paddsb, 0, 0 |
|
|
|
AVX_INSTR paddsw, 0, 0 |
|
|
|
AVX_INSTR paddusb, 0, 0 |
|
|
|
AVX_INSTR paddusw, 0, 0 |
|
|
|
AVX_INSTR palignr, 0, 1 |
|
|
|
AVX_INSTR pand, 0, 0 |
|
|
|
AVX_INSTR pandn, 0, 0 |
|
|
|
AVX_INSTR pavgb, 0, 0 |
|
|
|
AVX_INSTR pavgw, 0, 0 |
|
|
|
AVX_INSTR pblendvb, 0, 0 |
|
|
|
AVX_INSTR pblendw, 0, 1 |
|
|
|
AVX_INSTR pcmpestri, 0, 0 |
|
|
|
AVX_INSTR pcmpestrm, 0, 0 |
|
|
|
AVX_INSTR pcmpistri, 0, 0 |
|
|
|
AVX_INSTR pcmpistrm, 0, 0 |
|
|
|
AVX_INSTR pcmpeqb, 0, 0 |
|
|
|
AVX_INSTR pcmpeqw, 0, 0 |
|
|
|
AVX_INSTR pcmpeqd, 0, 0 |
|
|
|
AVX_INSTR pcmpeqq, 0, 0 |
|
|
|
AVX_INSTR pcmpgtb, 0, 0 |
|
|
|
AVX_INSTR pcmpgtw, 0, 0 |
|
|
|
AVX_INSTR pcmpgtd, 0, 0 |
|
|
|
AVX_INSTR pcmpgtq, 0, 0 |
|
|
|
AVX_INSTR phaddw, 0, 0 |
|
|
|
AVX_INSTR phaddd, 0, 0 |
|
|
|
AVX_INSTR phaddsw, 0, 0 |
|
|
|
AVX_INSTR phsubw, 0, 0 |
|
|
|
AVX_INSTR phsubd, 0, 0 |
|
|
|
AVX_INSTR phsubsw, 0, 0 |
|
|
|
AVX_INSTR pmaddwd, 0, 0 |
|
|
|
AVX_INSTR pmaddubsw, 0, 0 |
|
|
|
AVX_INSTR pmaxsb, 0, 0 |
|
|
|
AVX_INSTR pmaxsw, 0, 0 |
|
|
|
AVX_INSTR pmaxsd, 0, 0 |
|
|
|
AVX_INSTR pmaxub, 0, 0 |
|
|
|
AVX_INSTR pmaxuw, 0, 0 |
|
|
|
AVX_INSTR pmaxud, 0, 0 |
|
|
|
AVX_INSTR pminsb, 0, 0 |
|
|
|
AVX_INSTR pminsw, 0, 0 |
|
|
|
AVX_INSTR pminsd, 0, 0 |
|
|
|
AVX_INSTR pminub, 0, 0 |
|
|
|
AVX_INSTR pminuw, 0, 0 |
|
|
|
AVX_INSTR pminud, 0, 0 |
|
|
|
AVX_INSTR pmulhuw, 0, 0 |
|
|
|
AVX_INSTR pmulhrsw, 0, 0 |
|
|
|
AVX_INSTR pmulhw, 0, 0 |
|
|
|
AVX_INSTR pmullw, 0, 0 |
|
|
|
AVX_INSTR pmulld, 0, 0 |
|
|
|
AVX_INSTR pmuludq, 0, 0 |
|
|
|
AVX_INSTR pmuldq, 0, 0 |
|
|
|
AVX_INSTR por, 0, 0 |
|
|
|
AVX_INSTR psadbw, 0, 0 |
|
|
|
AVX_INSTR pshufb, 0, 0 |
|
|
|
AVX_INSTR psignb, 0, 0 |
|
|
|
AVX_INSTR psignw, 0, 0 |
|
|
|
AVX_INSTR psignd, 0, 0 |
|
|
|
AVX_INSTR psllw, 0, 0 |
|
|
|
AVX_INSTR pslld, 0, 0 |
|
|
|
AVX_INSTR psllq, 0, 0 |
|
|
|
AVX_INSTR pslldq, 0, 0 |
|
|
|
AVX_INSTR psraw, 0, 0 |
|
|
|
AVX_INSTR psrad, 0, 0 |
|
|
|
AVX_INSTR psrlw, 0, 0 |
|
|
|
AVX_INSTR psrld, 0, 0 |
|
|
|
AVX_INSTR psrlq, 0, 0 |
|
|
|
AVX_INSTR psrldq, 0, 0 |
|
|
|
AVX_INSTR psubb, 0, 0 |
|
|
|
AVX_INSTR psubw, 0, 0 |
|
|
|
AVX_INSTR psubd, 0, 0 |
|
|
|
AVX_INSTR psubq, 0, 0 |
|
|
|
AVX_INSTR psubsb, 0, 0 |
|
|
|
AVX_INSTR psubsw, 0, 0 |
|
|
|
AVX_INSTR psubusb, 0, 0 |
|
|
|
AVX_INSTR psubusw, 0, 0 |
|
|
|
AVX_INSTR punpckhbw, 0, 0 |
|
|
|
AVX_INSTR punpckhwd, 0, 0 |
|
|
|
AVX_INSTR punpckhdq, 0, 0 |
|
|
|
AVX_INSTR punpckhqdq, 0, 0 |
|
|
|
AVX_INSTR punpcklbw, 0, 0 |
|
|
|
AVX_INSTR punpcklwd, 0, 0 |
|
|
|
AVX_INSTR punpckldq, 0, 0 |
|
|
|
AVX_INSTR punpcklqdq, 0, 0 |
|
|
|
AVX_INSTR pxor, 0, 0 |
|
|
|
AVX_INSTR shufps, 0, 1 |
|
|
|
AVX_INSTR subpd, 1, 0 |
|
|
|
AVX_INSTR subps, 1, 0 |
|
|
|
AVX_INSTR subsd, 1, 0 |
|
|
|
AVX_INSTR subss, 1, 0 |
|
|
|
AVX_INSTR unpckhpd, 1, 0 |
|
|
|
AVX_INSTR unpckhps, 1, 0 |
|
|
|
AVX_INSTR unpcklpd, 1, 0 |
|
|
|
AVX_INSTR unpcklps, 1, 0 |
|
|
|
AVX_INSTR xorpd, 1, 0 |
|
|
|
AVX_INSTR xorps, 1, 0 |
|
|
|
AVX_INSTR addpd, 1, 0, 1 |
|
|
|
AVX_INSTR addps, 1, 0, 1 |
|
|
|
AVX_INSTR addsd, 1, 0, 1 |
|
|
|
AVX_INSTR addss, 1, 0, 1 |
|
|
|
AVX_INSTR addsubpd, 1, 0, 0 |
|
|
|
AVX_INSTR addsubps, 1, 0, 0 |
|
|
|
AVX_INSTR andpd, 1, 0, 1 |
|
|
|
AVX_INSTR andps, 1, 0, 1 |
|
|
|
AVX_INSTR andnpd, 1, 0, 0 |
|
|
|
AVX_INSTR andnps, 1, 0, 0 |
|
|
|
AVX_INSTR blendpd, 1, 0, 0 |
|
|
|
AVX_INSTR blendps, 1, 0, 0 |
|
|
|
AVX_INSTR blendvpd, 1, 0, 0 |
|
|
|
AVX_INSTR blendvps, 1, 0, 0 |
|
|
|
AVX_INSTR cmppd, 1, 0, 0 |
|
|
|
AVX_INSTR cmpps, 1, 0, 0 |
|
|
|
AVX_INSTR cmpsd, 1, 0, 0 |
|
|
|
AVX_INSTR cmpss, 1, 0, 0 |
|
|
|
AVX_INSTR divpd, 1, 0, 0 |
|
|
|
AVX_INSTR divps, 1, 0, 0 |
|
|
|
AVX_INSTR divsd, 1, 0, 0 |
|
|
|
AVX_INSTR divss, 1, 0, 0 |
|
|
|
AVX_INSTR dppd, 1, 1, 0 |
|
|
|
AVX_INSTR dpps, 1, 1, 0 |
|
|
|
AVX_INSTR haddpd, 1, 0, 0 |
|
|
|
AVX_INSTR haddps, 1, 0, 0 |
|
|
|
AVX_INSTR hsubpd, 1, 0, 0 |
|
|
|
AVX_INSTR hsubps, 1, 0, 0 |
|
|
|
AVX_INSTR maxpd, 1, 0, 1 |
|
|
|
AVX_INSTR maxps, 1, 0, 1 |
|
|
|
AVX_INSTR maxsd, 1, 0, 1 |
|
|
|
AVX_INSTR maxss, 1, 0, 1 |
|
|
|
AVX_INSTR minpd, 1, 0, 1 |
|
|
|
AVX_INSTR minps, 1, 0, 1 |
|
|
|
AVX_INSTR minsd, 1, 0, 1 |
|
|
|
AVX_INSTR minss, 1, 0, 1 |
|
|
|
AVX_INSTR movsd, 1, 0, 0 |
|
|
|
AVX_INSTR movss, 1, 0, 0 |
|
|
|
AVX_INSTR mpsadbw, 0, 1, 0 |
|
|
|
AVX_INSTR mulpd, 1, 0, 1 |
|
|
|
AVX_INSTR mulps, 1, 0, 1 |
|
|
|
AVX_INSTR mulsd, 1, 0, 1 |
|
|
|
AVX_INSTR mulss, 1, 0, 1 |
|
|
|
AVX_INSTR orpd, 1, 0, 1 |
|
|
|
AVX_INSTR orps, 1, 0, 1 |
|
|
|
AVX_INSTR packsswb, 0, 0, 0 |
|
|
|
AVX_INSTR packssdw, 0, 0, 0 |
|
|
|
AVX_INSTR packuswb, 0, 0, 0 |
|
|
|
AVX_INSTR packusdw, 0, 0, 0 |
|
|
|
AVX_INSTR paddb, 0, 0, 1 |
|
|
|
AVX_INSTR paddw, 0, 0, 1 |
|
|
|
AVX_INSTR paddd, 0, 0, 1 |
|
|
|
AVX_INSTR paddq, 0, 0, 1 |
|
|
|
AVX_INSTR paddsb, 0, 0, 1 |
|
|
|
AVX_INSTR paddsw, 0, 0, 1 |
|
|
|
AVX_INSTR paddusb, 0, 0, 1 |
|
|
|
AVX_INSTR paddusw, 0, 0, 1 |
|
|
|
AVX_INSTR palignr, 0, 1, 0 |
|
|
|
AVX_INSTR pand, 0, 0, 1 |
|
|
|
AVX_INSTR pandn, 0, 0, 0 |
|
|
|
AVX_INSTR pavgb, 0, 0, 1 |
|
|
|
AVX_INSTR pavgw, 0, 0, 1 |
|
|
|
AVX_INSTR pblendvb, 0, 0, 0 |
|
|
|
AVX_INSTR pblendw, 0, 1, 0 |
|
|
|
AVX_INSTR pcmpestri, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpestrm, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpistri, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpistrm, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpeqb, 0, 0, 1 |
|
|
|
AVX_INSTR pcmpeqw, 0, 0, 1 |
|
|
|
AVX_INSTR pcmpeqd, 0, 0, 1 |
|
|
|
AVX_INSTR pcmpeqq, 0, 0, 1 |
|
|
|
AVX_INSTR pcmpgtb, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpgtw, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpgtd, 0, 0, 0 |
|
|
|
AVX_INSTR pcmpgtq, 0, 0, 0 |
|
|
|
AVX_INSTR phaddw, 0, 0, 0 |
|
|
|
AVX_INSTR phaddd, 0, 0, 0 |
|
|
|
AVX_INSTR phaddsw, 0, 0, 0 |
|
|
|
AVX_INSTR phsubw, 0, 0, 0 |
|
|
|
AVX_INSTR phsubd, 0, 0, 0 |
|
|
|
AVX_INSTR phsubsw, 0, 0, 0 |
|
|
|
AVX_INSTR pmaddwd, 0, 0, 1 |
|
|
|
AVX_INSTR pmaddubsw, 0, 0, 0 |
|
|
|
AVX_INSTR pmaxsb, 0, 0, 1 |
|
|
|
AVX_INSTR pmaxsw, 0, 0, 1 |
|
|
|
AVX_INSTR pmaxsd, 0, 0, 1 |
|
|
|
AVX_INSTR pmaxub, 0, 0, 1 |
|
|
|
AVX_INSTR pmaxuw, 0, 0, 1 |
|
|
|
AVX_INSTR pmaxud, 0, 0, 1 |
|
|
|
AVX_INSTR pminsb, 0, 0, 1 |
|
|
|
AVX_INSTR pminsw, 0, 0, 1 |
|
|
|
AVX_INSTR pminsd, 0, 0, 1 |
|
|
|
AVX_INSTR pminub, 0, 0, 1 |
|
|
|
AVX_INSTR pminuw, 0, 0, 1 |
|
|
|
AVX_INSTR pminud, 0, 0, 1 |
|
|
|
AVX_INSTR pmulhuw, 0, 0, 1 |
|
|
|
AVX_INSTR pmulhrsw, 0, 0, 1 |
|
|
|
AVX_INSTR pmulhw, 0, 0, 1 |
|
|
|
AVX_INSTR pmullw, 0, 0, 1 |
|
|
|
AVX_INSTR pmulld, 0, 0, 1 |
|
|
|
AVX_INSTR pmuludq, 0, 0, 1 |
|
|
|
AVX_INSTR pmuldq, 0, 0, 1 |
|
|
|
AVX_INSTR por, 0, 0, 1 |
|
|
|
AVX_INSTR psadbw, 0, 0, 1 |
|
|
|
AVX_INSTR pshufb, 0, 0, 0 |
|
|
|
AVX_INSTR psignb, 0, 0, 0 |
|
|
|
AVX_INSTR psignw, 0, 0, 0 |
|
|
|
AVX_INSTR psignd, 0, 0, 0 |
|
|
|
AVX_INSTR psllw, 0, 0, 0 |
|
|
|
AVX_INSTR pslld, 0, 0, 0 |
|
|
|
AVX_INSTR psllq, 0, 0, 0 |
|
|
|
AVX_INSTR pslldq, 0, 0, 0 |
|
|
|
AVX_INSTR psraw, 0, 0, 0 |
|
|
|
AVX_INSTR psrad, 0, 0, 0 |
|
|
|
AVX_INSTR psrlw, 0, 0, 0 |
|
|
|
AVX_INSTR psrld, 0, 0, 0 |
|
|
|
AVX_INSTR psrlq, 0, 0, 0 |
|
|
|
AVX_INSTR psrldq, 0, 0, 0 |
|
|
|
AVX_INSTR psubb, 0, 0, 0 |
|
|
|
AVX_INSTR psubw, 0, 0, 0 |
|
|
|
AVX_INSTR psubd, 0, 0, 0 |
|
|
|
AVX_INSTR psubq, 0, 0, 0 |
|
|
|
AVX_INSTR psubsb, 0, 0, 0 |
|
|
|
AVX_INSTR psubsw, 0, 0, 0 |
|
|
|
AVX_INSTR psubusb, 0, 0, 0 |
|
|
|
AVX_INSTR psubusw, 0, 0, 0 |
|
|
|
AVX_INSTR punpckhbw, 0, 0, 0 |
|
|
|
AVX_INSTR punpckhwd, 0, 0, 0 |
|
|
|
AVX_INSTR punpckhdq, 0, 0, 0 |
|
|
|
AVX_INSTR punpckhqdq, 0, 0, 0 |
|
|
|
AVX_INSTR punpcklbw, 0, 0, 0 |
|
|
|
AVX_INSTR punpcklwd, 0, 0, 0 |
|
|
|
AVX_INSTR punpckldq, 0, 0, 0 |
|
|
|
AVX_INSTR punpcklqdq, 0, 0, 0 |
|
|
|
AVX_INSTR pxor, 0, 0, 1 |
|
|
|
AVX_INSTR shufps, 0, 1, 0 |
|
|
|
AVX_INSTR subpd, 1, 0, 0 |
|
|
|
AVX_INSTR subps, 1, 0, 0 |
|
|
|
AVX_INSTR subsd, 1, 0, 0 |
|
|
|
AVX_INSTR subss, 1, 0, 0 |
|
|
|
AVX_INSTR unpckhpd, 1, 0, 0 |
|
|
|
AVX_INSTR unpckhps, 1, 0, 0 |
|
|
|
AVX_INSTR unpcklpd, 1, 0, 0 |
|
|
|
AVX_INSTR unpcklps, 1, 0, 0 |
|
|
|
AVX_INSTR xorpd, 1, 0, 1 |
|
|
|
AVX_INSTR xorps, 1, 0, 1 |
|
|
|
|
|
|
|
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
|
|
|
AVX_INSTR pfadd, 1, 0 |
|
|
|
AVX_INSTR pfsub, 1, 0 |
|
|
|
AVX_INSTR pfmul, 1, 0 |
|
|
|
AVX_INSTR pfadd, 1, 0, 1 |
|
|
|
AVX_INSTR pfsub, 1, 0, 0 |
|
|
|
AVX_INSTR pfmul, 1, 0, 1 |
|
|
|
|
|
|
|
; base-4 constants for shuffles |
|
|
|
%assign i 0 |
|
|
|
%rep 256 |
|
|
|
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3) |
|
|
|
%if j < 10 |
|
|
|
CAT_XDEFINE q000, j, i |
|
|
|
%elif j < 100 |
|
|
|
CAT_XDEFINE q00, j, i |
|
|
|
%elif j < 1000 |
|
|
|
CAT_XDEFINE q0, j, i |
|
|
|
%else |
|
|
|
CAT_XDEFINE q, j, i |
|
|
|
%endif |
|
|
|
%assign i i+1 |
|
|
|
%endrep |
|
|
|
%undef i |
|
|
|
%undef j |
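
; Illustrative sketch, not part of the original patch: the q#### symbols defined
; above spell shuffle immediates in element order, most significant digit first,
; so q0123 == (0<<6)|(1<<4)|(2<<2)|3 == 0x1B.
    pshufd m0, m1, q0123 ; reverse the four dwords of m1 into m0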
|
|
|
|
|
|
|
%macro FMA_INSTR 3 |
|
|
|
%macro %1 4-7 %1, %2, %3 |
|
|
|
%if cpuflag(xop) |
|
|
|
v%5 %1, %2, %3, %4 |
|
|
|
%else |
|
|
|
%6 %1, %2, %3 |
|
|
|
%7 %1, %4 |
|
|
|
%endif |
|
|
|
%endmacro |
|
|
|
%endmacro |
|
|
|
|
|
|
|
FMA_INSTR pmacsdd, pmulld, paddd |
|
|
|
FMA_INSTR pmacsww, pmullw, paddw |
|
|
|
FMA_INSTR pmadcswd, pmaddwd, paddd |
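
; Illustrative sketch, not part of the original patch: each FMA_INSTR wrapper
; uses the single XOP instruction when cpuflags include xop and otherwise falls
; back to the separate multiply and add, so inside a function assembled after
; e.g. INIT_XMM sse2 or INIT_XMM xop one can write:
    pmacsww m0, m1, m2, m3 ; m0 = m1*m2 + m3 (packed words)
                           ; xop:  vpmacsww m0, m1, m2, m3
                           ; else: pmullw m0, m1, m2 (emulated), then paddw m0, m3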