author     Rodger Combs <rodger.combs@gmail.com>    2017-06-20 23:15:31 -0500
committer  Rodger Combs <rodger.combs@gmail.com>    2017-09-05 20:43:09 -0500
commit     93114892c905895df5695c164469503fead273a7 (patch)
tree       b7e2604b33cfc5eecd78a006927dd5284bc81eed
parent     8bddaa2a72d0e949d5a2f7b2e1033b3d53a09fa3 (diff)
x86: update x86inc.asm
-rw-r--r--   libass/x86/x86inc.asm   1096
1 file changed, 599 insertions(+), 497 deletions(-)
diff --git a/libass/x86/x86inc.asm b/libass/x86/x86inc.asm
index 53e104d..128ddc1 100644
--- a/libass/x86/x86inc.asm
+++ b/libass/x86/x86inc.asm
@@ -1,11 +1,11 @@
;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
-;* Copyright (C) 2005-2013 x264 project
+;* Copyright (C) 2005-2016 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Anton Mitrofanov <BugMaster@narod.ru>
-;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;* Fiona Glaser <fiona@x264.com>
;* Henrik Gramner <henrik@gramner.com>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
@@ -35,13 +35,24 @@
; to x264-devel@videolan.org .
%ifndef private_prefix
- %define private_prefix ass
+ %define private_prefix x264
%endif
%ifndef public_prefix
%define public_prefix private_prefix
%endif
+%if HAVE_ALIGNED_STACK
+ %define STACK_ALIGNMENT 16
+%endif
+%ifndef STACK_ALIGNMENT
+ %if ARCH_X86_64
+ %define STACK_ALIGNMENT 16
+ %else
+ %define STACK_ALIGNMENT 4
+ %endif
+%endif
+
%define WIN64 0
%define UNIX64 0
%if ARCH_X86_64
@@ -56,18 +67,30 @@
%endif
%endif
+%define FORMAT_ELF 0
+%ifidn __OUTPUT_FORMAT__,elf
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf32
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf64
+ %define FORMAT_ELF 1
+%endif
+
%ifdef PREFIX
%define mangle(x) _ %+ x
%else
%define mangle(x) x
%endif
+; aout does not support align=
+; NOTE: This section is out of sync with x264, in order to
+; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
- SECTION .rodata align=%1
-%endmacro
-
-%macro SECTION_TEXT 0-1 16
- SECTION .text align=%1
+ %ifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
%endmacro
%if WIN64
@@ -82,8 +105,11 @@
default rel
%endif
-; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
-CPU amdnop
+%macro CPUNOP 1
+ %if HAVE_CPUNOP
+ CPU %1
+ %endif
+%endmacro
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
@@ -94,8 +120,9 @@ CPU amdnop
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
-; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
-; MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
+; %4 = (optional) stack size to be allocated. The stack will be aligned before
+; allocating the specified stack size. If the required stack alignment is
+; larger than the known stack alignment the stack will be manually aligned
; and an extra register will be allocated to hold the original stack
; pointer (to not invalidate r0m etc.). To prevent the use of an extra
; register as stack pointer, request a negative stack size.
@@ -103,8 +130,10 @@ CPU amdnop
; PROLOGUE can also be invoked by adding the same options to cglobal
; e.g.
-; cglobal foo, 2,3,0, dst, src, tmp
-; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+; cglobal foo, 2,3,7,0x40, dst, src, tmp
+; declares a function (foo) that automatically loads two arguments (dst and
+; src) into registers, uses one additional register (tmp) plus 7 vector
+; registers (m0-m6) and allocates 0x40 bytes of stack space.
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
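As a rough sketch of the cglobal interface documented above (not part of this
commit; the function name and body are hypothetical, and it assumes x86inc.asm
has been included and that len is a positive multiple of 16):

    SECTION .text
    INIT_XMM sse2
    ; void add_u8(uint8_t *dst, const uint8_t *src, intptr_t len)
    cglobal add_u8, 3,3,2, dst, src, len  ; 3 args, 3 gprs, 2 xmm regs, no stack
    .loop:
        movu    m0, [srcq+lenq-16]
        movu    m1, [dstq+lenq-16]
        paddb   m0, m1                    ; dst[i] += src[i], bytewise
        movu    [dstq+lenq-16], m0
        sub     lenq, 16
        jg .loop
        RET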
@@ -129,6 +158,7 @@ CPU amdnop
%define r%1w %2w
%define r%1b %2b
%define r%1h %2h
+ %define %2q %2
%if %0 == 2
%define r%1m %2d
%define r%1mp %2
@@ -153,9 +183,9 @@ CPU amdnop
%define e%1h %3
%define r%1b %2
%define e%1b %2
-%if ARCH_X86_64 == 0
- %define r%1 e%1
-%endif
+ %if ARCH_X86_64 == 0
+ %define r%1 e%1
+ %endif
%endmacro
DECLARE_REG_SIZE ax, al, ah
@@ -265,7 +295,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%macro ASSERT 1
%if (%1) == 0
- %error assert failed
+ %error assertion ``%1'' failed
%endif
%endmacro
@@ -304,26 +334,28 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%assign n_arg_names %0
%endmacro
+%define required_stack_alignment ((mmsize + 15) & ~15)
+
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%ifnum %1
%if %1 != 0
- %assign %%stack_alignment ((mmsize + 15) & ~15)
+ %assign %%pad 0
%assign stack_size %1
%if stack_size < 0
%assign stack_size -stack_size
%endif
- %assign stack_size_padded stack_size
%if WIN64
- %assign stack_size_padded stack_size_padded + 32 ; reserve 32 bytes for shadow space
+ %assign %%pad %%pad + 32 ; shadow space
%if mmsize != 8
%assign xmm_regs_used %2
%if xmm_regs_used > 8
- %assign stack_size_padded stack_size_padded + (xmm_regs_used-8)*16
+ %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
%endif
%endif
%endif
- %if mmsize <= 16 && HAVE_ALIGNED_STACK
- %assign stack_size_padded stack_size_padded + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
+ %if required_stack_alignment <= STACK_ALIGNMENT
+ ; maintain the current stack alignment
+ %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%else
%assign %%reg_num (regs_used - 1)
@@ -332,17 +364,17 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
- mov rstk, rsp
%if %1 < 0 ; need to store rsp on stack
- sub rsp, gprsize+stack_size_padded
- and rsp, ~(%%stack_alignment-1)
- %xdefine rstkm [rsp+stack_size_padded]
- mov rstkm, rstk
+ %xdefine rstkm [rsp + stack_size + %%pad]
+ %assign %%pad %%pad + gprsize
%else ; can keep rsp in rstk during whole function
- sub rsp, stack_size_padded
- and rsp, ~(%%stack_alignment-1)
%xdefine rstkm rstk
%endif
+ %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+ mov rstk, rsp
+ and rsp, ~(required_stack_alignment-1)
+ sub rsp, stack_size_padded
+ movifnidn rstkm, rstk
%endif
WIN64_PUSH_XMM
%endif
@@ -351,11 +383,21 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%macro SETUP_STACK_POINTER 1
%ifnum %1
- %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
+ %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
%if %1 > 0
+ ; Reserve an additional register for storing the original stack pointer, but avoid using
+ ; eax/rax for this purpose since it can potentially get overwritten as a return value.
%assign regs_used (regs_used + 1)
- %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
- %warning "Stack pointer will overwrite register argument"
+ %if ARCH_X86_64 && regs_used == 7
+ %assign regs_used 8
+ %elif ARCH_X86_64 == 0 && regs_used == 1
+ %assign regs_used 2
+ %endif
+ %endif
+ %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
+ ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
+ ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
+ %assign regs_used 5 + UNIX64 * 3
%endif
%endif
%endif
@@ -425,7 +467,9 @@ DECLARE_REG 14, R15, 120
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 8
- %assign stack_size_padded (xmm_regs_used-8)*16 + (~stack_offset&8) + 32
+ ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+ %assign %%pad (xmm_regs_used-8)*16 + 32
+ %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%endif
WIN64_PUSH_XMM
@@ -441,7 +485,7 @@ DECLARE_REG 14, R15, 120
%endrep
%endif
%if stack_size_padded > 0
- %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
+ %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
mov rsp, rstkm
%else
add %1, stack_size_padded
@@ -467,9 +511,9 @@ DECLARE_REG 14, R15, 120
%macro RET 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -506,17 +550,17 @@ DECLARE_REG 14, R15, 72
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 14, 13, 12, 11, 10, 9
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -562,29 +606,29 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 6, 5, 4, 3
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
%endif ;======================================================================
%if WIN64 == 0
-%macro WIN64_SPILL_XMM 1
-%endmacro
-%macro WIN64_RESTORE_XMM 1
-%endmacro
-%macro WIN64_PUSH_XMM 0
-%endmacro
+ %macro WIN64_SPILL_XMM 1
+ %endmacro
+ %macro WIN64_RESTORE_XMM 1
+ %endmacro
+ %macro WIN64_PUSH_XMM 0
+ %endmacro
%endif
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
@@ -597,24 +641,26 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%else
rep ret
%endif
+ annotate_function_size
%endmacro
%define last_branch_adr $$
%macro AUTO_REP_RET 0
- %ifndef cpuflags
- times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr.
- %elif notcpuflag(ssse3)
- times ((last_branch_adr-$)>>31)+1 rep
+ %if notcpuflag(ssse3)
+ times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
%endif
ret
+ annotate_function_size
%endmacro
%macro BRANCH_INSTR 0-*
%rep %0
%macro %1 1-2 %1
%2 %1
- %%branch_instr:
- %xdefine last_branch_adr %%branch_instr
+ %if notcpuflag(ssse3)
+ %%branch_instr equ $
+ %xdefine last_branch_adr %%branch_instr
+ %endif
%endmacro
%rotate 1
%endrep
@@ -629,6 +675,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
%elif %2
jmp %1
%endif
+ annotate_function_size
%endmacro
;=============================================================================
@@ -650,6 +697,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
+ annotate_function_size
%if %1
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
@@ -663,7 +711,8 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
CAT_XDEFINE cglobaled_, %2, 1
%endif
%xdefine current_function %2
- %ifidn __OUTPUT_FORMAT__,elf
+ %xdefine current_function_section __SECT__
+ %if FORMAT_ELF
global %2:function %%VISIBILITY
%else
global %2
@@ -689,14 +738,16 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
; like cextern, but without the prefix
%macro cextern_naked 1
- %xdefine %1 mangle(%1)
+ %ifdef PREFIX
+ %xdefine %1 mangle(%1)
+ %endif
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
%macro const 1-2+
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
- %ifidn __OUTPUT_FORMAT__,elf
+ %if FORMAT_ELF
global %1:data hidden
%else
global %1
@@ -704,12 +755,29 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
%1: %2
%endmacro
-; This is needed for ELF, otherwise the GNU linker assumes the stack is
-; executable by default.
-%ifidn __OUTPUT_FORMAT__,elf
-SECTION .note.GNU-stack noalloc noexec nowrite progbits
+; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
+%if FORMAT_ELF
+ [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
%endif
+; Tell debuggers how large the function was.
+; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
+; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
+; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
+; then its size might be unspecified.
+%macro annotate_function_size 0
+ %ifdef __YASM_VER__
+ %ifdef current_function
+ %if FORMAT_ELF
+ current_function_section
+ %%ecf equ $
+ size current_function %%ecf - current_function
+ __SECT__
+ %endif
+ %endif
+ %endif
+%endmacro
+
; cpuflags
%assign cpuflags_mmx (1<<0)
@@ -726,8 +794,8 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign cpuflags_avx (1<<11)| cpuflags_sse42
%assign cpuflags_xop (1<<12)| cpuflags_avx
%assign cpuflags_fma4 (1<<13)| cpuflags_avx
-%assign cpuflags_avx2 (1<<14)| cpuflags_avx
-%assign cpuflags_fma3 (1<<15)| cpuflags_avx
+%assign cpuflags_fma3 (1<<14)| cpuflags_avx
+%assign cpuflags_avx2 (1<<15)| cpuflags_fma3
%assign cpuflags_cache32 (1<<16)
%assign cpuflags_cache64 (1<<17)
@@ -737,23 +805,32 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign cpuflags_atom (1<<21)
%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
+%assign cpuflags_aesni (1<<24)|cpuflags_sse42
-%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
-%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
+; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
+%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
+%define notcpuflag(x) (cpuflag(x) ^ 1)
-; Takes up to 2 cpuflags from the above list.
+; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
-%macro INIT_CPUFLAGS 0-2
- CPU amdnop
+%macro INIT_CPUFLAGS 0-*
+ %xdefine SUFFIX
+ %undef cpuname
+ %assign cpuflags 0
+
%if %0 >= 1
- %xdefine cpuname %1
- %assign cpuflags cpuflags_%1
- %if %0 >= 2
- %xdefine cpuname %1_%2
- %assign cpuflags cpuflags | cpuflags_%2
- %endif
+ %rep %0
+ %ifdef cpuname
+ %xdefine cpuname cpuname %+ _%1
+ %else
+ %xdefine cpuname %1
+ %endif
+ %assign cpuflags cpuflags | cpuflags_%1
+ %rotate 1
+ %endrep
%xdefine SUFFIX _ %+ cpuname
+
%if cpuflag(avx)
%assign avx_enabled 1
%endif
@@ -764,16 +841,15 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif
%if cpuflag(aligned)
%define movu mova
- %elifidn %1, sse3
+ %elif cpuflag(sse3) && notcpuflag(ssse3)
%define movu lddqu
%endif
- %if ARCH_X86_64 == 0 && notcpuflag(sse2)
- CPU basicnop
- %endif
+ %endif
+
+ %if ARCH_X86_64 || cpuflag(sse2)
+ CPUNOP amdnop
%else
- %xdefine SUFFIX
- %undef cpuname
- %undef cpuflags
+ CPUNOP basicnop
%endif
%endmacro
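A hedged sketch of how the reworked variadic INIT_CPUFLAGS and the boolean
cpuflag()/notcpuflag() helpers are typically consumed (illustrative only; the
function is hypothetical and assumes x86inc.asm has already been included):

    INIT_XMM ssse3, cache64       ; any number of cpuflags may now be listed
    cglobal cpuflag_demo, 0,0,1
    %if cpuflag(ssse3)
        pabsb   m0, m0            ; fine: ssse3 is part of the current flag set
    %endif
    %if notcpuflag(avx)
        ; a non-AVX fallback would go here
    %endif
        RET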
@@ -802,14 +878,14 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntq
%assign %%i 0
%rep 8
- CAT_XDEFINE m, %%i, mm %+ %%i
- CAT_XDEFINE nmm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nnmm, %%i, %%i
+ %assign %%i %%i+1
%endrep
%rep 8
- CAT_UNDEF m, %%i
- CAT_UNDEF nmm, %%i
- %assign %%i %%i+1
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nnmm, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -820,7 +896,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define mmsize 16
%define num_mmregs 8
%if ARCH_X86_64
- %define num_mmregs 16
+ %define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
@@ -828,9 +904,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
- CAT_XDEFINE m, %%i, xmm %+ %%i
- CAT_XDEFINE nxmm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nnxmm, %%i, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -841,7 +917,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
- %define num_mmregs 16
+ %define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
@@ -849,9 +925,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
- CAT_XDEFINE m, %%i, ymm %+ %%i
- CAT_XDEFINE nymm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nnymm, %%i, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -875,7 +951,7 @@ INIT_XMM
%assign i 0
%rep 16
DECLARE_MMCAST i
-%assign i i+1
+ %assign i i+1
%endrep
; I often want to use macros that permute their arguments. e.g. there's no
@@ -893,23 +969,23 @@ INIT_XMM
; doesn't cost any cycles.
%macro PERMUTE 2-* ; takes a list of pairs to swap
-%rep %0/2
- %xdefine %%tmp%2 m%2
- %rotate 2
-%endrep
-%rep %0/2
- %xdefine m%1 %%tmp%2
- CAT_XDEFINE n, m%1, %1
- %rotate 2
-%endrep
+ %rep %0/2
+ %xdefine %%tmp%2 m%2
+ %rotate 2
+ %endrep
+ %rep %0/2
+ %xdefine m%1 %%tmp%2
+ CAT_XDEFINE nn, m%1, %1
+ %rotate 2
+ %endrep
%endmacro
%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
-%ifnum %1 ; SWAP 0, 1, ...
- SWAP_INTERNAL_NUM %1, %2
-%else ; SWAP m0, m1, ...
- SWAP_INTERNAL_NAME %1, %2
-%endif
+ %ifnum %1 ; SWAP 0, 1, ...
+ SWAP_INTERNAL_NUM %1, %2
+ %else ; SWAP m0, m1, ...
+ SWAP_INTERNAL_NAME %1, %2
+ %endif
%endmacro
%macro SWAP_INTERNAL_NUM 2-*
@@ -917,17 +993,17 @@ INIT_XMM
%xdefine %%tmp m%1
%xdefine m%1 m%2
%xdefine m%2 %%tmp
- CAT_XDEFINE n, m%1, %1
- CAT_XDEFINE n, m%2, %2
- %rotate 1
+ CAT_XDEFINE nn, m%1, %1
+ CAT_XDEFINE nn, m%2, %2
+ %rotate 1
%endrep
%endmacro
%macro SWAP_INTERNAL_NAME 2-*
- %xdefine %%args n %+ %1
+ %xdefine %%args nn %+ %1
%rep %0-1
- %xdefine %%args %%args, n %+ %2
- %rotate 1
+ %xdefine %%args %%args, nn %+ %2
+ %rotate 1
%endrep
SWAP_INTERNAL_NUM %%args
%endmacro
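A small illustration of SWAP on top of the renamed nn* register-number defines
(hypothetical function, not part of the commit):

    INIT_XMM sse2
    cglobal swap_demo, 0,0,3
        pxor    m0, m0
        pcmpeqb m2, m2
        SWAP    0, 2          ; assembly-time renaming only: m0 now refers to
                              ; xmm2 and m2 to xmm0, no instructions emitted
        RET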
@@ -944,7 +1020,7 @@ INIT_XMM
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
- %assign %%i %%i+1
+ %assign %%i %%i+1
%endrep
%endmacro
@@ -953,21 +1029,21 @@ INIT_XMM
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
- CAT_XDEFINE n, m %+ %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE nn, m %+ %%i, %%i
+ %assign %%i %%i+1
%endrep
%endif
%endmacro
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
- call_internal %1, %1 %+ SUFFIX
+ call_internal %1 %+ SUFFIX, %1
%endmacro
%macro call_internal 2
- %xdefine %%i %1
- %ifndef cglobaled_%1
- %ifdef cglobaled_%2
- %xdefine %%i %2
+ %xdefine %%i %2
+ %ifndef cglobaled_%2
+ %ifdef cglobaled_%1
+ %xdefine %%i %1
%endif
%endif
call %%i
@@ -1010,7 +1086,7 @@ INIT_XMM
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
-%assign i i+1
+ %assign i i+1
%endrep
%undef i
@@ -1026,342 +1102,362 @@ INIT_XMM
%endmacro
;%1 == instruction
-;%2 == 1 if float, 0 if int
-;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
-;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
-;%5+: operands
-%macro RUN_AVX_INSTR 5-8+
- %ifnum sizeof%6
- %assign %%sizeofreg sizeof%6
- %elifnum sizeof%5
- %assign %%sizeofreg sizeof%5
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+;%6+: operands
+%macro RUN_AVX_INSTR 6-9+
+ %ifnum sizeof%7
+ %assign __sizeofreg sizeof%7
+ %elifnum sizeof%6
+ %assign __sizeofreg sizeof%6
%else
- %assign %%sizeofreg mmsize
+ %assign __sizeofreg mmsize
%endif
- %assign %%emulate_avx 0
- %if avx_enabled && %%sizeofreg >= 16
- %xdefine %%instr v%1
+ %assign __emulate_avx 0
+ %if avx_enabled && __sizeofreg >= 16
+ %xdefine __instr v%1
%else
- %xdefine %%instr %1
- %if %0 >= 7+%3
- %assign %%emulate_avx 1
+ %xdefine __instr %1
+ %if %0 >= 8+%4
+ %assign __emulate_avx 1
%endif
%endif
-
- %if %%emulate_avx
- %xdefine %%src1 %6
- %xdefine %%src2 %7
- %ifnidn %5, %6
- %if %0 >= 8
- CHECK_AVX_INSTR_EMU {%1 %5, %6, %7, %8}, %5, %7, %8
- %else
- CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
+ %ifnidn %2, fnord
+ %ifdef cpuname
+ %if notcpuflag(%2)
+ %error use of ``%1'' %2 instruction in cpuname function: current_function
+ %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
+ %error use of ``%1'' sse2 instruction in cpuname function: current_function
%endif
- %if %4 && %3 == 0
- %ifnid %7
+ %endif
+ %endif
+
+ %if __emulate_avx
+ %xdefine __src1 %7
+ %xdefine __src2 %8
+ %if %5 && %4 == 0
+ %ifnidn %6, %7
+ %ifidn %6, %8
+ %xdefine __src1 %8
+ %xdefine __src2 %7
+ %elifnnum sizeof%8
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
- %xdefine %%src1 %7
- %xdefine %%src2 %6
+ %xdefine __src1 %8
+ %xdefine __src2 %7
%endif
%endif
- %if %%sizeofreg == 8
- MOVQ %5, %%src1
- %elif %2
- MOVAPS %5, %%src1
+ %endif
+ %ifnidn %6, __src1
+ %if %0 >= 9
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
+ %else
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
+ %endif
+ %if __sizeofreg == 8
+ MOVQ %6, __src1
+ %elif %3
+ MOVAPS %6, __src1
%else
- MOVDQA %5, %%src1
+ MOVDQA %6, __src1
%endif
%endif
- %if %0 >= 8
- %1 %5, %%src2, %8
+ %if %0 >= 9
+ %1 %6, __src2, %9
%else
- %1 %5, %%src2
+ %1 %6, __src2
%endif
- %elif %0 >= 8
- %%instr %5, %6, %7, %8
+ %elif %0 >= 9
+ __instr %6, %7, %8, %9
+ %elif %0 == 8
+ __instr %6, %7, %8
%elif %0 == 7
- %%instr %5, %6, %7
- %elif %0 == 6
- %%instr %5, %6
+ __instr %6, %7
%else
- %%instr %5
+ __instr %6
%endif
%endmacro
;%1 == instruction
-;%2 == 1 if float, 0 if int
-;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
-;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
-%macro AVX_INSTR 1-4 0, 1, 0
- %macro %1 1-9 fnord, fnord, fnord, fnord, %1, %2, %3, %4
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 1-5 fnord, 0, 255, 0
+ %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
%ifidn %2, fnord
- RUN_AVX_INSTR %6, %7, %8, %9, %1
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
%elifidn %3, fnord
- RUN_AVX_INSTR %6, %7, %8, %9, %1, %2
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
%elifidn %4, fnord
- RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
%elifidn %5, fnord
- RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
%else
- RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4, %5
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
%endif
%endmacro
%endmacro
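For context, a hedged sketch of the new five-parameter scheme (the commit's
updated AVX_INSTR list is truncated below, so the exact declaration is assumed;
it is presumably something like "AVX_INSTR addps, sse, 1, 0, 1", i.e. name,
minimum ISA, float?, 4-operand emulation?, commutative?). A wrapped mnemonic is
then written as a three-operand instruction regardless of the target ISA:

    INIT_YMM avx
    cglobal three_op_demo, 0,0,3
        addps   m0, m1, m2    ; emits vaddps ymm0, ymm1, ymm2 here; under
                              ; INIT_XMM sse the same line would be emulated
                              ; as movaps m0, m1 followed by addps m0, m2
        RET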
; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
-AVX_INSTR addpd, 1, 0, 1
-AVX_INSTR addps, 1, 0, 1
-AVX_INSTR addsd, 1, 0, 1
-AVX_INSTR addss, 1, 0, 1
-AVX_INSTR addsubpd, 1, 0, 0
-AVX_INSTR addsubps, 1, 0, 0
-AVX_INSTR aesdec, 0, 0, 0
-AVX_INSTR aesdeclast, 0, 0, 0
-AVX_INSTR aesenc, 0, 0, 0
-AVX_INSTR aesenclast, 0, 0, 0
-AVX_INSTR aesimc
-AVX_INSTR aeskeygenassist
-AVX_INSTR andnpd, 1, 0, 0
-AVX_INSTR andnps, 1, 0, 0
-AVX_INSTR andpd, 1, 0, 1
-AVX_INSTR andps, 1, 0, 1
-AVX_INSTR blendpd, 1, 0, 0
-AVX_INSTR blendps, 1, 0, 0
-AVX_INSTR blendvpd, 1, 0, 0
-AVX_INSTR blendvps, 1, 0, 0
-AVX_INSTR cmppd, 1, 1, 0
-AVX_INSTR cmpps, 1, 1, 0
-AVX_INSTR cmpsd, 1, 1, 0
-AVX_INSTR cmpss, 1, 1, 0
-AVX_INSTR comisd
-AVX_INSTR comiss
-AVX_INSTR cvtdq2pd
-AVX_INSTR cvtdq2ps
-AVX_INSTR cvtpd2dq
-AVX_INSTR cvtpd2ps
-AVX_INSTR cvtps2dq
-AVX_INSTR cvtps2pd
-AVX_INSTR cvtsd2si
-AVX_INSTR cvtsd2ss
-AVX_INSTR cvtsi2sd
-AVX_INSTR cvtsi2ss
-AVX_INSTR cvtss2sd
-AVX_INSTR cvtss2si
-AVX_INSTR cvttpd2dq
-AVX_INSTR cvttps2dq
-AVX_INSTR cvttsd2si
-AVX_INSTR cvttss2si
-AVX_INSTR divpd, 1, 0, 0
-AVX_INSTR divps, 1, 0, 0
-AVX_INSTR divsd, 1, 0, 0
-AVX_INSTR divss, 1, 0, 0
-AVX_INSTR dppd, 1, 1, 0
-AVX_INSTR dpps, 1, 1, 0
-AVX_INSTR extractps
-AVX_INSTR haddpd, 1, 0, 0
-AVX_INSTR haddps, 1, 0, 0
-AVX_INSTR hsubpd, 1, 0, 0
-AVX_INSTR hsubps, 1, 0, 0
-AVX_INSTR insertps, 1, 1, 0
-AVX_INSTR lddqu
-AVX_INSTR ldmxcsr
-AVX_INSTR maskmovdqu
-AVX_INSTR maxpd, 1, 0, 1
-AVX_INSTR maxps, 1, 0, 1
-AVX_INSTR maxsd, 1, 0, 1
-AVX_INSTR maxss, 1, 0, 1
-AVX_INSTR minpd, 1, 0, 1
-AVX_INSTR minps, 1, 0, 1
-AVX_INSTR minsd, 1, 0, 1
-AVX_INSTR minss, 1, 0, 1
-AVX_INSTR movapd
-AVX_INSTR movaps
-AVX_INSTR movd
-AVX_INSTR movddup
-AVX_INSTR movdqa
-AVX_INSTR movdqu
-AVX_INSTR movhlps, 1, 0, 0
-AVX_INSTR movhpd, 1, 0, 0
-AVX_INSTR movhps, 1, 0, 0
-AVX_INSTR movlhps, 1, 0, 0
-AVX_INSTR movlpd, 1, 0, 0
-AVX_INSTR movlps, 1, 0, 0
-AVX_INSTR movmskpd
-AVX_INSTR movmskps
-AVX_INSTR movntdq
-AVX_INSTR movntdqa
-AVX_INSTR movntpd
-AVX_INSTR movntps
-AVX_INSTR movq
-AVX_INSTR movsd, 1, 0, 0
-AVX_INSTR movshdup
-AVX_INSTR movsldup
-AVX_INSTR movss, 1, 0, 0
-AVX_INSTR movupd
-AVX_INSTR movups
-AVX_INSTR mpsadbw, 0, 1, 0
-AVX_INSTR mulpd, 1, 0, 1
-AVX_INSTR mulps, 1, 0, 1
-AVX_INSTR mulsd, 1, 0, 1
-AVX_INSTR mulss, 1, 0, 1
-AVX_INSTR orpd, 1, 0, 1
-AVX_INSTR orps, 1, 0, 1
-AVX_INSTR pabsb
-AVX_INSTR pabsd
-AVX_INSTR pabsw
-AVX_INSTR packsswb, 0, 0, 0
-AVX_INSTR packssdw, 0, 0, 0
-AVX_INSTR packuswb, 0, 0, 0
-AVX_INSTR packusdw, 0, 0, 0
-AVX_INSTR paddb, 0, 0, 1
-AVX_INSTR paddw, 0, 0, 1
-AVX_INSTR paddd, 0, 0, 1
-AVX_INSTR paddq, 0, 0, 1
-AVX_INSTR paddsb, 0, 0, 1
-AVX_INSTR paddsw, 0, 0, 1
-AVX_INSTR paddusb, 0, 0, 1
-AVX_INSTR paddusw, 0, 0, 1
-AVX_INSTR palignr, 0, 1, 0
-AVX_INSTR pand, 0, 0, 1
-AVX_INSTR pandn, 0, 0, 0
-AVX_INSTR pavgb, 0, 0, 1
-AVX_INSTR pavgw, 0, 0, 1
-AVX_INSTR pblendvb, 0, 0, 0
-AVX_INSTR pblendw, 0, 1, 0
-AVX_INSTR pclmulqdq, 0, 1, 0
-AVX_INSTR pcmpestri
-AVX_INSTR pcmpestrm
-AVX_INSTR pcmpistri
-AVX_INSTR pcmpistrm
-AVX_INSTR pcmpeqb, 0, 0, 1
-AVX_INSTR pcmpeqw, 0, 0, 1
-AVX_INSTR pcmpeqd, 0, 0, 1
-AVX_INSTR pcmpeqq, 0, 0, 1
-AVX_INSTR pcmpgtb, 0, 0, 0
-AVX_INSTR pcmpgtw, 0, 0, 0
-AVX_INSTR pcmpgtd, 0, 0, 0
-AVX_INSTR pcmpgtq, 0, 0, 0
-AVX_INSTR pextrb
-AVX_INSTR pextrd
-AVX_INSTR pextrq
-AVX_INSTR pextrw
-AVX_INSTR phaddw, 0, 0, 0
-AVX_INSTR phaddd, 0, 0, 0
-AVX_INSTR phaddsw, 0, 0, 0
-AVX_INSTR phminposuw
-AVX_INSTR phsubw, 0, 0, 0
-AVX_INSTR phsubd, 0, 0, 0
-AVX_INSTR phsubsw, 0, 0, 0
-AVX_INSTR pinsrb, 0, 1, 0
-AVX_INSTR pinsrd, 0, 1, 0
-AVX_INSTR pinsrq, 0, 1, 0
-AVX_INSTR pinsrw, 0, 1, 0
-AVX_INSTR pmaddwd, 0, 0, 1
-AVX_INSTR pmaddubsw, 0, 0, 0
-AVX_INSTR pmaxsb, 0, 0, 1
-AVX_INSTR pmaxsw, 0, 0, 1
-AVX_INSTR pmaxsd, 0, 0, 1
-AVX_INSTR pmaxub, 0, 0, 1
-AVX_INSTR pmaxuw, 0, 0, 1
-AVX_INSTR pmaxud, 0, 0, 1
-AVX_INSTR pminsb, 0, 0, 1
-AVX_INSTR pminsw, 0, 0, 1
-AVX_INSTR pminsd, 0, 0, 1
-AVX_INSTR pminub, 0, 0, 1
-AVX_INSTR pminuw, 0, 0, 1
-AVX_INSTR pminud, 0, 0, 1
-AVX_INSTR pmovmskb
-AVX_INSTR pmovsxbw
-AVX_INSTR pmovsxbd
-AVX_INSTR pmovsxbq
-AVX_INSTR pmovsxwd
-AVX_INSTR pmovsxwq
-AVX_INSTR pmovsxdq
-AVX_INSTR pmovzxbw
-AVX_INSTR pmovzxbd
-AVX_INSTR pmovzxbq
-AVX_INSTR pmovzxwd
-AVX_INSTR pmovzxwq
-AVX_INSTR pmovzxdq
-AVX_INSTR pmuldq, 0, 0, 1
-AVX_INSTR pmulhrsw, 0, 0, 1
-AVX_INSTR pmulhuw, 0, 0, 1
-AVX_INSTR pmulhw, 0, 0, 1
-AVX_INSTR pmullw, 0, 0, 1
-AVX_INSTR pmulld, 0, 0, 1
-AVX_INSTR pmuludq, 0, 0, 1
-AVX_INSTR por, 0, 0, 1
-AVX_INSTR psadbw, 0, 0, 1
-AVX_INSTR pshufb, 0, 0, 0
-AVX_INSTR pshufd
-AVX_INSTR pshufhw
-AVX_INSTR pshuflw
-AVX_INSTR psignb, 0, 0, 0
-AVX_INSTR psignw, 0, 0, 0
-AVX_INSTR psignd, 0, 0, 0
-AVX_INSTR psllw, 0, 0, 0
-AVX_INSTR pslld, 0, 0, 0
-AVX_INSTR psllq, 0, 0, 0
-AVX_INSTR pslldq, 0, 0, 0
-AVX_INSTR psraw, 0, 0, 0
-AVX_INSTR psrad, 0, 0, 0
-AVX_INSTR psrlw, 0, 0, 0
-AVX_INSTR psrld, 0, 0, 0
-AVX_INSTR psrlq, 0, 0, 0
-AVX_INSTR psrldq, 0, 0, 0
-AVX_INSTR psubb, 0, 0, 0
-AVX_INSTR psubw, 0, 0, 0
-AVX_INSTR psubd, 0, 0, 0
-AVX_INSTR psubq, 0, 0, 0
-AVX_INSTR psubsb, 0, 0, 0
-AVX_INSTR psubsw, 0, 0, 0
-AVX_INSTR psubusb, 0, 0, 0
-AVX_INSTR psubusw, 0, 0, 0
-AVX_INSTR ptest
-AVX_INSTR punpckhbw, 0, 0, 0
-AVX_INSTR punpckhwd, 0, 0, 0
-AVX_INSTR punpckhdq, 0, 0, 0
-AVX_INSTR punpckhqdq, 0, 0, 0
-AVX_INSTR punpcklbw, 0, 0, 0
-AVX_INSTR punpcklwd, 0, 0, 0
-AVX_INSTR punpckldq, 0, 0, 0
-AVX_INSTR punpcklqdq, 0, 0, 0
-AVX_INSTR pxor, 0, 0, 1
-AVX_INSTR rcpps, 1, 0, 0
-AVX_INSTR rcpss, 1, 0, 0
-AVX_INSTR roundpd
-AVX_INSTR roundps
-AVX_INSTR roundsd
-AVX_INSTR roundss
-AVX_INSTR rsqrtps, 1, 0, 0
-AVX_INSTR rsqrtss, 1, 0, 0
-AVX_INSTR shufpd, 1, 1, 0
-AVX_INSTR shufps, 1, 1, 0
-AVX_INSTR sqrtpd, 1, 0, 0
-AVX_INSTR sqrtps, 1, 0, 0
-AVX_INSTR sqrtsd, 1, 0, 0
-AVX_INSTR sqrtss, 1, 0, 0
-AVX_INSTR stmxcsr
-AVX_INSTR subpd, 1, 0, 0
-AVX_INSTR subps, 1, 0, 0
-AVX_INSTR subsd, 1, 0, 0