From c398563bc6d17ba831d8db76b7f523b2fd3a8b3d Mon Sep 17 00:00:00 2001
From: loqs
Date: Wed, 27 Mar 2024 13:26:02 +0000
Subject: x86: update x86inc.asm

Updated to
https://code.videolan.org/videolan/x86inc.asm/-/commit/04f14f431ce07ca349b5d87c9e5930f5950cf712
---
 libass/x86/x86inc.asm | 309 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 225 insertions(+), 84 deletions(-)

diff --git a/libass/x86/x86inc.asm b/libass/x86/x86inc.asm
index 60384bf..d2bd758 100644
--- a/libass/x86/x86inc.asm
+++ b/libass/x86/x86inc.asm
@@ -1,7 +1,7 @@
 ;*****************************************************************************
 ;* x86inc.asm: x86 abstraction layer
 ;*****************************************************************************
-;* Copyright (C) 2005-2020 x264 project
+;* Copyright (C) 2005-2024 x264 project
 ;*
 ;* Authors: Loren Merritt
 ;*          Henrik Gramner
@@ -79,6 +79,11 @@
     %define mangle(x) x
 %endif
 
+; Use VEX-encoding even in non-AVX functions
+%ifndef FORCE_VEX_ENCODING
+    %define FORCE_VEX_ENCODING 0
+%endif
+
 %macro SECTION_RODATA 0-1 16
     %ifidn __OUTPUT_FORMAT__,win32
         SECTION .rdata align=%1
@@ -99,7 +104,7 @@
 %endif
 %define HAVE_PRIVATE_EXTERN 1
-%ifdef __NASM_VER__
+%ifdef __NASM_VERSION_ID__
     %use smartalign
     %if __NASM_VERSION_ID__ < 0x020e0000 ; 2.14
         %define HAVE_PRIVATE_EXTERN 0
@@ -233,6 +238,16 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
     %endif
 %endmacro
 
+; Repeats an instruction/operation for multiple arguments.
+; Example usage: "REPX {psrlw x, 8}, m0, m1, m2, m3"
+%macro REPX 2-* ; operation, args
+    %xdefine %%f(x) %1
+    %rep %0 - 1
+        %rotate 1
+        %%f(%1)
+    %endrep
+%endmacro
+
 %macro PUSH 1
     push %1
     %ifidn rstk, rsp
@@ -349,7 +364,46 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
 %define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
 %define high_mm_regs (16*cpuflag(avx512))
 
-%macro ALLOC_STACK 0-2 0, 0 ; stack_size, n_xmm_regs (for win64 only)
+; Large stack allocations on Windows need to use stack probing in order
+; to guarantee that all stack memory is committed before accessing it.
+; This is done by ensuring that the guard page(s) at the end of the
+; currently committed pages are touched prior to any pages beyond that.
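;; [editor's note: illustrative sketch, not part of the patch] To make the
;; probing scheme concrete: on WIN64, where STACK_PROBE_SIZE is 8192, a
;; hypothetical "PROBE_STACK 0x5000" would expand to
;;     mov eax, [rsp-0x2000]
;;     mov eax, [rsp-0x4000]
;; one read per 8 KiB below the stack pointer, so each guard page is touched
;; and committed before ALLOC_STACK's "SUB rsp" moves past it.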
+%if WIN64
+    %assign STACK_PROBE_SIZE 8192
+%elifidn __OUTPUT_FORMAT__, win32
+    %assign STACK_PROBE_SIZE 4096
+%else
+    %assign STACK_PROBE_SIZE 0
+%endif
+
+%macro PROBE_STACK 1 ; stack_size
+    %if STACK_PROBE_SIZE
+        %assign %%i STACK_PROBE_SIZE
+        %rep %1 / STACK_PROBE_SIZE
+            mov eax, [rsp-%%i]
+            %assign %%i %%i+STACK_PROBE_SIZE
+        %endrep
+    %endif
+%endmacro
+
+%macro RESET_STACK_STATE 0
+    %ifidn rstk, rsp
+        %assign stack_offset stack_offset - stack_size_padded
+    %else
+        %xdefine rstk rsp
+    %endif
+    %assign stack_size 0
+    %assign stack_size_padded 0
+    %assign xmm_regs_used 0
+%endmacro
+
+%macro ALLOC_STACK 0-2 0, 0 ; stack_size, n_xmm_regs
+    RESET_STACK_STATE
+    %ifnum %2
+        %if mmsize != 8
+            %assign xmm_regs_used %2
+        %endif
+    %endif
     %ifnum %1
         %if %1 != 0
             %assign %%pad 0
@@ -359,16 +413,14 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
             %endif
             %if WIN64
                 %assign %%pad %%pad + 32 ; shadow space
-                %if mmsize != 8
-                    %assign xmm_regs_used %2
-                    %if xmm_regs_used > 8
-                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
-                    %endif
+                %if xmm_regs_used > 8
+                    %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
                 %endif
             %endif
             %if required_stack_alignment <= STACK_ALIGNMENT
                 ; maintain the current stack alignment
                 %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+                PROBE_STACK stack_size_padded
                 SUB rsp, stack_size_padded
             %else
                 %assign %%reg_num (regs_used - 1)
@@ -384,6 +436,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
                     %xdefine rstkm rstk
                 %endif
                 %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+                PROBE_STACK stack_size_padded
                 mov rstk, rsp
                 and rsp, ~(required_stack_alignment-1)
                 sub rsp, stack_size_padded
@@ -457,35 +510,62 @@ DECLARE_REG 14, R13, 120
     %endif
 %endmacro
 
-%macro WIN64_PUSH_XMM 0
-    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
-    %if xmm_regs_used > 6 + high_mm_regs
-        movaps [rstk + stack_offset +  8], xmm6
-    %endif
-    %if xmm_regs_used > 7 + high_mm_regs
-        movaps [rstk + stack_offset + 24], xmm7
-    %endif
-    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
-    %if %%xmm_regs_on_stack > 0
-        %assign %%i 8
-        %rep %%xmm_regs_on_stack
-            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
-            %assign %%i %%i+1
-        %endrep
+; Push XMM registers to the stack. If no argument is specified, all used registers
+; will be pushed; otherwise only previously unpushed registers are pushed.
+%macro WIN64_PUSH_XMM 0-2 ; new_xmm_regs_used, xmm_regs_pushed
+    %if mmsize != 8
+        %if %0 == 2
+            %assign %%pushed %2
+            %assign xmm_regs_used %1
+        %elif %0 == 1
+            %assign %%pushed xmm_regs_used
+            %assign xmm_regs_used %1
+        %else
+            %assign %%pushed 0
+        %endif
+        ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
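;; [editor's note: illustrative, not part of the patch] Background for the
;; offsets used just below: the Win64 ABI gives every callee 32 bytes of
;; caller-allocated shadow space directly above the return address, and
;; rstk + stack_offset points at the return address after the prologue
;; pushes, so the slots at +8 and +24 hold xmm6/xmm7 for free. The two-arg
;; form enables incremental spilling, e.g. with hypothetical counts:
;;     WIN64_SPILL_XMM 8, 12 ; reserve stack for 12 regs, push xmm6/xmm7 now
;;     WIN64_PUSH_XMM 12, 8  ; later: push xmm8..xmm11 into the reserved space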
+        %if %%pushed <= 6 + high_mm_regs && xmm_regs_used > 6 + high_mm_regs
+            movaps [rstk + stack_offset +  8], xmm6
+        %endif
+        %if %%pushed <= 7 + high_mm_regs && xmm_regs_used > 7 + high_mm_regs
+            movaps [rstk + stack_offset + 24], xmm7
+        %endif
+        %assign %%pushed %%pushed - high_mm_regs - 8
+        %if %%pushed < 0
+            %assign %%pushed 0
+        %endif
+        %assign %%regs_to_push xmm_regs_used - %%pushed - high_mm_regs - 8
+        %if %%regs_to_push > 0
+            ASSERT (%%regs_to_push + %%pushed) * 16 <= stack_size_padded - stack_size - 32
+            %assign %%i %%pushed + 8
+            %rep %%regs_to_push
+                movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+                %assign %%i %%i+1
+            %endrep
+        %endif
     %endif
 %endmacro
 
-%macro WIN64_SPILL_XMM 1
-    %assign xmm_regs_used %1
-    ASSERT xmm_regs_used <= 16 + high_mm_regs
-    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
-    %if %%xmm_regs_on_stack > 0
-        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
-        %assign %%pad %%xmm_regs_on_stack*16 + 32
-        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
-        SUB rsp, stack_size_padded
+; Allocate stack space for XMM registers and push all, or a subset, of them
+%macro WIN64_SPILL_XMM 1-2 ; xmm_regs_used, xmm_regs_reserved
+    RESET_STACK_STATE
+    %if mmsize != 8
+        %assign xmm_regs_used %1
+        ASSERT xmm_regs_used <= 16 + high_mm_regs
+        %if %0 == 2
+            ASSERT %2 >= %1
+            %assign %%xmm_regs_on_stack %2 - high_mm_regs - 8
+        %else
+            %assign %%xmm_regs_on_stack %1 - high_mm_regs - 8
+        %endif
+        %if %%xmm_regs_on_stack > 0
+            ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+            %assign %%pad %%xmm_regs_on_stack*16 + 32
+            %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+            SUB rsp, stack_size_padded
+        %endif
+        WIN64_PUSH_XMM
     %endif
-    WIN64_PUSH_XMM
 %endmacro
 
 %macro WIN64_RESTORE_XMM_INTERNAL 0
@@ -516,9 +596,7 @@ DECLARE_REG 14, R13, 120
 
 %macro WIN64_RESTORE_XMM 0
     WIN64_RESTORE_XMM_INTERNAL
-    %assign stack_offset (stack_offset-stack_size_padded)
-    %assign stack_size_padded 0
-    %assign xmm_regs_used 0
+    RESET_STACK_STATE
 %endmacro
 
 %define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs
@@ -553,12 +631,11 @@ DECLARE_REG 14, R13, 72
 %macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
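;; [editor's note: illustrative, not part of the patch] PROLOGUE is normally
;; reached through cglobal; a hypothetical declaration such as
;;     cglobal blur_line, 4, 6, 8, 0x40, dst, src, w, h
;; asks for 4 named args, 6 GPRs, 8 XMM registers and 0x40 bytes of stack.
;; With this patch the XMM count (%3) is forwarded to ALLOC_STACK below
;; instead of being assigned directly in PROLOGUE.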
     %assign num_args %1
     %assign regs_used %2
-    %assign xmm_regs_used %3
     ASSERT regs_used >= num_args
     SETUP_STACK_POINTER %4
     ASSERT regs_used <= 15
     PUSH_IF_USED 9, 10, 11, 12, 13, 14
-    ALLOC_STACK %4
+    ALLOC_STACK %4, %3
     LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
     %if %0 > 4
         %ifnum %4
@@ -622,7 +699,7 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
     SETUP_STACK_POINTER %4
     ASSERT regs_used <= 7
     PUSH_IF_USED 3, 4, 5, 6
-    ALLOC_STACK %4
+    ALLOC_STACK %4, %3
     LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
     %if %0 > 4
         %ifnum %4
@@ -655,13 +732,19 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
 %endif ;======================================================================
 
 %if WIN64 == 0
-    %macro WIN64_SPILL_XMM 1
-        %assign xmm_regs_used %1
+    %macro WIN64_SPILL_XMM 1-2
+        RESET_STACK_STATE
+        %if mmsize != 8
+            %assign xmm_regs_used %1
+        %endif
     %endmacro
     %macro WIN64_RESTORE_XMM 0
-        %assign xmm_regs_used 0
+        RESET_STACK_STATE
     %endmacro
-    %macro WIN64_PUSH_XMM 0
+    %macro WIN64_PUSH_XMM 0-2
+        %if mmsize != 8 && %0 >= 1
+            %assign xmm_regs_used %1
+        %endif
     %endmacro
 %endif
 
@@ -806,9 +889,26 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     %1: %2
 %endmacro
 
-; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
 %if FORMAT_ELF
+    ; The GNU linker assumes the stack is executable by default.
     [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
+
+    %ifdef __NASM_VERSION_ID__
+        %if __NASM_VERSION_ID__ >= 0x020e0300 ; 2.14.03
+            %if ARCH_X86_64
+                ; Control-flow Enforcement Technology (CET) properties.
+                [SECTION .note.gnu.property alloc noexec nowrite note align=gprsize]
+                dd 0x00000004  ; n_namesz
+                dd gprsize + 8 ; n_descsz
+                dd 0x00000005  ; n_type = NT_GNU_PROPERTY_TYPE_0
+                db "GNU",0     ; n_name
+                dd 0xc0000002  ; pr_type = GNU_PROPERTY_X86_FEATURE_1_AND
+                dd 0x00000004  ; pr_datasz
+                dd 0x00000002  ; pr_data = GNU_PROPERTY_X86_FEATURE_1_SHSTK
+                dd 0x00000000  ; pr_padding
+            %endif
+        %endif
+    %endif
 %endif
 
 ; Tell debuggers how large the function was.
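;; [editor's note: not part of the patch] Layout check for the CET note
;; emitted above: the descriptor is pr_type (4 bytes) + pr_datasz (4) +
;; pr_data (4) + pr_padding (4), i.e. 8 bytes of header plus pr_data padded
;; to gprsize, matching n_descsz = gprsize + 8 (16 on x86-64). pr_data
;; 0x00000002 sets only GNU_PROPERTY_X86_FEATURE_1_SHSTK (bit 1), so the
;; object advertises shadow-stack compatibility without claiming IBT (bit 0).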
@@ -844,21 +944,22 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
 %assign cpuflags_sse4      (1<<10) | cpuflags_ssse3
 %assign cpuflags_sse42     (1<<11) | cpuflags_sse4
 %assign cpuflags_aesni     (1<<12) | cpuflags_sse42
-%assign cpuflags_gfni      (1<<13) | cpuflags_sse42
-%assign cpuflags_avx       (1<<14) | cpuflags_sse42
-%assign cpuflags_xop       (1<<15) | cpuflags_avx
-%assign cpuflags_fma4      (1<<16) | cpuflags_avx
-%assign cpuflags_fma3      (1<<17) | cpuflags_avx
-%assign cpuflags_bmi1      (1<<18) | cpuflags_avx|cpuflags_lzcnt
-%assign cpuflags_bmi2      (1<<19) | cpuflags_bmi1
-%assign cpuflags_avx2      (1<<20) | cpuflags_fma3|cpuflags_bmi2
-%assign cpuflags_avx512    (1<<21) | cpuflags_avx2 ; F, CD, BW, DQ, VL
-%assign cpuflags_avx512icl (1<<22) | cpuflags_avx512|cpuflags_gfni ; VNNI, IFMA, VBMI, VBMI2, VPOPCNTDQ, BITALG, VAES, VPCLMULQDQ
-
-%assign cpuflags_cache32   (1<<23)
-%assign cpuflags_cache64   (1<<24)
-%assign cpuflags_aligned   (1<<25) ; not a cpu feature, but a function variant
-%assign cpuflags_atom      (1<<26)
+%assign cpuflags_clmul     (1<<13) | cpuflags_sse42
+%assign cpuflags_gfni      (1<<14) | cpuflags_aesni|cpuflags_clmul
+%assign cpuflags_avx       (1<<15) | cpuflags_sse42
+%assign cpuflags_xop       (1<<16) | cpuflags_avx
+%assign cpuflags_fma4      (1<<17) | cpuflags_avx
+%assign cpuflags_fma3      (1<<18) | cpuflags_avx
+%assign cpuflags_bmi1      (1<<19) | cpuflags_avx|cpuflags_lzcnt
+%assign cpuflags_bmi2      (1<<20) | cpuflags_bmi1
+%assign cpuflags_avx2      (1<<21) | cpuflags_fma3|cpuflags_bmi2
+%assign cpuflags_avx512    (1<<22) | cpuflags_avx2 ; F, CD, BW, DQ, VL
+%assign cpuflags_avx512icl (1<<23) | cpuflags_avx512|cpuflags_gfni ; VNNI, IFMA, VBMI, VBMI2, VPOPCNTDQ, BITALG, VAES, VPCLMULQDQ
+
+%assign cpuflags_cache32   (1<<24)
+%assign cpuflags_cache64   (1<<25)
+%assign cpuflags_aligned   (1<<26) ; not a cpu feature, but a function variant
+%assign cpuflags_atom      (1<<27)
 
 ; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
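;; [editor's note: not part of the patch] How the cpuflag() test below works:
;; when every bit of cpuflags_x is set in cpuflags, the masked XOR is 0,
;; subtracting 1 wraps to all-ones, and the shift-and-mask yields 1; any
;; missing bit leaves a positive value below 1<<31, which shifts down to 0.
;; With the renumbered table above, cpuflag(avx2) therefore implies fma3,
;; bmi2 and every SSE level, and the new cpuflag(gfni) implies both aesni
;; and the new clmul flag.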
 %define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
@@ -900,13 +1001,13 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     %endif
     %if ARCH_X86_64 || cpuflag(sse2)
-        %ifdef __NASM_VER__
+        %ifdef __NASM_VERSION_ID__
             ALIGNMODE p6
         %else
             CPU amdnop
         %endif
     %else
-        %ifdef __NASM_VER__
+        %ifdef __NASM_VERSION_ID__
             ALIGNMODE nop
         %else
             CPU basicnop
         %endif
     %endif
@@ -984,7 +1085,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
 %endmacro
 
 %macro INIT_XMM 0-1+
-    %assign avx_enabled 0
+    %assign avx_enabled FORCE_VEX_ENCODING
     %define RESET_MM_PERMUTATION INIT_XMM %1
     %define mmsize 16
     %define mova movdqa
@@ -996,6 +1097,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     %if WIN64
         AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
     %endif
+    %xdefine bcstw 1to8
     %xdefine bcstd 1to4
     %xdefine bcstq 1to2
 %endmacro
@@ -1011,6 +1113,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     INIT_CPUFLAGS %1
     DEFINE_MMREGS ymm
     AVX512_MM_PERMUTATION
+    %xdefine bcstw 1to16
     %xdefine bcstd 1to8
     %xdefine bcstq 1to4
 %endmacro
@@ -1026,6 +1129,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
     INIT_CPUFLAGS %1
     DEFINE_MMREGS zmm
     AVX512_MM_PERMUTATION
+    %xdefine bcstw 1to32
     %xdefine bcstd 1to16
     %xdefine bcstq 1to8
 %endmacro
@@ -1139,8 +1243,7 @@ INIT_XMM
     %endif
     %xdefine %%tmp %%f %+ 0
     %ifnum %%tmp
-        RESET_MM_PERMUTATION
-        AVX512_MM_PERMUTATION
+        DEFINE_MMREGS mmtype
         %assign %%i 0
         %rep num_mmregs
             %xdefine %%tmp %%f %+ %%i
@@ -1314,28 +1417,65 @@ INIT_XMM
             %1 %6, __src2
         %endif
     %elif %0 >= 9
-        __instr %6, %7, %8, %9
+        %if avx_enabled && __sizeofreg >= 16 && %4 == 1
+            %ifnnum regnumof%7
+                %if %3
+                    vmovaps %6, %7
+                %else
+                    vmovdqa %6, %7
+                %endif
+                __instr %6, %6, %8, %9
+            %else
+                __instr %6, %7, %8, %9
+            %endif
+        %else
+            __instr %6, %7, %8, %9
+        %endif
     %elif %0 == 8
-        %if avx_enabled && %5
+        %if avx_enabled && __sizeofreg >= 16 && %4 == 0
             %xdefine __src1 %7
             %xdefine __src2 %8
-            %ifnum regnumof%7
-                %ifnum regnumof%8
-                    %if regnumof%7 < 8 && regnumof%8 >= 8 && regnumof%8 < 16 && sizeof%8 <= 32
-                        ; Most VEX-encoded instructions require an additional byte to encode when
-                        ; src2 is a high register (e.g. m8..15). If the instruction is commutative
-                        ; we can swap src1 and src2 when doing so reduces the instruction length.
-                        %xdefine __src1 %8
-                        %xdefine __src2 %7
+            %assign __emulate_avx 0
+            %if %5
+                %ifnum regnumof%7
+                    %ifnum regnumof%8
+                        %if regnumof%7 < 8 && regnumof%8 >= 8 && regnumof%8 < 16 && sizeof%8 <= 32
+                            ; Most VEX-encoded instructions require an additional byte to encode when
+                            ; src2 is a high register (e.g. m8..15). If the instruction is commutative
+                            ; we can swap src1 and src2 when doing so reduces the instruction length.
+                            %xdefine __src1 %8
+                            %xdefine __src2 %7
+                        %endif
                     %endif
-                %endif
+                %elifnum regnumof%8 ; put memory operands in src2 when possible
+                    %xdefine __src1 %8
+                    %xdefine __src2 %7
+                %else
+                    %assign __emulate_avx 1
+                %endif
+            %elifnnum regnumof%7
+                ; EVEX allows imm8 shift instructions to be used with memory operands,
+                ; but VEX does not. This handles those special cases.
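;; [editor's note: illustrative, not part of the patch] Example of the case
;; handled here: in an avx512 function, a hypothetical "psraw m0, [r2], 4"
;; can be emitted directly as the EVEX form "vpsraw xmm0, [r2], 4", whereas
;; a plain avx build takes the emulation path below:
;;     vmovdqa xmm0, [r2]
;;     vpsraw  xmm0, xmm0, 4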
+                %ifnnum %8
+                    %assign __emulate_avx 1
+                %elif notcpuflag(avx512)
+                    %assign __emulate_avx 1
+                %endif
             %endif
-            __instr %6, __src1, __src2
+            %if __emulate_avx ; a separate load is required
+                %if %3
+                    vmovaps %6, %7
+                %else
+                    vmovdqa %6, %7
+                %endif
+                __instr %6, %6, %8
+            %else
+                __instr %6, __src1, __src2
+            %endif
         %else
             __instr %6, %7, %8
         %endif
     %elif %0 == 7
-        %if avx_enabled && %5
+        %if avx_enabled && __sizeofreg >= 16 && %5
             %xdefine __src1 %6
             %xdefine __src2 %7
             %ifnum regnumof%6
@@ -1396,8 +1536,8 @@ AVX_INSTR andpd, sse2, 1, 0, 1
 AVX_INSTR andps, sse, 1, 0, 1
 AVX_INSTR blendpd, sse4, 1, 1, 0
 AVX_INSTR blendps, sse4, 1, 1, 0
-AVX_INSTR blendvpd, sse4 ; can't be emulated
-AVX_INSTR blendvps, sse4 ; can't be emulated
+AVX_INSTR blendvpd, sse4, 1, 1, 0 ; last operand must be xmm0 with legacy encoding
+AVX_INSTR blendvps, sse4, 1, 1, 0 ; last operand must be xmm0 with legacy encoding
 AVX_INSTR cmpeqpd, sse2, 1, 0, 1
 AVX_INSTR cmpeqps, sse, 1, 0, 1
 AVX_INSTR cmpeqsd, sse2, 1, 0, 0
@@ -1530,13 +1670,13 @@ AVX_INSTR pand, mmx, 0, 0, 1
 AVX_INSTR pandn, mmx, 0, 0, 0
 AVX_INSTR pavgb, mmx2, 0, 0, 1
 AVX_INSTR pavgw, mmx2, 0, 0, 1
-AVX_INSTR pblendvb, sse4 ; can't be emulated
+AVX_INSTR pblendvb, sse4, 0, 1, 0 ; last operand must be xmm0 with legacy encoding
 AVX_INSTR pblendw, sse4, 0, 1, 0
-AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
-AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
-AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
-AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
-AVX_INSTR pclmulqdq, fnord, 0, 1, 0
+AVX_INSTR pclmulhqhqdq, clmul, 0, 0, 0
+AVX_INSTR pclmulhqlqdq, clmul, 0, 0, 0
+AVX_INSTR pclmullqhqdq, clmul, 0, 0, 0
+AVX_INSTR pclmullqlqdq, clmul, 0, 0, 0
+AVX_INSTR pclmulqdq, clmul, 0, 1, 0
 AVX_INSTR pcmpeqb, mmx, 0, 0, 1
 AVX_INSTR pcmpeqd, mmx, 0, 0, 1
 AVX_INSTR pcmpeqq, sse4, 0, 0, 1
@@ -1691,6 +1831,7 @@ GPR_INSTR blsi, bmi1
 GPR_INSTR blsmsk, bmi1
 GPR_INSTR blsr, bmi1
 GPR_INSTR bzhi, bmi2
+GPR_INSTR crc32, sse42
 GPR_INSTR mulx, bmi2
 GPR_INSTR pdep, bmi2
 GPR_INSTR pext, bmi2
--
cgit v1.2.3
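;; [editor's note: illustrative, not part of the patch] Two consequences of
;; the instruction-table updates above: the blendv family may now be used in
;; its 4-operand form on pre-AVX targets too, provided the mask operand maps
;; to xmm0 (the implicit mask register of the legacy encoding), e.g. a
;; hypothetical "pblendvb m0, m1, m2, m3" assembles on SSE4 only when m3
;; currently maps to xmm0, while VEX encoding accepts any mask register.
;; And instructions like "crc32 eax, byte [r1]" become available in
;; functions declared with the sse42 cpuflag via the new GPR_INSTR entry.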