Merge 5.10.133 into android12-5.10-lts

Changes in 5.10.133
	KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
	KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
	objtool: Refactor ORC section generation
	objtool: Add 'alt_group' struct
	objtool: Support stack layout changes in alternatives
	objtool: Support retpoline jump detection for vmlinux.o
	objtool: Assume only ELF functions do sibling calls
	objtool: Combine UNWIND_HINT_RET_OFFSET and UNWIND_HINT_FUNC
	x86/xen: Support objtool validation in xen-asm.S
	x86/xen: Support objtool vmlinux.o validation in xen-head.S
	x86/alternative: Merge include files
	x86/alternative: Support not-feature
	x86/alternative: Support ALTERNATIVE_TERNARY
	x86/alternative: Use ALTERNATIVE_TERNARY() in _static_cpu_has()
	x86/insn: Rename insn_decode() to insn_decode_from_regs()
	x86/insn: Add a __ignore_sync_check__ marker
	x86/insn: Add an insn_decode() API
	x86/insn-eval: Handle return values from the decoder
	x86/alternative: Use insn_decode()
	x86: Add insn_decode_kernel()
	x86/alternatives: Optimize optimize_nops()
	x86/retpoline: Simplify retpolines
	objtool: Correctly handle retpoline thunk calls
	objtool: Handle per arch retpoline naming
	objtool: Rework the elf_rebuild_reloc_section() logic
	objtool: Add elf_create_reloc() helper
	objtool: Create reloc sections implicitly
	objtool: Extract elf_strtab_concat()
	objtool: Extract elf_symbol_add()
	objtool: Add elf_create_undef_symbol()
	objtool: Keep track of retpoline call sites
	objtool: Cache instruction relocs
	objtool: Skip magical retpoline .altinstr_replacement
	objtool/x86: Rewrite retpoline thunk calls
	objtool: Support asm jump tables
	x86/alternative: Optimize single-byte NOPs at an arbitrary position
	objtool: Fix .symtab_shndx handling for elf_create_undef_symbol()
	objtool: Only rewrite unconditional retpoline thunk calls
	objtool/x86: Ignore __x86_indirect_alt_* symbols
	objtool: Don't make .altinstructions writable
	objtool: Teach get_alt_entry() about more relocation types
	objtool: print out the symbol type when complaining about it
	objtool: Remove reloc symbol type checks in get_alt_entry()
	objtool: Make .altinstructions section entry size consistent
	objtool: Introduce CFI hash
	objtool: Handle __sanitize_cov*() tail calls
	objtool: Classify symbols
	objtool: Explicitly avoid self modifying code in .altinstr_replacement
	objtool,x86: Replace alternatives with .retpoline_sites
	x86/retpoline: Remove unused replacement symbols
	x86/asm: Fix register order
	x86/asm: Fixup odd GEN-for-each-reg.h usage
	x86/retpoline: Move the retpoline thunk declarations to nospec-branch.h
	x86/retpoline: Create a retpoline thunk array
	x86/alternative: Implement .retpoline_sites support
	x86/alternative: Handle Jcc __x86_indirect_thunk_\reg
	x86/alternative: Try inline spectre_v2=retpoline,amd
	x86/alternative: Add debug prints to apply_retpolines()
	bpf,x86: Simplify computing label offsets
	bpf,x86: Respect X86_FEATURE_RETPOLINE*
	x86/lib/atomic64_386_32: Rename things
	x86: Prepare asm files for straight-line-speculation
	x86: Prepare inline-asm for straight-line-speculation
	x86/alternative: Relax text_poke_bp() constraint
	objtool: Add straight-line-speculation validation
	x86: Add straight-line-speculation mitigation
	tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'
	kvm/emulate: Fix SETcc emulation function offsets with SLS
	objtool: Default ignore INT3 for unreachable
	crypto: x86/poly1305 - Fixup SLS
	objtool: Fix SLS validation for kcov tail-call replacement
	objtool: Fix code relocs vs weak symbols
	objtool: Fix type of reloc::addend
	objtool: Fix symbol creation
	x86/entry: Remove skip_r11rcx
	objtool: Fix objtool regression on x32 systems
	x86/realmode: build with -D__DISABLE_EXPORTS
	x86/kvm/vmx: Make noinstr clean
	x86/cpufeatures: Move RETPOLINE flags to word 11
	x86/retpoline: Cleanup some #ifdefery
	x86/retpoline: Swizzle retpoline thunk
	Makefile: Set retpoline cflags based on CONFIG_CC_IS_{CLANG,GCC}
	x86/retpoline: Use -mfunction-return
	x86: Undo return-thunk damage
	x86,objtool: Create .return_sites
	objtool: skip non-text sections when adding return-thunk sites
	x86,static_call: Use alternative RET encoding
	x86/ftrace: Use alternative RET encoding
	x86/bpf: Use alternative RET encoding
	x86/kvm: Fix SETcc emulation for return thunks
	x86/vsyscall_emu/64: Don't use RET in vsyscall emulation
	x86/sev: Avoid using __x86_return_thunk
	x86: Use return-thunk in asm code
	objtool: Treat .text.__x86.* as noinstr
	x86: Add magic AMD return-thunk
	x86/bugs: Report AMD retbleed vulnerability
	x86/bugs: Add AMD retbleed= boot parameter
	x86/bugs: Enable STIBP for JMP2RET
	x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
	x86/entry: Add kernel IBRS implementation
	x86/bugs: Optimize SPEC_CTRL MSR writes
	x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
	x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
	x86/bugs: Report Intel retbleed vulnerability
	intel_idle: Disable IBRS during long idle
	objtool: Update Retpoline validation
	x86/xen: Rename SYS* entry points
	x86/bugs: Add retbleed=ibpb
	x86/bugs: Do IBPB fallback check only once
	objtool: Add entry UNRET validation
	x86/cpu/amd: Add Spectral Chicken
	x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
	x86/speculation: Fix firmware entry SPEC_CTRL handling
	x86/speculation: Fix SPEC_CTRL write on SMT state change
	x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
	x86/speculation: Remove x86_spec_ctrl_mask
	objtool: Re-add UNWIND_HINT_{SAVE_RESTORE}
	KVM: VMX: Flatten __vmx_vcpu_run()
	KVM: VMX: Convert launched argument to flags
	KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
	KVM: VMX: Fix IBRS handling after vmexit
	x86/speculation: Fill RSB on vmexit for IBRS
	x86/common: Stamp out the stepping madness
	x86/cpu/amd: Enumerate BTC_NO
	x86/retbleed: Add fine grained Kconfig knobs
	x86/bugs: Add Cannon lake to RETBleed affected CPU list
	x86/bugs: Do not enable IBPB-on-entry when IBPB is not supported
	x86/kexec: Disable RET on kexec
	x86/speculation: Disable RRSBA behavior
	x86/static_call: Serialize __static_call_fixup() properly
	tools/insn: Restore the relative include paths for cross building
	x86, kvm: use proper ASM macros for kvm_vcpu_is_preempted
	x86/xen: Fix initialisation in hypercall_page after rethunk
	x86/ftrace: Add UNWIND_HINT_FUNC annotation for ftrace_stub
	x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
	x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
	efi/x86: use naked RET on mixed mode call wrapper
	x86/kvm: fix FASTOP_SIZE when return thunks are enabled
	KVM: emulate: do not adjust size of fastop and setcc subroutines
	tools arch x86: Sync the msr-index.h copy with the kernel sources
	tools headers cpufeatures: Sync with the kernel sources
	x86/bugs: Remove apostrophe typo
	um: Add missing apply_returns()
	x86: Use -mindirect-branch-cs-prefix for RETPOLINE builds
	kvm: fix objtool relocation warning
	objtool: Fix elf_create_undef_symbol() endianness
	tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy' - again
	tools headers: Remove broken definition of __LITTLE_ENDIAN
	Linux 5.10.133

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Change-Id: I7e23843058c509562ae3f3a68e0710f31249a087
Author: Sami Tolvanen
Date: 2022-08-02 11:30:07 -07:00
208 files changed, 4622 insertions(+), 1995 deletions(-)


@@ -4751,6 +4751,30 @@
retain_initrd [RAM] Keep initrd memory after extraction
retbleed= [X86] Control mitigation of RETBleed (Arbitrary
Speculative Code Execution with Return Instructions)
vulnerability.
off - no mitigation
auto - automatically select a mitigation
auto,nosmt - automatically select a mitigation,
disabling SMT if necessary for
the full mitigation (only on Zen1
and older without STIBP).
ibpb - mitigate short speculation windows on
basic block boundaries too. Safe, highest
perf impact.
unret - force enable untrained return thunks,
only effective on AMD f15h-f17h
based systems.
unret,nosmt - like unret, will disable SMT when STIBP
is not available.
Selecting 'auto' will choose a mitigation method at run
time according to the CPU.
Not specifying this option is equivalent to retbleed=auto.
rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
@@ -5100,6 +5124,7 @@
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.
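
A rough illustration of how the comma-separated retbleed= values documented above might map onto a mitigation choice. This is not the kernel's actual parser; every identifier below is made up for the sketch:

	#include <stdbool.h>
	#include <string.h>

	enum retbleed_cmd { RETBLEED_OFF, RETBLEED_AUTO, RETBLEED_UNRET, RETBLEED_IBPB };

	/* Illustrative only: map a retbleed= string to a command plus a nosmt flag. */
	static enum retbleed_cmd parse_retbleed(const char *str, bool *nosmt)
	{
		*nosmt = (strstr(str, ",nosmt") != NULL);

		if (!strcmp(str, "off"))
			return RETBLEED_OFF;
		if (!strcmp(str, "ibpb"))
			return RETBLEED_IBPB;
		if (!strncmp(str, "unret", 5))
			return RETBLEED_UNRET;
		return RETBLEED_AUTO;	/* "auto", "auto,nosmt" and anything unknown */
	}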


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 132
SUBLEVEL = 133
EXTRAVERSION =
NAME = Dare mighty things
@@ -688,12 +688,21 @@ ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
endif
ifdef CONFIG_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
endif
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
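
For reference, the new RETHUNK_CFLAGS pass -mfunction-return=thunk-extern, which makes the compiler end each function with a jump to the external __x86_return_thunk symbol (provided, and later patchable, by the kernel) instead of a bare ret. A minimal sketch of the effect; the asm in the comments is illustrative, not exact compiler output:

	/* Build with and without -mfunction-return=thunk-extern and compare. */
	int add_one(int x)
	{
		return x + 1;
		/* default:       lea 0x1(%rdi),%eax ; ret                    */
		/* thunk-extern:  lea 0x1(%rdi),%eax ; jmp __x86_return_thunk */
	}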


@@ -358,6 +358,14 @@ void __init check_bugs(void)
os_check_bugs();
}
void apply_retpolines(s32 *start, s32 *end)
{
}
void apply_returns(s32 *start, s32 *end)
{
}
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
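
These stubs keep user-mode Linux building; the real x86 implementations walk the .retpoline_sites and .return_sites tables that objtool emits and patch each recorded call or return site. A loose conceptual sketch of the idea, not the kernel's actual code (both helpers are hypothetical):

	#include <stdint.h>
	#include <stdbool.h>

	typedef int32_t s32;

	static bool cpu_wants_return_thunk(void) { return false; }         /* hypothetical */
	static void patch_site_with_native_ret(void *addr) { (void)addr; } /* hypothetical */

	static void sketch_apply_returns(s32 *start, s32 *end)
	{
		s32 *s;

		for (s = start; s < end; s++) {
			/* Each table entry is a relative offset back to a return-thunk site. */
			void *addr = (void *)s + *s;

			if (!cpu_wants_return_thunk())
				patch_site_with_native_ret(addr);
		}
	}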


@@ -460,15 +460,6 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2422,6 +2413,88 @@ source "kernel/livepatch/Kconfig"
endmenu
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)
menuconfig SPECULATION_MITIGATIONS
bool "Mitigations for speculative execution vulnerabilities"
default y
help
Say Y here to enable options which enable mitigations for
speculative execution hardware vulnerabilities.
If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.
if SPECULATION_MITIGATIONS
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
depends on (X86_64 || X86_PAE)
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
into userspace.
See Documentation/x86/pti.rst for more details.
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config RETHUNK
bool "Enable return-thunks"
depends on RETPOLINE && CC_HAS_RETURN_THUNK
default y
help
Compile the kernel with the return-thunks compiler option to guard
against kernel-to-user data leaks by avoiding return speculation.
Requires a compiler with -mfunction-return=thunk-extern
support for full protection. The kernel may run slower.
config CPU_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
depends on CPU_SUP_AMD && RETHUNK
default y
help
Compile the kernel with support for the retbleed=unret mitigation.
config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
depends on CPU_SUP_AMD
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.
config CPU_IBRS_ENTRY
bool "Enable IBRS on kernel entry"
depends on CPU_SUP_INTEL
default y
help
Compile the kernel with support for the spectre_v2=ibrs mitigation.
This mitigates both spectre_v2 and retbleed at great cost to
performance.
config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.
endif
config ARCH_HAS_ADD_PAGES
def_bool y
depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
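
The new CONFIG_SLS option builds with -mharden-sls=all, which plants an INT3 trap after returns and indirect jumps so the CPU cannot usefully speculate straight past them. A minimal sketch; the asm comments are illustrative rather than exact compiler output:

	int identity(int x)
	{
		return x;
		/* without SLS:  mov %edi,%eax ; ret        */
		/* with SLS:     mov %edi,%eax ; ret ; int3 */
	}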


@@ -31,7 +31,7 @@ endif
CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
@@ -196,6 +196,10 @@ ifdef CONFIG_RETPOLINE
endif
endif
ifdef CONFIG_SLS
KBUILD_CFLAGS += -mharden-sls=all
endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG


@@ -89,7 +89,7 @@ SYM_FUNC_START(__efi64_thunk)
pop %rbx
pop %rbp
ret
RET
SYM_FUNC_END(__efi64_thunk)
.code32
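
Most of the remaining hunks in this diff are the same mechanical change: bare ret/retq instructions in assembly become the RET macro so that every return can carry a mitigation. Simplified from the kernel's arch/x86/include/asm/linkage.h (exact definitions vary by configuration and version):

	#ifdef CONFIG_RETHUNK
	# define RET	jmp __x86_return_thunk	/* patchable return thunk (retbleed) */
	#elif defined(CONFIG_SLS)
	# define RET	ret; int3		/* block straight-line speculation */
	#else
	# define RET	ret			/* plain return */
	#endif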


@@ -786,7 +786,7 @@ SYM_FUNC_START(efi32_pe_entry)
2: popl %edi // restore callee-save registers
popl %ebx
leave
ret
RET
SYM_FUNC_END(efi32_pe_entry)
.section ".rodata"
@@ -868,7 +868,7 @@ SYM_FUNC_START(startup32_check_sev_cbit)
popl %ebx
popl %eax
#endif
ret
RET
SYM_FUNC_END(startup32_check_sev_cbit)
/*


@@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
#endif /* CONFIG_AMD_MEM_ENCRYPT */
ret
RET
SYM_FUNC_END(get_sev_encryption_bit)
.code64
@@ -99,7 +99,7 @@ SYM_FUNC_START(set_sev_encryption_mask)
#endif
xor %rax, %rax
ret
RET
SYM_FUNC_END(set_sev_encryption_mask)
.data


@@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial)
pxor T0, MSG
.Lld_partial_8:
ret
RET
SYM_FUNC_END(__load_partial)
/*
@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial)
mov %r10b, (%r9)
.Lst_partial_1:
ret
RET
SYM_FUNC_END(__store_partial)
/*
@@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_init)
/*
@@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_1:
movdqu STATE4, 0x00(STATEP)
@@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_2:
movdqu STATE3, 0x00(STATEP)
@@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_3:
movdqu STATE2, 0x00(STATEP)
@@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_4:
movdqu STATE1, 0x00(STATEP)
@@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_ad)
.macro encrypt_block a s0 s1 s2 s3 s4 i
@@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_1:
movdqu STATE3, 0x00(STATEP)
@@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_2:
movdqu STATE2, 0x00(STATEP)
@@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_3:
movdqu STATE1, 0x00(STATEP)
@@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_4:
movdqu STATE0, 0x00(STATEP)
@@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_enc)
/*
@@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
@@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_1:
movdqu STATE3, 0x00(STATEP)
@@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_2:
movdqu STATE2, 0x00(STATEP)
@@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_3:
movdqu STATE1, 0x00(STATEP)
@@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_4:
movdqu STATE0, 0x00(STATEP)
@@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_dec)
/*
@@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
/*
@@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
movdqu MSG, (%rsi)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_final)


@@ -525,7 +525,7 @@ ddq_add_8:
/* return updated IV */
vpshufb xbyteswap, xcounter, xcounter
vmovdqu xcounter, (p_iv)
ret
RET
.endm
/*


@@ -1598,7 +1598,7 @@ SYM_FUNC_START(aesni_gcm_dec)
GCM_ENC_DEC dec
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec)
@@ -1687,7 +1687,7 @@ SYM_FUNC_START(aesni_gcm_enc)
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc)
/*****************************************************************************
@@ -1705,7 +1705,7 @@ SYM_FUNC_START(aesni_gcm_init)
FUNC_SAVE
GCM_INIT %arg3, %arg4,%arg5, %arg6
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init)
/*****************************************************************************
@@ -1720,7 +1720,7 @@ SYM_FUNC_START(aesni_gcm_enc_update)
FUNC_SAVE
GCM_ENC_DEC enc
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update)
/*****************************************************************************
@@ -1735,7 +1735,7 @@ SYM_FUNC_START(aesni_gcm_dec_update)
FUNC_SAVE
GCM_ENC_DEC dec
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update)
/*****************************************************************************
@@ -1750,7 +1750,7 @@ SYM_FUNC_START(aesni_gcm_finalize)
FUNC_SAVE
GCM_COMPLETE %arg3 %arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize)
#endif
@@ -1766,7 +1766,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_256a)
SYM_FUNC_END_ALIAS(_key_expansion_128)
@@ -1791,7 +1791,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a)
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_192a)
SYM_FUNC_START_LOCAL(_key_expansion_192b)
@@ -1810,7 +1810,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_192b)
SYM_FUNC_START_LOCAL(_key_expansion_256b)
@@ -1822,7 +1822,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b)
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_256b)
/*
@@ -1937,7 +1937,7 @@ SYM_FUNC_START(aesni_set_key)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_set_key)
/*
@@ -1961,7 +1961,7 @@ SYM_FUNC_START(aesni_enc)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_enc)
/*
@@ -2018,7 +2018,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
aesenc KEY, STATE
movaps 0x70(TKEYP), KEY
aesenclast KEY, STATE
ret
RET
SYM_FUNC_END(_aesni_enc1)
/*
@@ -2126,7 +2126,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
aesenclast KEY, STATE2
aesenclast KEY, STATE3
aesenclast KEY, STATE4
ret
RET
SYM_FUNC_END(_aesni_enc4)
/*
@@ -2151,7 +2151,7 @@ SYM_FUNC_START(aesni_dec)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_dec)
/*
@@ -2208,7 +2208,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
aesdec KEY, STATE
movaps 0x70(TKEYP), KEY
aesdeclast KEY, STATE
ret
RET
SYM_FUNC_END(_aesni_dec1)
/*
@@ -2316,7 +2316,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
aesdeclast KEY, STATE2
aesdeclast KEY, STATE3
aesdeclast KEY, STATE4
ret
RET
SYM_FUNC_END(_aesni_dec4)
/*
@@ -2376,7 +2376,7 @@ SYM_FUNC_START(aesni_ecb_enc)
popl LEN
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ecb_enc)
/*
@@ -2437,7 +2437,7 @@ SYM_FUNC_START(aesni_ecb_dec)
popl LEN
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ecb_dec)
/*
@@ -2481,7 +2481,7 @@ SYM_FUNC_START(aesni_cbc_enc)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cbc_enc)
/*
@@ -2574,7 +2574,7 @@ SYM_FUNC_START(aesni_cbc_dec)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cbc_dec)
#ifdef __x86_64__
@@ -2602,7 +2602,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init)
mov $1, TCTR_LOW
movq TCTR_LOW, INC
movq CTR, TCTR_LOW
ret
RET
SYM_FUNC_END(_aesni_inc_init)
/*
@@ -2630,7 +2630,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
.Linc_low:
movaps CTR, IV
pshufb BSWAP_MASK, IV
ret
RET
SYM_FUNC_END(_aesni_inc)
/*
@@ -2693,7 +2693,7 @@ SYM_FUNC_START(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ctr_enc)
/*
@@ -2778,7 +2778,7 @@ SYM_FUNC_START(aesni_xts_encrypt)
movups IV, (IVP)
FRAME_END
ret
RET
SYM_FUNC_END(aesni_xts_encrypt)
/*
@@ -2846,7 +2846,7 @@ SYM_FUNC_START(aesni_xts_decrypt)
movups IV, (IVP)
FRAME_END
ret
RET
SYM_FUNC_END(aesni_xts_decrypt)
#endif


@@ -1777,7 +1777,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2)
FUNC_SAVE
INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init_avx_gen2)
###############################################################################
@@ -1798,15 +1798,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
FUNC_RESTORE
ret
RET
key_128_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
FUNC_RESTORE
ret
RET
key_256_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
###############################################################################
@@ -1827,15 +1827,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
FUNC_RESTORE
ret
RET
key_128_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
FUNC_RESTORE
ret
RET
key_256_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
###############################################################################
@@ -1856,15 +1856,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
FUNC_RESTORE
ret
RET
key_128_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
FUNC_RESTORE
ret
RET
key_256_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
###############################################################################
@@ -2745,7 +2745,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4)
FUNC_SAVE
INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init_avx_gen4)
###############################################################################
@@ -2766,15 +2766,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
FUNC_RESTORE
ret
RET
key_128_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
FUNC_RESTORE
ret
RET
key_256_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
###############################################################################
@@ -2795,15 +2795,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
FUNC_RESTORE
ret
RET
key_128_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
FUNC_RESTORE
ret
RET
key_256_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
###############################################################################
@@ -2824,13 +2824,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
FUNC_RESTORE
ret
RET
key_128_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
FUNC_RESTORE
ret
RET
key_256_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)


@@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
movdqu %xmm1,0x10(%rdi)
movdqu %xmm14,0x20(%rdi)
.Lendofloop:
ret
RET
SYM_FUNC_END(blake2s_compress_ssse3)
#ifdef CONFIG_AS_AVX512
@@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu %xmm1,0x10(%rdi)
vmovdqu %xmm4,0x20(%rdi)
vzeroupper
retq
RET
SYM_FUNC_END(blake2s_compress_avx512)
#endif /* CONFIG_AS_AVX512 */


@@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk)
jnz .L__enc_xor;
write_block();
ret;
RET;
.L__enc_xor:
xor_block();
ret;
RET;
SYM_FUNC_END(__blowfish_enc_blk)
SYM_FUNC_START(blowfish_dec_blk)
@@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk)
movq %r11, %r12;
ret;
RET;
SYM_FUNC_END(blowfish_dec_blk)
/**********************************************************************
@@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way)
popq %rbx;
popq %r12;
ret;
RET;
.L__enc_xor4:
xor_block4();
popq %rbx;
popq %r12;
ret;
RET;
SYM_FUNC_END(__blowfish_enc_blk_4way)
SYM_FUNC_START(blowfish_dec_blk_4way)
@@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way)
popq %rbx;
popq %r12;
ret;
RET;
SYM_FUNC_END(blowfish_dec_blk_4way)


@@ -193,7 +193,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@@ -201,7 +201,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@@ -787,7 +787,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
FRAME_END
ret;
RET;
.align 8
.Lenc_max32:
@@ -874,7 +874,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
FRAME_END
ret;
RET;
.align 8
.Ldec_max32:
@@ -915,7 +915,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_enc_16way)
SYM_FUNC_START(camellia_ecb_dec_16way)
@@ -945,7 +945,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_dec_16way)
SYM_FUNC_START(camellia_cbc_dec_16way)
@@ -996,7 +996,7 @@ SYM_FUNC_START(camellia_cbc_dec_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_cbc_dec_16way)
#define inc_le128(x, minus_one, tmp) \
@@ -1109,7 +1109,7 @@ SYM_FUNC_START(camellia_ctr_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ctr_16way)
#define gf128mul_x_ble(iv, mask, tmp) \
@@ -1253,7 +1253,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_xts_crypt_16way)
SYM_FUNC_START(camellia_xts_enc_16way)


@@ -227,7 +227,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@@ -235,7 +235,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@@ -825,7 +825,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
FRAME_END
ret;
RET;
.align 8
.Lenc_max32:
@@ -912,7 +912,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
FRAME_END
ret;
RET;
.align 8
.Ldec_max32:
@@ -957,7 +957,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_enc_32way)
SYM_FUNC_START(camellia_ecb_dec_32way)
@@ -991,7 +991,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_dec_32way)
SYM_FUNC_START(camellia_cbc_dec_32way)
@@ -1059,7 +1059,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_cbc_dec_32way)
#define inc_le128(x, minus_one, tmp) \
@@ -1199,7 +1199,7 @@ SYM_FUNC_START(camellia_ctr_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ctr_32way)
#define gf128mul_x_ble(iv, mask, tmp) \
@@ -1366,7 +1366,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_xts_crypt_32way)
SYM_FUNC_START(camellia_xts_enc_32way)


@@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk)
enc_outunpack(mov, RT1);
movq RR12, %r12;
ret;
RET;
.L__enc_xor:
enc_outunpack(xor, RT1);
movq RR12, %r12;
ret;
RET;
SYM_FUNC_END(__camellia_enc_blk)
SYM_FUNC_START(camellia_dec_blk)
@@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk)
dec_outunpack();
movq RR12, %r12;
ret;
RET;
SYM_FUNC_END(camellia_dec_blk)
/**********************************************************************
@@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way)
movq RR12, %r12;
popq %rbx;
ret;
RET;
.L__enc2_xor:
enc_outunpack2(xor, RT2);
movq RR12, %r12;
popq %rbx;
ret;
RET;
SYM_FUNC_END(__camellia_enc_blk_2way)
SYM_FUNC_START(camellia_dec_blk_2way)
@@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way)
movq RR12, %r12;
movq RXOR, %rbx;
ret;
RET;
SYM_FUNC_END(camellia_dec_blk_2way)


@@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
RET;
SYM_FUNC_END(__cast5_enc_blk16)
.align 16
@@ -352,7 +352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
RET;
.L__skip_dec:
vpsrldq $4, RKR, RKR;
@@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ecb_enc_16way)
SYM_FUNC_START(cast5_ecb_dec_16way)
@@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ecb_dec_16way)
SYM_FUNC_START(cast5_cbc_dec_16way)
@@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_cbc_dec_16way)
SYM_FUNC_START(cast5_ctr_16way)
@@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ctr_16way)


@@ -291,7 +291,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
RET;
SYM_FUNC_END(__cast6_enc_blk8)
.align 8
@@ -338,7 +338,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
RET;
SYM_FUNC_END(__cast6_dec_blk8)
SYM_FUNC_START(cast6_ecb_enc_8way)
@@ -361,7 +361,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_ecb_enc_8way)
SYM_FUNC_START(cast6_ecb_dec_8way)
@@ -384,7 +384,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_ecb_dec_8way)
SYM_FUNC_START(cast6_cbc_dec_8way)
@@ -410,7 +410,7 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_cbc_dec_8way)
SYM_FUNC_START(cast6_ctr_8way)
@@ -438,7 +438,7 @@ SYM_FUNC_START(cast6_ctr_8way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_ctr_8way)
SYM_FUNC_START(cast6_xts_enc_8way)
@@ -465,7 +465,7 @@ SYM_FUNC_START(cast6_xts_enc_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_xts_enc_8way)
SYM_FUNC_START(cast6_xts_dec_8way)
@@ -492,5 +492,5 @@ SYM_FUNC_START(cast6_xts_dec_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_xts_dec_8way)


@@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2)
.Ldone2:
vzeroupper
ret
RET
.Lxorpart2:
# xor remaining bytes from partial register into output
@@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2)
.Ldone4:
vzeroupper
ret
RET
.Lxorpart4:
# xor remaining bytes from partial register into output
@@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2)
.Ldone8:
vzeroupper
lea -8(%r10),%rsp
ret
RET
.Lxorpart8:
# xor remaining bytes from partial register into output

Some files were not shown because too many files have changed in this diff.