Merge tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 core updates from Borislav Petkov:

 - Get rid of all the .fixup sections because this generates
   misleading/wrong stacktraces and confuses RELIABLE_STACKTRACE and
   LIVEPATCH as the backtrace misses the function which is being fixed
   up.

 - Add Straight Line Speculation mitigation support: a new compiler
   switch, -mharden-sls=, sticks an INT3 after a RET or an indirect
   branch in order to block speculation after them. Reportedly, CPUs do
   speculate behind such insns. (A sketch of the effect follows the
   commit list below.)

 - The usual set of cleanups and improvements

* tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  x86/entry_32: Fix segment exceptions
  objtool: Remove .fixup handling
  x86: Remove .fixup section
  x86/word-at-a-time: Remove .fixup usage
  x86/usercopy: Remove .fixup usage
  x86/usercopy_32: Simplify __copy_user_intel_nocache()
  x86/sgx: Remove .fixup usage
  x86/checksum_32: Remove .fixup usage
  x86/vmx: Remove .fixup usage
  x86/kvm: Remove .fixup usage
  x86/segment: Remove .fixup usage
  x86/fpu: Remove .fixup usage
  x86/xen: Remove .fixup usage
  x86/uaccess: Remove .fixup usage
  x86/futex: Remove .fixup usage
  x86/msr: Remove .fixup usage
  x86/extable: Extend extable functionality
  x86/entry_32: Remove .fixup usage
  x86/entry_64: Remove .fixup usage
  x86/copy_mc_64: Remove .fixup usage
  ...
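
To make that concrete, here is a minimal sketch of what the SLS
hardening does to ordinary code. Nothing below is taken from the tree;
the emitted assembly is an assumption about GCC's -mharden-sls=all
output:

/* sls_demo.c - build with and without -mharden-sls=all to compare */
int call_through(int (*fn)(void))
{
        return fn();    /* tail call: the compiler may emit jmp *%rax */
}

/*
 * Assumed codegen difference:
 *
 *      plain                   -mharden-sls=all
 *      ---------               ----------------
 *      jmp *%rax               jmp *%rax
 *                              int3   <- traps straight-line speculation
 *
 * Every ret likewise gains a trailing int3, which is why the assembly
 * sources in this series switch from bare "ret" to the RET macro.
 */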
Commit 64ad946152 — Linus Torvalds, 2022-01-12 16:31:19 -08:00
163 changed files with 1063 additions and 1365 deletions


@ -688,17 +688,6 @@ ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
endif
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
include $(srctree)/arch/$(SRCARCH)/Makefile
ifdef need-config


@ -473,6 +473,18 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@ -1952,7 +1964,7 @@ config EFI
config EFI_STUB
bool "EFI stub support"
depends on EFI && !X86_USE_3DNOW
depends on EFI
depends on $(cc-option,-mabi=ms) || X86_32
select RELOCATABLE
help


@ -342,10 +342,6 @@ config X86_USE_PPRO_CHECKSUM
def_bool y
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
config X86_USE_3DNOW
def_bool y
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
#
# P6_NOPs are a relatively minor optimization that require a family >=
# 6 processor, except that it is broken on certain VIA chips.


@ -12,6 +12,18 @@ else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
endif
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
# clang has the option -mstack-alignment for that purpose.
ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
@ -179,6 +191,10 @@ ifdef CONFIG_RETPOLINE
endif
endif
ifdef CONFIG_SLS
KBUILD_CFLAGS += -mharden-sls=all
endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG


@ -101,7 +101,7 @@ SYM_FUNC_START(__efi64_thunk)
pop %rbx
pop %rbp
ret
RET
SYM_FUNC_END(__efi64_thunk)
.code32


@ -813,7 +813,7 @@ SYM_FUNC_START(efi32_pe_entry)
2: popl %edi // restore callee-save registers
popl %ebx
leave
ret
RET
SYM_FUNC_END(efi32_pe_entry)
.section ".rodata"
@ -868,7 +868,7 @@ SYM_FUNC_START(startup32_set_idt_entry)
pop %ecx
pop %ebx
ret
RET
SYM_FUNC_END(startup32_set_idt_entry)
#endif
@ -884,7 +884,7 @@ SYM_FUNC_START(startup32_load_idt)
movl %eax, rva(boot32_idt_desc+2)(%ebp)
lidt rva(boot32_idt_desc)(%ebp)
#endif
ret
RET
SYM_FUNC_END(startup32_load_idt)
/*
@ -954,7 +954,7 @@ SYM_FUNC_START(startup32_check_sev_cbit)
popl %ebx
popl %eax
#endif
ret
RET
SYM_FUNC_END(startup32_check_sev_cbit)
/*


@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
#endif /* CONFIG_AMD_MEM_ENCRYPT */
ret
RET
SYM_FUNC_END(get_sev_encryption_bit)
/**
@ -92,7 +92,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid)
/* All good - return success */
xorl %eax, %eax
1:
ret
RET
2:
movl $-1, %eax
jmp 1b
@ -221,7 +221,7 @@ SYM_FUNC_START(set_sev_encryption_mask)
#endif
xor %rax, %rax
ret
RET
SYM_FUNC_END(set_sev_encryption_mask)
.data


@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial)
pxor T0, MSG
.Lld_partial_8:
ret
RET
SYM_FUNC_END(__load_partial)
/*
@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial)
mov %r10b, (%r9)
.Lst_partial_1:
ret
RET
SYM_FUNC_END(__store_partial)
/*
@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_init)
/*
@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_1:
movdqu STATE4, 0x00(STATEP)
@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_2:
movdqu STATE3, 0x00(STATEP)
@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_3:
movdqu STATE2, 0x00(STATEP)
@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out_4:
movdqu STATE1, 0x00(STATEP)
@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Lad_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_ad)
.macro encrypt_block a s0 s1 s2 s3 s4 i
@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_1:
movdqu STATE3, 0x00(STATEP)
@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_2:
movdqu STATE2, 0x00(STATEP)
@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_3:
movdqu STATE1, 0x00(STATEP)
@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out_4:
movdqu STATE0, 0x00(STATEP)
@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Lenc_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_enc)
/*
@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_1:
movdqu STATE3, 0x00(STATEP)
@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_2:
movdqu STATE2, 0x00(STATEP)
@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_3:
movdqu STATE1, 0x00(STATEP)
@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out_4:
movdqu STATE0, 0x00(STATEP)
@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
ret
RET
.Ldec_out:
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_dec)
/*
@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
/*
@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
movdqu MSG, (%rsi)
FRAME_END
ret
RET
SYM_FUNC_END(crypto_aegis128_aesni_final)


@ -525,7 +525,7 @@ ddq_add_8:
/* return updated IV */
vpshufb xbyteswap, xcounter, xcounter
vmovdqu xcounter, (p_iv)
ret
RET
.endm
/*


@ -1594,7 +1594,7 @@ SYM_FUNC_START(aesni_gcm_dec)
GCM_ENC_DEC dec
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec)
@ -1683,7 +1683,7 @@ SYM_FUNC_START(aesni_gcm_enc)
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc)
/*****************************************************************************
@ -1701,7 +1701,7 @@ SYM_FUNC_START(aesni_gcm_init)
FUNC_SAVE
GCM_INIT %arg3, %arg4,%arg5, %arg6
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init)
/*****************************************************************************
@ -1716,7 +1716,7 @@ SYM_FUNC_START(aesni_gcm_enc_update)
FUNC_SAVE
GCM_ENC_DEC enc
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update)
/*****************************************************************************
@ -1731,7 +1731,7 @@ SYM_FUNC_START(aesni_gcm_dec_update)
FUNC_SAVE
GCM_ENC_DEC dec
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update)
/*****************************************************************************
@ -1746,7 +1746,7 @@ SYM_FUNC_START(aesni_gcm_finalize)
FUNC_SAVE
GCM_COMPLETE %arg3 %arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize)
#endif
@ -1762,7 +1762,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_256a)
SYM_FUNC_END_ALIAS(_key_expansion_128)
@ -1787,7 +1787,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a)
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_192a)
SYM_FUNC_START_LOCAL(_key_expansion_192b)
@ -1806,7 +1806,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_192b)
SYM_FUNC_START_LOCAL(_key_expansion_256b)
@ -1818,7 +1818,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b)
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
RET
SYM_FUNC_END(_key_expansion_256b)
/*
@ -1933,7 +1933,7 @@ SYM_FUNC_START(aesni_set_key)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_set_key)
/*
@ -1957,7 +1957,7 @@ SYM_FUNC_START(aesni_enc)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_enc)
/*
@ -2014,7 +2014,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
aesenc KEY, STATE
movaps 0x70(TKEYP), KEY
aesenclast KEY, STATE
ret
RET
SYM_FUNC_END(_aesni_enc1)
/*
@ -2122,7 +2122,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
aesenclast KEY, STATE2
aesenclast KEY, STATE3
aesenclast KEY, STATE4
ret
RET
SYM_FUNC_END(_aesni_enc4)
/*
@ -2147,7 +2147,7 @@ SYM_FUNC_START(aesni_dec)
popl KEYP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_dec)
/*
@ -2204,7 +2204,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
aesdec KEY, STATE
movaps 0x70(TKEYP), KEY
aesdeclast KEY, STATE
ret
RET
SYM_FUNC_END(_aesni_dec1)
/*
@ -2312,7 +2312,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
aesdeclast KEY, STATE2
aesdeclast KEY, STATE3
aesdeclast KEY, STATE4
ret
RET
SYM_FUNC_END(_aesni_dec4)
/*
@ -2372,7 +2372,7 @@ SYM_FUNC_START(aesni_ecb_enc)
popl LEN
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ecb_enc)
/*
@ -2433,7 +2433,7 @@ SYM_FUNC_START(aesni_ecb_dec)
popl LEN
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ecb_dec)
/*
@ -2477,7 +2477,7 @@ SYM_FUNC_START(aesni_cbc_enc)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cbc_enc)
/*
@ -2570,7 +2570,7 @@ SYM_FUNC_START(aesni_cbc_dec)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cbc_dec)
/*
@ -2627,7 +2627,7 @@ SYM_FUNC_START(aesni_cts_cbc_enc)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cts_cbc_enc)
/*
@ -2688,7 +2688,7 @@ SYM_FUNC_START(aesni_cts_cbc_dec)
popl IVP
#endif
FRAME_END
ret
RET
SYM_FUNC_END(aesni_cts_cbc_dec)
.pushsection .rodata
@ -2725,7 +2725,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init)
mov $1, TCTR_LOW
movq TCTR_LOW, INC
movq CTR, TCTR_LOW
ret
RET
SYM_FUNC_END(_aesni_inc_init)
/*
@ -2753,7 +2753,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
.Linc_low:
movaps CTR, IV
pshufb BSWAP_MASK, IV
ret
RET
SYM_FUNC_END(_aesni_inc)
/*
@ -2816,7 +2816,7 @@ SYM_FUNC_START(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
FRAME_END
ret
RET
SYM_FUNC_END(aesni_ctr_enc)
#endif
@ -2932,7 +2932,7 @@ SYM_FUNC_START(aesni_xts_encrypt)
popl IVP
#endif
FRAME_END
ret
RET
.Lxts_enc_1x:
add $64, LEN
@ -3092,7 +3092,7 @@ SYM_FUNC_START(aesni_xts_decrypt)
popl IVP
#endif
FRAME_END
ret
RET
.Lxts_dec_1x:
add $64, LEN


@ -1767,7 +1767,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2)
FUNC_SAVE
INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init_avx_gen2)
###############################################################################
@ -1788,15 +1788,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
FUNC_RESTORE
ret
RET
key_128_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
FUNC_RESTORE
ret
RET
key_256_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
###############################################################################
@ -1817,15 +1817,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
FUNC_RESTORE
ret
RET
key_128_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
FUNC_RESTORE
ret
RET
key_256_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
###############################################################################
@ -1846,15 +1846,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
FUNC_RESTORE
ret
RET
key_128_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
FUNC_RESTORE
ret
RET
key_256_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
###############################################################################
@ -2735,7 +2735,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4)
FUNC_SAVE
INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_init_avx_gen4)
###############################################################################
@ -2756,15 +2756,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
FUNC_RESTORE
ret
RET
key_128_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
FUNC_RESTORE
ret
RET
key_256_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
###############################################################################
@ -2785,15 +2785,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
FUNC_RESTORE
ret
RET
key_128_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
FUNC_RESTORE
ret
RET
key_256_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
###############################################################################
@ -2814,13 +2814,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
FUNC_RESTORE
ret
RET
key_128_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
FUNC_RESTORE
ret
RET
key_256_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
FUNC_RESTORE
ret
RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)


@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
movdqu %xmm1,0x10(%rdi)
movdqu %xmm14,0x20(%rdi)
.Lendofloop:
ret
RET
SYM_FUNC_END(blake2s_compress_ssse3)
#ifdef CONFIG_AS_AVX512
@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu %xmm1,0x10(%rdi)
vmovdqu %xmm4,0x20(%rdi)
vzeroupper
retq
RET
SYM_FUNC_END(blake2s_compress_avx512)
#endif /* CONFIG_AS_AVX512 */


@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk)
jnz .L__enc_xor;
write_block();
ret;
RET;
.L__enc_xor:
xor_block();
ret;
RET;
SYM_FUNC_END(__blowfish_enc_blk)
SYM_FUNC_START(blowfish_dec_blk)
@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk)
movq %r11, %r12;
ret;
RET;
SYM_FUNC_END(blowfish_dec_blk)
/**********************************************************************
@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way)
popq %rbx;
popq %r12;
ret;
RET;
.L__enc_xor4:
xor_block4();
popq %rbx;
popq %r12;
ret;
RET;
SYM_FUNC_END(__blowfish_enc_blk_4way)
SYM_FUNC_START(blowfish_dec_blk_4way)
@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way)
popq %rbx;
popq %r12;
ret;
RET;
SYM_FUNC_END(blowfish_dec_blk_4way)


@ -192,7 +192,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@ -200,7 +200,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@ -778,7 +778,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
FRAME_END
ret;
RET;
.align 8
.Lenc_max32:
@ -865,7 +865,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
FRAME_END
ret;
RET;
.align 8
.Ldec_max32:
@ -906,7 +906,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_enc_16way)
SYM_FUNC_START(camellia_ecb_dec_16way)
@ -936,7 +936,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_dec_16way)
SYM_FUNC_START(camellia_cbc_dec_16way)
@ -987,5 +987,5 @@ SYM_FUNC_START(camellia_cbc_dec_16way)
%xmm8, %rsi);
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_cbc_dec_16way)


@ -226,7 +226,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@ -234,7 +234,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
ret;
RET;
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@ -814,7 +814,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
FRAME_END
ret;
RET;
.align 8
.Lenc_max32:
@ -901,7 +901,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
FRAME_END
ret;
RET;
.align 8
.Ldec_max32:
@ -946,7 +946,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_enc_32way)
SYM_FUNC_START(camellia_ecb_dec_32way)
@ -980,7 +980,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_ecb_dec_32way)
SYM_FUNC_START(camellia_cbc_dec_32way)
@ -1047,5 +1047,5 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
addq $(16 * 32), %rsp;
FRAME_END
ret;
RET;
SYM_FUNC_END(camellia_cbc_dec_32way)


@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk)
enc_outunpack(mov, RT1);
movq RR12, %r12;
ret;
RET;
.L__enc_xor:
enc_outunpack(xor, RT1);
movq RR12, %r12;
ret;
RET;
SYM_FUNC_END(__camellia_enc_blk)
SYM_FUNC_START(camellia_dec_blk)
@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk)
dec_outunpack();
movq RR12, %r12;
ret;
RET;
SYM_FUNC_END(camellia_dec_blk)
/**********************************************************************
@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way)
movq RR12, %r12;
popq %rbx;
ret;
RET;
.L__enc2_xor:
enc_outunpack2(xor, RT2);
movq RR12, %r12;
popq %rbx;
ret;
RET;
SYM_FUNC_END(__camellia_enc_blk_2way)
SYM_FUNC_START(camellia_dec_blk_2way)
@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way)
movq RR12, %r12;
movq RXOR, %rbx;
ret;
RET;
SYM_FUNC_END(camellia_dec_blk_2way)


@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
RET;
SYM_FUNC_END(__cast5_enc_blk16)
.align 16
@ -352,7 +352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
RET;
.L__skip_dec:
vpsrldq $4, RKR, RKR;
@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ecb_enc_16way)
SYM_FUNC_START(cast5_ecb_dec_16way)
@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ecb_dec_16way)
SYM_FUNC_START(cast5_cbc_dec_16way)
@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_cbc_dec_16way)
SYM_FUNC_START(cast5_ctr_16way)
@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast5_ctr_16way)


@ -289,7 +289,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
RET;
SYM_FUNC_END(__cast6_enc_blk8)
.align 8
@ -336,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
RET;
SYM_FUNC_END(__cast6_dec_blk8)
SYM_FUNC_START(cast6_ecb_enc_8way)
@ -359,7 +359,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_ecb_enc_8way)
SYM_FUNC_START(cast6_ecb_dec_8way)
@ -382,7 +382,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way)
popq %r15;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_ecb_dec_8way)
SYM_FUNC_START(cast6_cbc_dec_8way)
@ -408,5 +408,5 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
popq %r15;
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(cast6_cbc_dec_8way)


@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2)
.Ldone2:
vzeroupper
ret
RET
.Lxorpart2:
# xor remaining bytes from partial register into output
@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2)
.Ldone4:
vzeroupper
ret
RET
.Lxorpart4:
# xor remaining bytes from partial register into output
@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2)
.Ldone8:
vzeroupper
lea -8(%r10),%rsp
ret
RET
.Lxorpart8:
# xor remaining bytes from partial register into output


@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
.Ldone2:
vzeroupper
ret
RET
.Lxorpart2:
# xor remaining bytes from partial register into output
@ -432,7 +432,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
.Ldone4:
vzeroupper
ret
RET
.Lxorpart4:
# xor remaining bytes from partial register into output
@ -812,7 +812,7 @@ SYM_FUNC_START(chacha_8block_xor_avx512vl)
.Ldone8:
vzeroupper
ret
RET
.Lxorpart8:
# xor remaining bytes from partial register into output


@ -108,7 +108,7 @@ SYM_FUNC_START_LOCAL(chacha_permute)
sub $2,%r8d
jnz .Ldoubleround
ret
RET
SYM_FUNC_END(chacha_permute)
SYM_FUNC_START(chacha_block_xor_ssse3)
@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
.Ldone:
FRAME_END
ret
RET
.Lxorpart:
# xor remaining bytes from partial register into output
@ -217,7 +217,7 @@ SYM_FUNC_START(hchacha_block_ssse3)
movdqu %xmm3,0x10(%rsi)
FRAME_END
ret
RET
SYM_FUNC_END(hchacha_block_ssse3)
SYM_FUNC_START(chacha_4block_xor_ssse3)
@ -762,7 +762,7 @@ SYM_FUNC_START(chacha_4block_xor_ssse3)
.Ldone4:
lea -8(%r10),%rsp
ret
RET
.Lxorpart4:
# xor remaining bytes from partial register into output


@ -236,5 +236,5 @@ fold_64:
pxor %xmm2, %xmm1
pextrd $0x01, %xmm1, %eax
ret
RET
SYM_FUNC_END(crc32_pclmul_le_16)


@ -306,7 +306,7 @@ do_return:
popq %rsi
popq %rdi
popq %rbx
ret
RET
SYM_FUNC_END(crc_pcl)
.section .rodata, "a", @progbits


@ -257,7 +257,7 @@ SYM_FUNC_START(crc_t10dif_pcl)
# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
pextrw $0, %xmm0, %eax
ret
RET
.align 16
.Lless_than_256_bytes:


@ -243,7 +243,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
popq %r12;
popq %rbx;
ret;
RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
/***********************************************************************
@ -528,7 +528,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
popq %r12;
popq %rbx;
ret;
RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
.section .rodata, "a", @progbits


@ -85,7 +85,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
psrlq $1, T2
pxor T2, T1
pxor T1, DATA
ret
RET
SYM_FUNC_END(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const u128 *shash) */
@ -99,7 +99,7 @@ SYM_FUNC_START(clmul_ghash_mul)
pshufb BSWAP, DATA
movups DATA, (%rdi)
FRAME_END
ret
RET
SYM_FUNC_END(clmul_ghash_mul)
/*
@ -128,5 +128,5 @@ SYM_FUNC_START(clmul_ghash_update)
movups DATA, (%rdi)
.Lupdate_just_ret:
FRAME_END
ret
RET
SYM_FUNC_END(clmul_ghash_update)


@ -153,5 +153,5 @@ SYM_FUNC_START(nh_avx2)
vpaddq T1, T0, T0
vpaddq T4, T0, T0
vmovdqu T0, (HASH)
ret
RET
SYM_FUNC_END(nh_avx2)


@ -119,5 +119,5 @@ SYM_FUNC_START(nh_sse2)
paddq PASS2_SUMS, T1
movdqu T0, 0x00(HASH)
movdqu T1, 0x10(HASH)
ret
RET
SYM_FUNC_END(nh_sse2)


@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(__serpent_enc_blk8_avx)
.align 8
@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(__serpent_dec_blk8_avx)
SYM_FUNC_START(serpent_ecb_enc_8way_avx)
@ -673,7 +673,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx)
store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_ecb_enc_8way_avx)
SYM_FUNC_START(serpent_ecb_dec_8way_avx)
@ -691,7 +691,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx)
store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_ecb_dec_8way_avx)
SYM_FUNC_START(serpent_cbc_dec_8way_avx)
@ -709,5 +709,5 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx)
store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_cbc_dec_8way_avx)


@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(__serpent_enc_blk16)
.align 8
@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(__serpent_dec_blk16)
SYM_FUNC_START(serpent_ecb_enc_16way)
@ -677,7 +677,7 @@ SYM_FUNC_START(serpent_ecb_enc_16way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_ecb_enc_16way)
SYM_FUNC_START(serpent_ecb_dec_16way)
@ -699,7 +699,7 @@ SYM_FUNC_START(serpent_ecb_dec_16way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_ecb_dec_16way)
SYM_FUNC_START(serpent_cbc_dec_16way)
@ -722,5 +722,5 @@ SYM_FUNC_START(serpent_cbc_dec_16way)
vzeroupper;
FRAME_END
ret;
RET;
SYM_FUNC_END(serpent_cbc_dec_16way)


@ -553,12 +553,12 @@ SYM_FUNC_START(__serpent_enc_blk_4way)
write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
RET;
.L__enc_xor4:
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
ret;
RET;
SYM_FUNC_END(__serpent_enc_blk_4way)
SYM_FUNC_START(serpent_dec_blk_4way)
@ -612,5 +612,5 @@ SYM_FUNC_START(serpent_dec_blk_4way)
movl arg_dst(%esp), %eax;
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
ret;
RET;
SYM_FUNC_END(serpent_dec_blk_4way)


@ -675,13 +675,13 @@ SYM_FUNC_START(__serpent_enc_blk_8way)
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
RET;
.L__enc_xor8:
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(__serpent_enc_blk_8way)
SYM_FUNC_START(serpent_dec_blk_8way)
@ -735,5 +735,5 @@ SYM_FUNC_START(serpent_dec_blk_8way)
write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
ret;
RET;
SYM_FUNC_END(serpent_dec_blk_8way)


@ -674,7 +674,7 @@ _loop3:
pop %r12
pop %rbx
ret
RET
SYM_FUNC_END(\name)
.endm


@ -290,7 +290,7 @@ SYM_FUNC_START(sha1_ni_transform)
mov %rbp, %rsp
pop %rbp
ret
RET
SYM_FUNC_END(sha1_ni_transform)
.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16


@ -99,7 +99,7 @@
pop %rbp
pop %r12
pop %rbx
ret
RET
SYM_FUNC_END(\name)
.endm


@ -458,7 +458,7 @@ done_hash:
popq %r13
popq %r12
popq %rbx
ret
RET
SYM_FUNC_END(sha256_transform_avx)
.section .rodata.cst256.K256, "aM", @progbits, 256


@ -710,7 +710,7 @@ done_hash:
popq %r13
popq %r12
popq %rbx
ret
RET
SYM_FUNC_END(sha256_transform_rorx)
.section .rodata.cst512.K256, "aM", @progbits, 512


@ -472,7 +472,7 @@ done_hash:
popq %r12
popq %rbx
ret
RET
SYM_FUNC_END(sha256_transform_ssse3)
.section .rodata.cst256.K256, "aM", @progbits, 256


@ -326,7 +326,7 @@ SYM_FUNC_START(sha256_ni_transform)
.Ldone_hash:
ret
RET
SYM_FUNC_END(sha256_ni_transform)
.section .rodata.cst256.K256, "aM", @progbits, 256


@ -361,7 +361,7 @@ updateblock:
pop %rbx
nowork:
ret
RET
SYM_FUNC_END(sha512_transform_avx)
########################################################################


@ -679,7 +679,7 @@ done_hash:
pop %r12
pop %rbx
ret
RET
SYM_FUNC_END(sha512_transform_rorx)
########################################################################


@ -363,7 +363,7 @@ updateblock:
pop %rbx
nowork:
ret
RET
SYM_FUNC_END(sha512_transform_ssse3)
########################################################################


@ -246,7 +246,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt4)
.Lblk4_store_output_done:
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx_crypt4)
.align 8
@ -356,7 +356,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
vpshufb RTMP2, RB3, RB3;
FRAME_END
ret;
RET;
SYM_FUNC_END(__sm4_crypt_blk8)
/*
@ -412,7 +412,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
.Lblk8_store_output_done:
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx_crypt8)
/*
@ -487,7 +487,7 @@ SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
/*
@ -537,7 +537,7 @@ SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
/*
@ -590,5 +590,5 @@ SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)


@ -268,7 +268,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
vpshufb RTMP2, RB3, RB3;
FRAME_END
ret;
RET;
SYM_FUNC_END(__sm4_crypt_blk16)
#define inc_le128(x, minus_one, tmp) \
@ -387,7 +387,7 @@ SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
/*
@ -441,7 +441,7 @@ SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
/*
@ -497,5 +497,5 @@ SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
vzeroall;
FRAME_END
ret;
RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)


@ -267,7 +267,7 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
RET;
SYM_FUNC_END(__twofish_enc_blk8)
.align 8
@ -307,7 +307,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
ret;
RET;
SYM_FUNC_END(__twofish_dec_blk8)
SYM_FUNC_START(twofish_ecb_enc_8way)
@ -327,7 +327,7 @@ SYM_FUNC_START(twofish_ecb_enc_8way)
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
FRAME_END
ret;
RET;
SYM_FUNC_END(twofish_ecb_enc_8way)
SYM_FUNC_START(twofish_ecb_dec_8way)
@ -347,7 +347,7 @@ SYM_FUNC_START(twofish_ecb_dec_8way)
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
ret;
RET;
SYM_FUNC_END(twofish_ecb_dec_8way)
SYM_FUNC_START(twofish_cbc_dec_8way)
@ -372,5 +372,5 @@ SYM_FUNC_START(twofish_cbc_dec_8way)
popq %r12;
FRAME_END
ret;
RET;
SYM_FUNC_END(twofish_cbc_dec_8way)


@ -260,7 +260,7 @@ SYM_FUNC_START(twofish_enc_blk)
pop %ebx
pop %ebp
mov $1, %eax
ret
RET
SYM_FUNC_END(twofish_enc_blk)
SYM_FUNC_START(twofish_dec_blk)
@ -317,5 +317,5 @@ SYM_FUNC_START(twofish_dec_blk)
pop %ebx
pop %ebp
mov $1, %eax
ret
RET
SYM_FUNC_END(twofish_dec_blk)


@ -258,7 +258,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
ret;
RET;
.L__enc_xor3:
outunpack_enc3(xor);
@ -266,7 +266,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
ret;
RET;
SYM_FUNC_END(__twofish_enc_blk_3way)
SYM_FUNC_START(twofish_dec_blk_3way)
@ -301,5 +301,5 @@ SYM_FUNC_START(twofish_dec_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
ret;
RET;
SYM_FUNC_END(twofish_dec_blk_3way)


@ -252,7 +252,7 @@ SYM_FUNC_START(twofish_enc_blk)
popq R1
movl $1,%eax
ret
RET
SYM_FUNC_END(twofish_enc_blk)
SYM_FUNC_START(twofish_dec_blk)
@ -304,5 +304,5 @@ SYM_FUNC_START(twofish_dec_blk)
popq R1
movl $1,%eax
ret
RET
SYM_FUNC_END(twofish_dec_blk)


@ -268,19 +268,16 @@
1: popl %ds
2: popl %es
3: popl %fs
addl $(4 + \pop), %esp /* pop the unused "gs" slot */
4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */
IRET_FRAME
.pushsection .fixup, "ax"
4: movl $0, (%esp)
jmp 1b
5: movl $0, (%esp)
jmp 2b
6: movl $0, (%esp)
jmp 3b
.popsection
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 5b)
_ASM_EXTABLE(3b, 6b)
/*
* There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is
* ASM the registers are known and we can trivially hard-code them.
*/
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS)
_ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES)
_ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
.endm
.macro RESTORE_ALL_NMI cr3_reg:req pop=0
@ -740,7 +737,7 @@ SYM_FUNC_START(schedule_tail_wrapper)
popl %eax
FRAME_END
ret
RET
SYM_FUNC_END(schedule_tail_wrapper)
.popsection
@ -925,10 +922,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
sti
sysexit
.pushsection .fixup, "ax"
2: movl $0, PT_FS(%esp)
jmp 1b
.popsection
2: movl $0, PT_FS(%esp)
jmp 1b
_ASM_EXTABLE(1b, 2b)
.Lsysenter_fix_flags:
@ -996,8 +991,7 @@ restore_all_switch_stack:
*/
iret
.section .fixup, "ax"
SYM_CODE_START(asm_iret_error)
.Lasm_iret_error:
pushl $0 # no error code
pushl $iret_error
@ -1014,9 +1008,8 @@ SYM_CODE_START(asm_iret_error)
#endif
jmp handle_exception
SYM_CODE_END(asm_iret_error)
.previous
_ASM_EXTABLE(.Lirq_return, asm_iret_error)
_ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK


@ -738,14 +738,10 @@ SYM_FUNC_START(asm_load_gs_index)
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
swapgs
FRAME_END
ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)
RET
_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
.section .fixup, "ax"
/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
.Lbad_gs:
swapgs /* switch back to user gs */
.macro ZAP_GS
/* This can't be a string because the preprocessor needs to see it. */
@ -756,8 +752,11 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
xorl %eax, %eax
movl %eax, %gs
jmp 2b
SYM_CODE_END(.Lbad_gs)
.previous
_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)
#ifdef CONFIG_XEN_PV
/*
@ -889,7 +888,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
* is needed here.
*/
SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
ret
RET
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
@ -910,7 +909,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_kernel_gsbase:
FENCE_SWAPGS_KERNEL_ENTRY
ret
RET
SYM_CODE_END(paranoid_entry)
/*
@ -989,7 +988,7 @@ SYM_CODE_START_LOCAL(error_entry)
movq %rax, %rsp /* switch stack */
ENCODE_FRAME_POINTER
pushq %r12
ret
RET
/*
* There are two places in the kernel that can potentially fault with
@ -1020,7 +1019,7 @@ SYM_CODE_START_LOCAL(error_entry)
*/
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
ret
RET
.Lbstep_iret:
/* Fix truncated RIP */


@ -24,7 +24,7 @@ SYM_CODE_START_NOALIGN(\name)
popl %edx
popl %ecx
popl %eax
ret
RET
_ASM_NOKPROBE(\name)
SYM_CODE_END(\name)
.endm


@ -50,7 +50,7 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %rsi
popq %rdi
popq %rbp
ret
RET
_ASM_NOKPROBE(__thunk_restore)
SYM_CODE_END(__thunk_restore)
#endif


@ -77,7 +77,6 @@ SECTIONS
.text : {
*(.text*)
*(.fixup)
} :text =0x90909090,


@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
popl %ecx
CFI_RESTORE ecx
CFI_ADJUST_CFA_OFFSET -4
ret
RET
CFI_ENDPROC
.size __kernel_vsyscall,.-__kernel_vsyscall


@ -81,7 +81,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
pop %rbx
leave
.cfi_def_cfa %rsp, 8
ret
RET
/* The out-of-line code runs with the pre-leave stack frame. */
.cfi_def_cfa %rbp, 16


@ -19,17 +19,17 @@ __vsyscall_page:
mov $__NR_gettimeofday, %rax
syscall
ret
RET
.balign 1024, 0xcc
mov $__NR_time, %rax
syscall
ret
RET
.balign 1024, 0xcc
mov $__NR_getcpu, %rax
syscall
ret
RET
.balign 4096, 0xcc


@ -152,6 +152,33 @@
#else /* ! __ASSEMBLY__ */
# define DEFINE_EXTABLE_TYPE_REG \
".macro extable_type_reg type:req reg:req\n" \
".set found, 0\n" \
".set regnr, 0\n" \
".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n" \
".ifc \\reg, %%\\rs\n" \
".set found, found+1\n" \
".long \\type + (regnr << 8)\n" \
".endif\n" \
".set regnr, regnr+1\n" \
".endr\n" \
".set regnr, 0\n" \
".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n" \
".ifc \\reg, %%\\rs\n" \
".set found, found+1\n" \
".long \\type + (regnr << 8)\n" \
".endif\n" \
".set regnr, regnr+1\n" \
".endr\n" \
".if (found != 1)\n" \
".error \"extable_type_reg: bad register argument\"\n" \
".endif\n" \
".endm\n"
# define UNDEFINE_EXTABLE_TYPE_REG \
".purgem extable_type_reg\n"
# define _ASM_EXTABLE_TYPE(from, to, type) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 4\n" \
@ -160,6 +187,16 @@
" .long " __stringify(type) " \n" \
" .popsection\n"
# define _ASM_EXTABLE_TYPE_REG(from, to, type, reg) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 4\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
DEFINE_EXTABLE_TYPE_REG \
"extable_type_reg reg=" __stringify(reg) ", type=" __stringify(type) " \n"\
UNDEFINE_EXTABLE_TYPE_REG \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
/*


@ -21,7 +21,7 @@
*/
struct exception_table_entry {
int insn, fixup, type;
int insn, fixup, data;
};
struct pt_regs;
@ -31,8 +31,8 @@ struct pt_regs;
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->type = (b)->type; \
(b)->type = (tmp).type; \
(a)->data = (b)->data; \
(b)->data = (tmp).data; \
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr,


@ -2,6 +2,36 @@
#ifndef _ASM_X86_EXTABLE_FIXUP_TYPES_H
#define _ASM_X86_EXTABLE_FIXUP_TYPES_H
/*
* Our IMM is signed, as such it must live at the top end of the word. Also,
* since C99 hex constants are of ambiguous type, force cast the mask to 'int'
* so that FIELD_GET() will DTRT and sign extend the value when it extracts it.
*/
#define EX_DATA_TYPE_MASK ((int)0x000000FF)
#define EX_DATA_REG_MASK ((int)0x00000F00)
#define EX_DATA_FLAG_MASK ((int)0x0000F000)
#define EX_DATA_IMM_MASK ((int)0xFFFF0000)
#define EX_DATA_REG_SHIFT 8
#define EX_DATA_FLAG_SHIFT 12
#define EX_DATA_IMM_SHIFT 16
#define EX_DATA_REG(reg) ((reg) << EX_DATA_REG_SHIFT)
#define EX_DATA_FLAG(flag) ((flag) << EX_DATA_FLAG_SHIFT)
#define EX_DATA_IMM(imm) ((imm) << EX_DATA_IMM_SHIFT)
/* segment regs */
#define EX_REG_DS EX_DATA_REG(8)
#define EX_REG_ES EX_DATA_REG(9)
#define EX_REG_FS EX_DATA_REG(10)
#define EX_REG_GS EX_DATA_REG(11)
/* flags */
#define EX_FLAG_CLEAR_AX EX_DATA_FLAG(1)
#define EX_FLAG_CLEAR_DX EX_DATA_FLAG(2)
#define EX_FLAG_CLEAR_AX_DX EX_DATA_FLAG(3)
/* types */
#define EX_TYPE_NONE 0
#define EX_TYPE_DEFAULT 1
#define EX_TYPE_FAULT 2
@ -9,14 +39,29 @@
#define EX_TYPE_COPY 4
#define EX_TYPE_CLEAR_FS 5
#define EX_TYPE_FPU_RESTORE 6
#define EX_TYPE_WRMSR 7
#define EX_TYPE_RDMSR 8
#define EX_TYPE_BPF 9
#define EX_TYPE_BPF 7
#define EX_TYPE_WRMSR 8
#define EX_TYPE_RDMSR 9
#define EX_TYPE_WRMSR_SAFE 10 /* reg := -EIO */
#define EX_TYPE_RDMSR_SAFE 11 /* reg := -EIO */
#define EX_TYPE_WRMSR_IN_MCE 12
#define EX_TYPE_RDMSR_IN_MCE 13
#define EX_TYPE_DEFAULT_MCE_SAFE 14
#define EX_TYPE_FAULT_MCE_SAFE 15
#define EX_TYPE_WRMSR_IN_MCE 10
#define EX_TYPE_RDMSR_IN_MCE 11
#define EX_TYPE_POP_REG 16 /* sp += sizeof(long) */
#define EX_TYPE_POP_ZERO (EX_TYPE_POP_REG | EX_DATA_IMM(0))
#define EX_TYPE_DEFAULT_MCE_SAFE 12
#define EX_TYPE_FAULT_MCE_SAFE 13
#define EX_TYPE_IMM_REG 17 /* reg := (long)imm */
#define EX_TYPE_EFAULT_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(-EFAULT))
#define EX_TYPE_ZERO_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(0))
#define EX_TYPE_ONE_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(1))
#define EX_TYPE_FAULT_SGX 18
#define EX_TYPE_UCOPY_LEN 19 /* cx := reg + imm*cx */
#define EX_TYPE_UCOPY_LEN1 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(1))
#define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
#define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))
#endif
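
For reference, a short sketch of how the C side can unpack that data
word. The helper names are illustrative, not necessarily what the tree
uses; the int-cast masks above are what let FIELD_GET() sign-extend the
IMM field:

#include <linux/bitfield.h>

static int ex_type(int data) { return FIELD_GET(EX_DATA_TYPE_MASK, data); }
static int ex_reg(int data)  { return FIELD_GET(EX_DATA_REG_MASK,  data); }
static int ex_flag(int data) { return FIELD_GET(EX_DATA_FLAG_MASK, data); }
static int ex_imm(int data)  { return FIELD_GET(EX_DATA_IMM_MASK,  data); }

/*
 * E.g. EX_TYPE_EFAULT_REG == EX_TYPE_IMM_REG | EX_DATA_IMM(-EFAULT):
 * ex_type() yields EX_TYPE_IMM_REG and ex_imm() yields -EFAULT, so one
 * handler can implement "reg := (long)imm" for EFAULT_REG, ZERO_REG
 * and ONE_REG alike.
 */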


@ -17,13 +17,9 @@ do { \
int oldval = 0, ret; \
asm volatile("1:\t" insn "\n" \
"2:\n" \
"\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0)); \
: "0" (oparg), "1" (0)); \
if (ret) \
goto label; \
*oval = oldval; \
@ -39,15 +35,11 @@ do { \
"3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t2b\n" \
"4:\n" \
"\t.section .fixup,\"ax\"\n" \
"5:\tmov\t%5, %1\n" \
"\tjmp\t4b\n" \
"\t.previous\n" \
_ASM_EXTABLE_UA(1b, 5b) \
_ASM_EXTABLE_UA(3b, 5b) \
_ASM_EXTABLE_TYPE_REG(1b, 4b, EX_TYPE_EFAULT_REG, %1) \
_ASM_EXTABLE_TYPE_REG(3b, 4b, EX_TYPE_EFAULT_REG, %1) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0)); \
: "r" (oparg), "1" (0)); \
if (ret) \
goto label; \
*oval = oldval; \
@ -95,15 +87,11 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
asm volatile("\n"
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
"2:\n"
"\t.section .fixup, \"ax\"\n"
"3:\tmov %3, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
_ASM_EXTABLE_UA(1b, 3b)
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "1" (oldval)
: "r" (newval), "1" (oldval)
: "memory"
);
user_access_end();


@ -15,6 +15,8 @@
#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf)
#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4))
int pt_regs_offset(struct pt_regs *regs, int regno);
bool insn_has_rep_prefix(struct insn *insn);
void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);


@ -18,6 +18,20 @@
#define __ALIGN_STR __stringify(__ALIGN)
#endif
#ifdef CONFIG_SLS
#define RET ret; int3
#else
#define RET ret
#endif
#else /* __ASSEMBLY__ */
#ifdef CONFIG_SLS
#define ASM_RET "ret; int3\n\t"
#else
#define ASM_RET "ret\n\t"
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_LINKAGE_H */
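
Usage is mechanical: assembly sources write RET where they used to
write a bare ret, and C files that emit out-of-line asm strings (as the
paravirt and qspinlock hunks in this series do) splice in ASM_RET. A
minimal, self-contained sketch — the function name is made up:

#include <asm/linkage.h>

/* out-of-line asm helper; gains a trailing int3 when CONFIG_SLS=y */
extern void sls_dummy(void);
asm(".pushsection .text\n"
    ".global sls_dummy\n"
    "sls_dummy:\n\t"
    ASM_RET
    ".popsection");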


@ -1,15 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMX_H
#define _ASM_X86_MMX_H
/*
* MMX 3Dnow! helper operations
*/
#include <linux/types.h>
extern void *_mmx_memcpy(void *to, const void *from, size_t size);
extern void mmx_clear_page(void *page);
extern void mmx_copy_page(void *to, void *from);
#endif /* _ASM_X86_MMX_H */


@ -137,17 +137,11 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
{
DECLARE_ARGS(val, low, high);
asm volatile("2: rdmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err]\n\t"
"xorl %%eax, %%eax\n\t"
"xorl %%edx, %%edx\n\t"
"jmp 1b\n\t"
".previous\n\t"
_ASM_EXTABLE(2b, 3b)
asm volatile("1: rdmsr ; xor %[err],%[err]\n"
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), [fault] "i" (-EIO));
: "c" (msr));
if (tracepoint_enabled(read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
return EAX_EDX_VAL(val, low, high);
@ -169,15 +163,11 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
int err;
asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err] ; jmp 1b\n\t"
".previous\n\t"
_ASM_EXTABLE(2b, 3b)
asm volatile("1: wrmsr ; xor %[err],%[err]\n"
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
: [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "c" (msr), "0" (low), "d" (high)
: "memory");
if (tracepoint_enabled(write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
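
With the .fixup stubs gone, their work moves into the exception
handler. A rough sketch of the recovery for EX_TYPE_RDMSR_SAFE —
pt_regs_nr() and ex_fixup_addr() are assumptions about the extable
rework's internals, but the effect mirrors the deleted stub: the error
register becomes -EIO, the rdmsr outputs are zeroed, and execution
resumes at the "2:" label:

static bool ex_handler_rdmsr_safe(const struct exception_table_entry *e,
                                  struct pt_regs *regs)
{
        /* what "3: mov %[fault],%[err]; xorl eax; xorl edx" used to do */
        regs->ax = 0;
        regs->dx = 0;
        *pt_regs_nr(regs, FIELD_GET(EX_DATA_REG_MASK, e->data)) = -EIO;
        regs->ip = ex_fixup_addr(e);    /* continue after the insn */
        return true;
}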


@ -19,19 +19,6 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
static inline void clear_page(void *page)
{
mmx_clear_page(page);
}
static inline void copy_page(void *to, void *from)
{
mmx_copy_page(to, from);
}
#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>
static inline void clear_page(void *page)
@ -43,7 +30,6 @@ static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_H */


@ -671,7 +671,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
"call " #func ";" \
PV_RESTORE_ALL_CALLER_REGS \
FRAME_END \
"ret;" \
ASM_RET \
".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
".popsection")


@ -48,7 +48,7 @@ asm (".pushsection .text;"
"jne .slowpath;"
"pop %rdx;"
FRAME_END
"ret;"
ASM_RET
".slowpath: "
"push %rsi;"
"movzbl %al,%esi;"
@ -56,7 +56,7 @@ asm (".pushsection .text;"
"pop %rsi;"
"pop %rdx;"
FRAME_END
"ret;"
ASM_RET
".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
".popsection");


@ -35,11 +35,7 @@
# define NEED_CMOV 0
#endif
#ifdef CONFIG_X86_USE_3DNOW
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
#else
# define NEED_3DNOW 0
#endif
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))


@ -307,14 +307,7 @@ do { \
\
asm volatile(" \n" \
"1: movl %k0,%%" #seg " \n" \
\
".section .fixup,\"ax\" \n" \
"2: xorl %k0,%k0 \n" \
" jmp 1b \n" \
".previous \n" \
\
_ASM_EXTABLE(1b, 2b) \
\
_ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k0)\
: "+r" (__val) : : "memory"); \
} while (0)


@ -45,6 +45,24 @@ enum sgx_encls_function {
EMODT = 0x0F,
};
/**
* SGX_ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
*
* ENCLS has its own (positive value) error codes and also generates
* ENCLS specific #GP and #PF faults. And the ENCLS values get munged
* with system error codes as everything percolates back up the stack.
* Unfortunately (for us), we need to precisely identify each unique
* error code, e.g. the action taken if EWB fails varies based on the
* type of fault and on the exact SGX error code, i.e. we can't simply
* convert all faults to -EFAULT.
*
* To make all three error types coexist, we set bit 30 to identify an
* ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate
* between positive (faults and SGX error codes) and negative (system
* error codes) values.
*/
#define SGX_ENCLS_FAULT_FLAG 0x40000000
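
Two trivial helpers (sketched here; the tree may spell them
differently) show how callers act on that encoding:

#define ENCLS_FAULTED(ret)      ((ret) & SGX_ENCLS_FAULT_FLAG)
#define ENCLS_TRAPNR(ret)       ((ret) & ~SGX_ENCLS_FAULT_FLAG)

/* ret < 0: -errno; ENCLS_FAULTED(ret): trap number; else: SGX code */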
/**
* enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
* %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not


@ -36,7 +36,7 @@
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
#define ARCH_ADD_TRAMP_KEY(name) \


@ -146,42 +146,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
extern void *memcpy(void *, const void *, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
/*
* This CPU favours 3DNow strongly (eg AMD Athlon)
*/
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __constant_memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy3d((t), (f), (n)) \
: __memcpy3d((t), (f), (n)))
#else
/*
* No 3D Now!
*/
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE


@ -352,24 +352,22 @@ do { \
"1: movl %[lowbits],%%eax\n" \
"2: movl %[highbits],%%edx\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: mov %[efault],%[errout]\n" \
" xorl %%eax,%%eax\n" \
" xorl %%edx,%%edx\n" \
" jmp 3b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 4b) \
_ASM_EXTABLE_UA(2b, 4b) \
_ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX_DX, \
%[errout]) \
_ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX_DX, \
%[errout]) \
: [errout] "=r" (retval), \
[output] "=&A"(x) \
: [lowbits] "m" (__m(__ptr)), \
[highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
[efault] "i" (-EFAULT), "0" (retval)); \
"0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
__get_user_asm(x, ptr, retval, "q", "=r")
__get_user_asm(x, ptr, retval, "q")
#endif
#define __get_user_size(x, ptr, size, retval) \
@ -380,14 +378,14 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
__get_user_asm(x_u8__, ptr, retval, "b"); \
(x) = x_u8__; \
break; \
case 2: \
__get_user_asm(x, ptr, retval, "w", "=r"); \
__get_user_asm(x, ptr, retval, "w"); \
break; \
case 4: \
__get_user_asm(x, ptr, retval, "l", "=r"); \
__get_user_asm(x, ptr, retval, "l"); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval); \
@ -397,20 +395,17 @@ do { \
} \
} while (0)
#define __get_user_asm(x, addr, err, itype, ltype) \
#define __get_user_asm(x, addr, err, itype) \
asm volatile("\n" \
"1: mov"itype" %[umem],%[output]\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %[efault],%[errout]\n" \
" xorl %k[output],%k[output]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
EX_FLAG_CLEAR_AX, \
%[errout]) \
: [errout] "=r" (err), \
[output] ltype(x) \
[output] "=a" (x) \
: [umem] "m" (__m(addr)), \
[efault] "i" (-EFAULT), "0" (err))
"0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT


@ -77,30 +77,58 @@ static inline unsigned long find_zero(unsigned long mask)
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, dummy;
unsigned long offset, data;
unsigned long ret;
asm_volatile_goto(
"1: mov %[mem], %[ret]\n"
_ASM_EXTABLE(1b, %l[do_exception])
: [ret] "=r" (ret)
: [mem] "m" (*(unsigned long *)addr)
: : do_exception);
return ret;
do_exception:
offset = (unsigned long)addr & (sizeof(long) - 1);
addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
data = *(unsigned long *)addr;
ret = data >> offset * 8;
asm(
"1:\tmov %2,%0\n"
"2:\n"
".section .fixup,\"ax\"\n"
"3:\t"
"lea %2,%1\n\t"
"and %3,%1\n\t"
"mov (%1),%0\n\t"
"leal %2,%%ecx\n\t"
"andl %4,%%ecx\n\t"
"shll $3,%%ecx\n\t"
"shr %%cl,%0\n\t"
"jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
:"=&r" (ret),"=&c" (dummy)
:"m" (*(unsigned long *)addr),
"i" (-sizeof(unsigned long)),
"i" (sizeof(unsigned long)-1));
return ret;
}
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long offset, data;
unsigned long ret, err = 0;
asm( "1: mov %[mem], %[ret]\n"
"2:\n"
_ASM_EXTABLE_FAULT(1b, 2b)
: [ret] "=&r" (ret), "+a" (err)
: [mem] "m" (*(unsigned long *)addr));
if (unlikely(err)) {
offset = (unsigned long)addr & (sizeof(long) - 1);
addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
data = *(unsigned long *)addr;
ret = data >> offset * 8;
}
return ret;
}
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#endif /* _ASM_WORD_AT_A_TIME_H */
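
Both variants implement the same fixup arithmetic; pulled out as a
plain C function (illustrative only) it is easy to sanity-check. For an
8-byte load starting 3 bytes before the end of the last mapped page,
offset is 5, so the 3 valid low bytes survive and the rest read as
zero:

static unsigned long zeropad_fixup(const void *addr)
{
        unsigned long offset = (unsigned long)addr & (sizeof(long) - 1);
        unsigned long data   = *(unsigned long *)((unsigned long)addr &
                                                  ~(sizeof(long) - 1));
        return data >> (offset * 8);    /* valid low bytes, zeroes above */
}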


@ -96,11 +96,7 @@ static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
asm volatile("1: mov %[val], %[ptr]\n"
"2:\n"
".section .fixup, \"ax\"\n"
"3: sub $1, %[ret]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
: [ret] "+r" (ret), [ptr] "=m" (*addr)
: [val] "r" (val));
@ -110,16 +106,12 @@ static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
static inline int xen_safe_read_ulong(const unsigned long *addr,
unsigned long *val)
{
int ret = 0;
unsigned long rval = ~0ul;
int ret = 0;
asm volatile("1: mov %[ptr], %[rval]\n"
"2:\n"
".section .fixup, \"ax\"\n"
"3: sub $1, %[ret]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
: [ret] "+r" (ret), [rval] "+r" (rval)
: [ptr] "m" (*addr));
*val = rval;
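A hedged usage sketch (the pointer and the message are invented; the error value follows from EX_TYPE_EFAULT_REG, which replaces the old "sub $1, %[ret]" fixup with a write of -EFAULT):

	unsigned long v;

	if (xen_safe_read_ulong(addr, &v))	/* -EFAULT on fault */
		pr_warn("xen: read of %p faulted\n", addr);	/* v == ~0ul */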


@ -60,7 +60,7 @@ save_registers:
popl saved_context_eflags
movl $ret_point, saved_eip
ret
RET
restore_registers:
@ -70,7 +70,7 @@ restore_registers:
movl saved_context_edi, %edi
pushl saved_context_eflags
popfl
ret
RET
SYM_CODE_START(do_suspend_lowlevel)
call save_processor_state
@ -86,7 +86,7 @@ SYM_CODE_START(do_suspend_lowlevel)
ret_point:
call restore_registers
call restore_processor_state
ret
RET
SYM_CODE_END(do_suspend_lowlevel)
.data


@ -714,7 +714,7 @@ asm (
" .type int3_magic, @function\n"
"int3_magic:\n"
" movl $1, (%" _ASM_ARG1 ")\n"
" ret\n"
ASM_RET
" .size int3_magic, .-int3_magic\n"
" .popsection\n"
);
@ -1113,10 +1113,13 @@ void text_poke_sync(void)
}
struct text_poke_loc {
s32 rel_addr; /* addr := _stext + rel_addr */
s32 rel32;
/* addr := _stext + rel_addr */
s32 rel_addr;
s32 disp;
u8 len;
u8 opcode;
const u8 text[POKE_MAX_OPCODE_SIZE];
/* see text_poke_bp_batch() */
u8 old;
};
@ -1131,7 +1134,8 @@ static struct bp_patching_desc *bp_desc;
static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
/* rcu_dereference */
struct bp_patching_desc *desc = __READ_ONCE(*descp);
if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
return NULL;
@ -1165,7 +1169,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
int len, ret = 0;
int ret = 0;
void *ip;
if (user_mode(regs))
@ -1205,8 +1209,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
goto out_put;
}
len = text_opcode_size(tp->opcode);
ip += len;
ip += tp->len;
switch (tp->opcode) {
case INT3_INSN_OPCODE:
@ -1221,12 +1224,12 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
break;
case CALL_INSN_OPCODE:
int3_emulate_call(regs, (long)ip + tp->rel32);
int3_emulate_call(regs, (long)ip + tp->disp);
break;
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
int3_emulate_jmp(regs, (long)ip + tp->rel32);
int3_emulate_jmp(regs, (long)ip + tp->disp);
break;
default:
@ -1301,7 +1304,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
int len = text_opcode_size(tp[i].opcode);
int len = tp[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
@ -1378,20 +1381,36 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
int ret;
int ret, i;
memcpy((void *)tp->text, opcode, len);
if (!emulate)
emulate = opcode;
ret = insn_decode_kernel(&insn, emulate);
BUG_ON(ret < 0);
BUG_ON(len != insn.length);
tp->rel_addr = addr - (void *)_stext;
tp->len = len;
tp->opcode = insn.opcode.bytes[0];
switch (tp->opcode) {
case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
/*
* Control flow instructions without implied execution of the
* next instruction can be padded with INT3.
*/
for (i = insn.length; i < len; i++)
BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
break;
default:
BUG_ON(len != insn.length);
};
switch (tp->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
@ -1400,7 +1419,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
tp->rel32 = insn.immediate.value;
tp->disp = insn.immediate.value;
break;
default: /* assume NOP */
@ -1408,13 +1427,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case 2: /* NOP2 -- emulate as JMP8+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP8_INSN_OPCODE;
tp->rel32 = 0;
tp->disp = 0;
break;
case 5: /* NOP5 -- emulate as JMP32+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP32_INSN_OPCODE;
tp->rel32 = 0;
tp->disp = 0;
break;
default: /* unknown instruction */
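The rel32 -> disp rename pairs with the new tp->len field; a short sketch of the destination arithmetic the emulation above relies on (variable names from the hunks, dest is invented):

	/* sketch: emulating a patched JMP32/CALL at 'addr' */
	void *ip   = addr + tp->len;		/* end of the instruction */
	long  dest = (long)ip + tp->disp;	/* target passed to
						 * int3_emulate_jmp/call() */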


@ -11,26 +11,8 @@
#include <asm/traps.h>
#include "sgx.h"
/**
* ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
*
* ENCLS has its own (positive value) error codes and also generates
* ENCLS specific #GP and #PF faults. And the ENCLS values get munged
* with system error codes as everything percolates back up the stack.
* Unfortunately (for us), we need to precisely identify each unique
* error code, e.g. the action taken if EWB fails varies based on the
* type of fault and on the exact SGX error code, i.e. we can't simply
* convert all faults to -EFAULT.
*
* To make all three error types coexist, we set bit 30 to identify an
* ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate
* between positive (faults and SGX error codes) and negative (system
* error codes) values.
*/
#define ENCLS_FAULT_FLAG 0x40000000
/* Retrieve the encoded trapnr from the specified return code. */
#define ENCLS_TRAPNR(r) ((r) & ~ENCLS_FAULT_FLAG)
#define ENCLS_TRAPNR(r) ((r) & ~SGX_ENCLS_FAULT_FLAG)
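A small sketch of the encoding described above; the #PF value is only an example:

	/* bit 30 marks an ENCLS fault, the low bits hold the trapnr,
	 * negative values remain plain system error codes */
	int ret = SGX_ENCLS_FAULT_FLAG | X86_TRAP_PF;
	int trapnr;

	if (encls_faulted(ret))
		trapnr = ENCLS_TRAPNR(ret);	/* == X86_TRAP_PF */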
/* Issue a WARN() about an ENCLS function. */
#define ENCLS_WARN(r, name) { \
@ -50,7 +32,7 @@
*/
static inline bool encls_faulted(int ret)
{
return ret & ENCLS_FAULT_FLAG;
return ret & SGX_ENCLS_FAULT_FLAG;
}
/**
@ -88,11 +70,7 @@ static inline bool encls_failed(int ret)
asm volatile( \
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_FAULT(1b, 3b) \
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \
: "=a"(ret) \
: "a"(rax), inputs \
: "memory", "cc"); \
@ -127,7 +105,7 @@ static inline bool encls_failed(int ret)
*
* Return:
* 0 on success,
* trapnr with ENCLS_FAULT_FLAG set on fault
* trapnr with SGX_ENCLS_FAULT_FLAG set on fault
*/
#define __encls_N(rax, rbx_out, inputs...) \
({ \
@ -136,11 +114,7 @@ static inline bool encls_failed(int ret)
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
" xor %%eax,%%eax;\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_FAULT(1b, 3b) \
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \
: "=a"(ret), "=b"(rbx_out) \
: "a"(rax), inputs \
: "memory"); \


@ -35,11 +35,7 @@ static inline void ldmxcsr(u32 mxcsr)
int err; \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \


@ -108,11 +108,7 @@ static inline u64 xfeatures_mask_independent(void)
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
".pushsection .fixup,\"ax\"\n" \
"4: movl $-2, %[err]\n" \
"jmp 3b\n" \
".popsection\n" \
_ASM_EXTABLE(661b, 4b) \
_ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")


@ -303,7 +303,7 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
#define RET_SIZE 1
#define RET_SIZE 1 + IS_ENABLED(CONFIG_SLS)
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)


@ -19,7 +19,7 @@
#endif
SYM_FUNC_START(__fentry__)
ret
RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
@ -84,7 +84,7 @@ ftrace_graph_call:
/* This is weak to keep gas from relaxing the jumps */
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
ret
RET
SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_regs_caller)
@ -177,7 +177,7 @@ SYM_CODE_START(ftrace_graph_caller)
popl %edx
popl %ecx
popl %eax
ret
RET
SYM_CODE_END(ftrace_graph_caller)
.globl return_to_handler


@ -132,7 +132,7 @@
#ifdef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(__fentry__)
retq
RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
@ -176,11 +176,11 @@ SYM_FUNC_END(ftrace_caller);
SYM_FUNC_START(ftrace_epilogue)
/*
* This is weak to keep gas from relaxing the jumps.
* It is also used to copy the retq for trampolines.
* It is also used to copy the RET for trampolines.
*/
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
UNWIND_HINT_FUNC
retq
RET
SYM_FUNC_END(ftrace_epilogue)
SYM_FUNC_START(ftrace_regs_caller)
@ -284,7 +284,7 @@ SYM_FUNC_START(__fentry__)
jnz trace
SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
retq
RET
trace:
/* save_mcount_regs fills in first two parameters */


@ -340,7 +340,7 @@ SYM_FUNC_END(startup_32_smp)
__INIT
setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */
ret
RET
SYM_FUNC_START(early_idt_handler_array)
# 36(%esp) %eflags


@ -11,7 +11,7 @@
SYM_FUNC_START(native_save_fl)
pushf
pop %_ASM_AX
ret
RET
SYM_FUNC_END(native_save_fl)
.popsection
EXPORT_SYMBOL(native_save_fl)


@ -1051,7 +1051,7 @@ asm(
" addl $4, %esp\n"
" popfl\n"
#endif
" ret\n"
ASM_RET
".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(__kretprobe_trampoline);


@ -41,7 +41,7 @@ extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
".global _paravirt_nop\n"
"_paravirt_nop:\n\t"
"ret\n\t"
ASM_RET
".size _paravirt_nop, . - _paravirt_nop\n\t"
".type _paravirt_nop, @function\n\t"
".popsection");
@ -51,7 +51,7 @@ asm (".pushsection .entry.text, \"ax\"\n"
".global paravirt_ret0\n"
"paravirt_ret0:\n\t"
"xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
"ret\n\t"
ASM_RET
".size paravirt_ret0, . - paravirt_ret0\n\t"
".type paravirt_ret0, @function\n\t"
".popsection");


@ -91,7 +91,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
movl %edi, %eax
addl $(identity_mapped - relocate_kernel), %eax
pushl %eax
ret
RET
SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@ -159,7 +159,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
xorl %edx, %edx
xorl %esi, %esi
xorl %ebp, %ebp
ret
RET
1:
popl %edx
movl CP_PA_SWAP_PAGE(%edi), %esp
@ -190,7 +190,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movl %edi, %eax
addl $(virtual_mapped - relocate_kernel), %eax
pushl %eax
ret
RET
SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@ -208,7 +208,7 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
popl %edi
popl %esi
popl %ebx
ret
RET
SYM_CODE_END(virtual_mapped)
/* Do the copies */
@ -271,7 +271,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
popl %edi
popl %ebx
popl %ebp
ret
RET
SYM_CODE_END(swap_pages)
.globl kexec_control_code_size


@ -104,7 +104,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
/* jump to identity mapped page */
addq $(identity_mapped - relocate_kernel), %r8
pushq %r8
ret
RET
SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@ -191,7 +191,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
xorl %r14d, %r14d
xorl %r15d, %r15d
ret
RET
1:
popq %rdx
@ -210,7 +210,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
call swap_pages
movq $virtual_mapped, %rax
pushq %rax
ret
RET
SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@ -231,7 +231,7 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
popq %r12
popq %rbp
popq %rbx
ret
RET
SYM_CODE_END(virtual_mapped)
/* Do the copies */
@ -288,7 +288,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
lea PAGE_SIZE(%rax), %rsi
jmp 0b
3:
ret
RET
SYM_CODE_END(swap_pages)
.globl kexec_control_code_size


@ -85,5 +85,5 @@ SYM_FUNC_START(sev_verify_cbit)
#endif
/* Return page-table pointer */
movq %rdi, %rax
ret
RET
SYM_FUNC_END(sev_verify_cbit)


@ -17,6 +17,8 @@ enum insn_type {
*/
static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
const void *emulate = NULL;
@ -42,8 +44,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
break;
case RET:
code = text_gen_insn(RET_INSN_OPCODE, insn, func);
size = RET_INSN_SIZE;
code = &retinsn;
break;
}
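The new retinsn constant encodes the SLS padding directly; a byte-level sketch in plain C (outside the kernel; ret_slot is an invented name):

	#include <stdint.h>

	/* the 5-byte slot that used to hold "call func" becomes RET
	 * (0xc3) plus four INT3 (0xcc) bytes: the INT3s fill the slot
	 * and stop straight-line speculation after the RET */
	static const uint8_t ret_slot[5] = { 0xc3, 0xcc, 0xcc, 0xcc, 0xcc };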


@ -132,9 +132,9 @@ SYM_FUNC_START_LOCAL(verify_cpu)
.Lverify_cpu_no_longmode:
popf # Restore caller passed flags
movl $1,%eax
ret
RET
.Lverify_cpu_sse_ok:
popf # Restore caller passed flags
xorl %eax, %eax
ret
RET
SYM_FUNC_END(verify_cpu)


@ -137,7 +137,6 @@ SECTIONS
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
STATIC_CALL_TEXT
*(.fixup)
*(.gnu.warning)
#ifdef CONFIG_RETPOLINE


@ -315,7 +315,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_FUNC(#name)
#define __FOP_RET(name) \
"ret \n\t" \
"11: " ASM_RET \
".size " name ", .-" name "\n\t"
#define FOP_RET(name) \
@ -344,7 +344,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_RET(#op "_" #dst)
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
#define FASTOP1(op) \
FOP_START(op) \
@ -434,10 +434,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
#op " %al \n\t" \
__FOP_RET(#op)
asm(".pushsection .fixup, \"ax\"\n"
"kvm_fastop_exception: xor %esi, %esi; ret\n"
".popsection");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
@ -473,12 +469,8 @@ FOP_END;
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
: [_fault] "+r"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})


@ -148,7 +148,7 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
ret
RET
3: cmpb $0, kvm_rebooting
jne 2b
@ -202,7 +202,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
ret
RET
3: cmpb $0, kvm_rebooting
jne 2b


@ -49,14 +49,14 @@ SYM_FUNC_START_LOCAL(vmx_vmenter)
je 2f
1: vmresume
ret
RET
2: vmlaunch
ret
RET
3: cmpb $0, kvm_rebooting
je 4f
ret
RET
4: ud2
_ASM_EXTABLE(1b, 3b)
@ -89,7 +89,7 @@ SYM_FUNC_START(vmx_vmexit)
pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
ret
RET
SYM_FUNC_END(vmx_vmexit)
/**
@ -228,7 +228,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
ret
RET
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
2: mov $1, %eax
@ -293,7 +293,7 @@ SYM_FUNC_START(vmread_error_trampoline)
pop %_ASM_AX
pop %_ASM_BP
ret
RET
SYM_FUNC_END(vmread_error_trampoline)
SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
@ -326,5 +326,5 @@ SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
*/
mov %_ASM_BP, %_ASM_SP
pop %_ASM_BP
ret
RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)


@ -80,9 +80,11 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
* @field, and bounce through the trampoline to preserve
* volatile registers.
*/
"push $0\n\t"
"xorl %k1, %k1\n\t"
"2:\n\t"
"push %1\n\t"
"push %2\n\t"
"2:call vmread_error_trampoline\n\t"
"call vmread_error_trampoline\n\t"
/*
* Unwind the stack. Note, the trampoline zeros out the
@ -93,13 +95,9 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
"3:\n\t"
/* VMREAD faulted. As above, except push '1' for @fault. */
".pushsection .fixup, \"ax\"\n\t"
"4: push $1\n\t"
"push %2\n\t"
"jmp 2b\n\t"
".popsection\n\t"
_ASM_EXTABLE(1b, 4b)
: ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
: ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
return value;
}
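A C model of the shared error path, under the assumption that EX_TYPE_ONE_REG loads 1 into the tagged register and resumes at the common label; the flag and helper names are invented:

	/* model, not kernel source */
	unsigned long fault = 0;		/* xorl %k1, %k1 */

	if (vmread_raised_a_fault)		/* extable fired at 1: */
		fault = 1;			/* EX_TYPE_ONE_REG result */
	report_vmread_error(field, fault);	/* stands in for the
						 * trampoline call */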


@ -63,7 +63,6 @@ ifeq ($(CONFIG_X86_32),y)
ifneq ($(CONFIG_X86_CMPXCHG64),y)
lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
obj-y += iomap_copy_64.o
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o


@ -9,81 +9,83 @@
#include <asm/alternative.h>
/* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg
.macro IRQ_SAVE reg
pushfl
cli
.endm
.macro UNLOCK reg
.macro IRQ_RESTORE reg
popfl
.endm
#define BEGIN(op) \
#define BEGIN_IRQ_SAVE(op) \
.macro endp; \
SYM_FUNC_END(atomic64_##op##_386); \
.purgem endp; \
.endm; \
SYM_FUNC_START(atomic64_##op##_386); \
LOCK v;
IRQ_SAVE v;
#define ENDP endp
#define RET \
UNLOCK v; \
ret
#define RET_ENDP \
RET; \
ENDP
#define RET_IRQ_RESTORE \
IRQ_RESTORE v; \
RET
#define v %ecx
BEGIN(read)
BEGIN_IRQ_SAVE(read)
movl (v), %eax
movl 4(v), %edx
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(set)
BEGIN_IRQ_SAVE(set)
movl %ebx, (v)
movl %ecx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(xchg)
BEGIN_IRQ_SAVE(xchg)
movl (v), %eax
movl 4(v), %edx
movl %ebx, (v)
movl %ecx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %ecx
BEGIN(add)
BEGIN_IRQ_SAVE(add)
addl %eax, (v)
adcl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %ecx
BEGIN(add_return)
BEGIN_IRQ_SAVE(add_return)
addl (v), %eax
adcl 4(v), %edx
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %ecx
BEGIN(sub)
BEGIN_IRQ_SAVE(sub)
subl %eax, (v)
sbbl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %ecx
BEGIN(sub_return)
BEGIN_IRQ_SAVE(sub_return)
negl %edx
negl %eax
sbbl $0, %edx
@ -91,47 +93,52 @@ BEGIN(sub_return)
adcl 4(v), %edx
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(inc)
BEGIN_IRQ_SAVE(inc)
addl $1, (v)
adcl $0, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(inc_return)
BEGIN_IRQ_SAVE(inc_return)
movl (v), %eax
movl 4(v), %edx
addl $1, %eax
adcl $0, %edx
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(dec)
BEGIN_IRQ_SAVE(dec)
subl $1, (v)
sbbl $0, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(dec_return)
BEGIN_IRQ_SAVE(dec_return)
movl (v), %eax
movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v
#define v %esi
BEGIN(add_unless)
BEGIN_IRQ_SAVE(add_unless)
addl %eax, %ecx
adcl %edx, %edi
addl (v), %eax
@ -143,7 +150,7 @@ BEGIN(add_unless)
movl %edx, 4(v)
movl $1, %eax
2:
RET
RET_IRQ_RESTORE
3:
cmpl %edx, %edi
jne 1b
@ -153,7 +160,7 @@ ENDP
#undef v
#define v %esi
BEGIN(inc_not_zero)
BEGIN_IRQ_SAVE(inc_not_zero)
movl (v), %eax
movl 4(v), %edx
testl %eax, %eax
@ -165,7 +172,7 @@ BEGIN(inc_not_zero)
movl %edx, 4(v)
movl $1, %eax
2:
RET
RET_IRQ_RESTORE
3:
testl %edx, %edx
jne 1b
@ -174,7 +181,7 @@ ENDP
#undef v
#define v %esi
BEGIN(dec_if_positive)
BEGIN_IRQ_SAVE(dec_if_positive)
movl (v), %eax
movl 4(v), %edx
subl $1, %eax
@ -183,5 +190,6 @@ BEGIN(dec_if_positive)
movl %eax, (v)
movl %edx, 4(v)
1:
RET_ENDP
RET_IRQ_RESTORE
ENDP
#undef v


@ -18,7 +18,7 @@
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
ret
RET
SYM_FUNC_END(atomic64_read_cx8)
SYM_FUNC_START(atomic64_set_cx8)
@ -28,7 +28,7 @@ SYM_FUNC_START(atomic64_set_cx8)
cmpxchg8b (%esi)
jne 1b
ret
RET
SYM_FUNC_END(atomic64_set_cx8)
SYM_FUNC_START(atomic64_xchg_cx8)
@ -37,7 +37,7 @@ SYM_FUNC_START(atomic64_xchg_cx8)
cmpxchg8b (%esi)
jne 1b
ret
RET
SYM_FUNC_END(atomic64_xchg_cx8)
.macro addsub_return func ins insc
@ -68,7 +68,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
popl %esi
popl %ebx
popl %ebp
ret
RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
@ -93,7 +93,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
ret
RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
@ -118,7 +118,7 @@ SYM_FUNC_START(atomic64_dec_if_positive_cx8)
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
ret
RET
SYM_FUNC_END(atomic64_dec_if_positive_cx8)
SYM_FUNC_START(atomic64_add_unless_cx8)
@ -149,7 +149,7 @@ SYM_FUNC_START(atomic64_add_unless_cx8)
addl $8, %esp
popl %ebx
popl %ebp
ret
RET
4:
cmpl %edx, 4(%esp)
jne 2b
@ -176,5 +176,5 @@ SYM_FUNC_START(atomic64_inc_not_zero_cx8)
movl $1, %eax
3:
popl %ebx
ret
RET
SYM_FUNC_END(atomic64_inc_not_zero_cx8)

Some files were not shown because too many files have changed in this diff.