Merge tag 'x86_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "A couple more retbleed fallout fixes.

  It looks like their urgency is decreasing so it seems like we've
  managed to catch whatever snafus the limited -rc testing has exposed.
  Maybe we're getting ready... :)

   - Make retbleed mitigations 64-bit only (32-bit will need a bit more
     work if even needed, at all).

   - Prevent return thunks patching of the LKDTM modules as it is not
     needed there

   - Avoid writing the SPEC_CTRL MSR on every kernel entry on eIBRS parts

   - Enhance error output of apply_returns() when it fails to patch a
     return thunk

   - A sparse fix to the sev-guest module

   - Protect EFI fw calls by issuing an IBPB on AMD"

* tag 'x86_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Make all RETbleed mitigations 64-bit only
  lkdtm: Disable return thunks in rodata.c
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/alternative: Report missing return thunk details
  virt: sev-guest: Pass the appropriate argument type to iounmap()
  x86/amd: Use IBPB for firmware calls
commit 05017fed92
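The last item in the pull message is implemented by the nospec-branch.h and bugs.c hunks below: firmware calls are bracketed so that, on affected AMD/Hygon parts, an IBPB is issued before jumping into EFI code. As orientation only, here is a minimal caller-side sketch of that bracket; do_firmware_service() is a hypothetical stand-in for an EFI runtime service, not the kernel's actual EFI glue.

/*
 * Sketch only: how a firmware call is bracketed so speculation
 * restrictions apply while firmware runs. do_firmware_service() is a
 * hypothetical stand-in for a real EFI runtime call.
 */
#include <asm/nospec-branch.h>

extern void do_firmware_service(void);	/* hypothetical firmware entry */

static void protected_firmware_call(void)
{
	/*
	 * Expands to MSR writes gated on X86_FEATURE_USE_IBRS_FW and,
	 * after this series, X86_FEATURE_USE_IBPB_FW (see the hunks below).
	 */
	firmware_restrict_branch_speculation_start();

	do_firmware_service();

	firmware_restrict_branch_speculation_end();
}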
@@ -2474,7 +2474,7 @@ config RETHUNK
 	bool "Enable return-thunks"
 	depends on RETPOLINE && CC_HAS_RETURN_THUNK
 	select OBJTOOL if HAVE_OBJTOOL
-	default y
+	default y if X86_64
 	help
 	  Compile the kernel with the return-thunks compiler option to guard
 	  against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK
 
 config CPU_UNRET_ENTRY
 	bool "Enable UNRET on kernel entry"
-	depends on CPU_SUP_AMD && RETHUNK
+	depends on CPU_SUP_AMD && RETHUNK && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=unret mitigation.
 
 config CPU_IBPB_ENTRY
 	bool "Enable IBPB on kernel entry"
-	depends on CPU_SUP_AMD
+	depends on CPU_SUP_AMD && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=ibpb mitigation.
 
 config CPU_IBRS_ENTRY
 	bool "Enable IBRS on kernel entry"
-	depends on CPU_SUP_INTEL
+	depends on CPU_SUP_INTEL && X86_64
 	default y
 	help
 	  Compile the kernel with support for the spectre_v2=ibrs mitigation.
@@ -27,6 +27,7 @@ RETHUNK_CFLAGS := -mfunction-return=thunk-extern
 RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
 endif
 
+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
@@ -302,6 +302,7 @@
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
@@ -297,6 +297,8 @@ do { \
 	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
 			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
 			      X86_FEATURE_USE_IBRS_FW);			\
+	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,	\
+			      X86_FEATURE_USE_IBPB_FW);			\
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()		\
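For context, alternative_msr_write() performs the WRMSR only on CPUs where the named feature flag was set up at boot. Conceptually (ignoring the boot-time alternatives patching the real macro uses), the two added lines behave like this sketch:

/*
 * Conceptual equivalent of the alternative_msr_write() call added above.
 * A sketch only: the real macro patches the WRMSR in via ALTERNATIVE at
 * boot rather than testing the feature flag at run time.
 */
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

static inline void ibpb_before_firmware_call(void)
{
	if (boot_cpu_has(X86_FEATURE_USE_IBPB_FW))
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);	/* issue an IBPB */
}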
@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 		dest = addr + insn.length + insn.immediate.value;
 
 		if (__static_call_fixup(addr, op, dest) ||
-		    WARN_ON_ONCE(dest != &__x86_return_thunk))
+		    WARN_ONCE(dest != &__x86_return_thunk,
+			      "missing return thunk: %pS-%pS: %*ph",
+			      addr, dest, 5, addr))
 			continue;
 
 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
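WARN_ONCE() evaluates to the same condition as WARN_ON_ONCE(), so control flow is unchanged; the gain is the format string: %pS symbolizes the call site and its target, and %*ph hex-dumps the five bytes that should have been a call to __x86_return_thunk. A stand-alone sketch of the same idiom with hypothetical names:

/*
 * Sketch of the reporting idiom used above (hypothetical helper, not
 * kernel code). WARN_ONCE() returns the condition, so the caller can
 * keep the original skip-this-site control flow while the warning now
 * carries the symbols and raw bytes.
 */
#include <linux/bug.h>

static bool patch_target_ok(void *addr, void *dest, void *expected_thunk)
{
	if (WARN_ONCE(dest != expected_thunk,
		      "missing return thunk: %pS-%pS: %*ph",
		      addr, dest, 5, addr))
		return false;	/* like the 'continue' above */

	return true;
}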
@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
 
 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)
 
 	case SPECTRE_V2_IBRS:
 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
 		break;
 
 	case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM)		+= cfi.o
 lkdtm-$(CONFIG_LKDTM)		+= fortify.o
 lkdtm-$(CONFIG_PPC_64S_HASH_MMU)	+= powerpc.o
 
-KASAN_SANITIZE_rodata.o		:= n
 KASAN_SANITIZE_stackleak.o	:= n
-KCOV_INSTRUMENT_rodata.o	:= n
-CFLAGS_REMOVE_rodata.o		+= $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o		:= n
+KCSAN_SANITIZE_rodata.o		:= n
+KCOV_INSTRUMENT_rodata.o	:= n
+OBJECT_FILES_NON_STANDARD_rodata.o := y
+CFLAGS_REMOVE_rodata.o		+= $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o	:= \
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct snp_guest_dev *snp_dev;
 	struct miscdevice *misc;
+	void __iomem *mapping;
 	int ret;
 
 	if (!dev->platform_data)
 		return -ENODEV;
 
 	data = (struct sev_guest_platform_data *)dev->platform_data;
-	layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
-	if (!layout)
+	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+	if (!mapping)
 		return -ENODEV;
 
+	layout = (__force void *)mapping;
+
 	ret = -ENOMEM;
 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
 	if (!snp_dev)
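The sparse fix is purely about address-space annotations: ioremap_encrypted() hands back a void __iomem * and iounmap() expects that same annotated pointer, so the probe now keeps the cookie in 'mapping' and confines the __force cast to the CPU-side 'layout' view. A minimal stand-alone sketch of the pattern, with hypothetical names and plain ioremap() standing in for ioremap_encrypted():

/*
 * Sketch of the __iomem handling pattern from the hunk above; names are
 * hypothetical and plain ioremap() stands in for ioremap_encrypted().
 */
#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *my_mapping;

static int my_map_region(phys_addr_t pa, size_t len, void **cpu_view)
{
	my_mapping = ioremap(pa, len);
	if (!my_mapping)
		return -ENODEV;

	/* one deliberate __force cast for the CPU-side view */
	*cpu_view = (__force void *)my_mapping;
	return 0;
}

static void my_unmap_region(void)
{
	iounmap(my_mapping);	/* gets the __iomem cookie back, keeping sparse happy */
}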
@@ -706,7 +709,7 @@ e_free_response:
 e_free_request:
 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 e_unmap:
-	iounmap(layout);
+	iounmap(mapping);
 	return ret;
 }
 