mirror of https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

include/linux/bpf.h
  1f6e04a1c7 ("bpf: Fix offset calculation error in __copy_map_value and zero_map_value")
  aa3496accc ("bpf: Refactor kptr_off_tab into btf_record")
  f71b2f6417 ("bpf: Refactor map->off_arr handling")

https://lore.kernel.org/all/20221114095000.67a73239@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

This commit is contained in: commit 224b744abf
@@ -67,6 +67,7 @@ uninitialized in the local variable, as well as the stack where the value was
copied to another memory location before use.

A use of uninitialized value ``v`` is reported by KMSAN in the following cases:

- in a condition, e.g. ``if (v) { ... }``;
- in an indexing or pointer dereferencing, e.g. ``array[v]`` or ``*v``;
- when it is copied to userspace or hardware, e.g. ``copy_to_user(..., &v, ...)``;
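Illustrative aside (not part of the diff): a minimal, made-up kernel-style function showing two of the cases listed above. If ``v`` is left uninitialized, KMSAN would flag both the ``if (v)`` test and the ``copy_to_user()`` call.

    /* Hypothetical sketch only; the function and names are invented. */
    static int kmsan_demo_read(char __user *buf)
    {
    	int v;	/* never initialized on this path */

    	if (v)			/* use in a condition */
    		v = 42;

    	/* copy to userspace: also reported while v is uninitialized */
    	return copy_to_user(buf, &v, sizeof(v)) ? -EFAULT : 0;
    }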
@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT).

:Parameters: address of a buffer in user space to store the data (u8) to
:Returns:    -EFAULT if the given address is not accessible from kernel space;
             -EINVAL if setting the TOD clock extension to != 0 is not supported
             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
-----------------------------------

@@ -224,6 +225,7 @@ the POP (u64).

:Parameters: address of a buffer in user space to store the data (u64) to
:Returns:    -EFAULT if the given address is not accessible from kernel space
             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
-----------------------------------

@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0.

(kvm_s390_vm_tod_clock) to
:Returns:    -EFAULT if the given address is not accessible from kernel space;
             -EINVAL if setting the TOD clock extension to != 0 is not supported
             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

4. GROUP: KVM_S390_VM_CRYPTO
============================
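Illustrative aside (not from this patch): these attributes are driven from userspace with KVM_SET_DEVICE_ATTR on the VM file descriptor. A rough sketch, assuming ``vm_fd`` is an already-created KVM VM fd; a protected (PV) guest would report the documented -EOPNOTSUPP, a bad buffer -EFAULT, via errno.

    /* Hedged userspace sketch; error handling trimmed. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_tod_low(int vm_fd, uint64_t tod)
    {
    	struct kvm_device_attr attr = {
    		.group = KVM_S390_VM_TOD,
    		.attr  = KVM_S390_VM_TOD_LOW,
    		.addr  = (uint64_t)(uintptr_t)&tod,
    	};

    	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }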
@@ -9343,7 +9343,7 @@ S: Maintained
F: drivers/crypto/hisilicon/trng/trng.c

HISILICON V3XX SPI NOR FLASH Controller Driver
M: John Garry <john.garry@huawei.com>
M: Jay Fang <f.fangjian@huawei.com>
S: Maintained
W: http://www.hisilicon.com
F: drivers/spi/spi-hisi-sfc-v3xx.c

@@ -9510,7 +9510,6 @@ F: drivers/media/i2c/hi847.c
Hyper-V/Azure CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
M: Wei Liu <wei.liu@kernel.org>
M: Dexuan Cui <decui@microsoft.com>
L: linux-hyperv@vger.kernel.org
Makefile (2 changed lines)

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -41,7 +41,7 @@
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)

#define MIDR_CPU_MODEL(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
@@ -8,7 +8,7 @@
#ifndef __ASM_SYSCALL_WRAPPER_H
#define __ASM_SYSCALL_WRAPPER_H

struct pt_regs;
#include <asm/ptrace.h>

#define SC_ARM64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
@@ -13,6 +13,14 @@

#include <asm/efi.h>

static bool region_is_misaligned(const efi_memory_desc_t *md)
{
if (PAGE_SIZE == EFI_PAGE_SIZE)
return false;
return !PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -26,14 +34,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;

if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
if (region_is_misaligned(md)) {
static bool __initdata code_is_misaligned;

/*
* If the region is not aligned to the page size of the OS, we
* can not use strict permissions, since that would also affect
* the mapping attributes of the adjacent regions.
* Regions that are not aligned to the OS page size cannot be
* mapped with strict permissions, as those might interfere
* with the permissions that are needed by the adjacent
* region's mapping. However, if we haven't encountered any
* misaligned runtime code regions so far, we can safely use
* non-executable permissions for non-code regions.
*/
return pgprot_val(PAGE_KERNEL_EXEC);
code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
: pgprot_val(PAGE_KERNEL);
}

/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -64,19 +80,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);

if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
/*
* If the end address of this region is not aligned to page
* size, the mapping is rounded up, and may end up sharing a
* page frame with the next UEFI memory region. If we create
* a block entry now, we may need to split it again when mapping
* the next region, and support for that is going to be removed
* from the MMU routines. So avoid block mappings altogether in
* that case.
*/
/*
* If this region is not aligned to the page size used by the OS, the
* mapping will be rounded outwards, and may end up sharing a page
* frame with an adjacent runtime memory region. Given that the page
* table descriptor covering the shared page will be rewritten when the
* adjacent region gets mapped, we must avoid block mappings here so we
* don't have to worry about splitting them when that happens.
*/
if (region_is_misaligned(md))
page_mappings_only = true;
}

create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -103,6 +116,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);

if (region_is_misaligned(md))
return 0;

/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called
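Illustrative aside (not part of the diff): a userspace toy model of the region_is_misaligned() check, assuming the interesting case of 64 KiB kernel pages over 4 KiB EFI pages, showing why a firmware region can be misaligned even though its size is a whole number of EFI pages.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins: EFI always describes regions in 4 KiB pages,
     * the kernel page size here is assumed to be 64 KiB. */
    #define EFI_PAGE_SHIFT	12
    #define OS_PAGE_SIZE	(64 * 1024)

    static bool misaligned(uint64_t phys_addr, uint64_t num_pages)
    {
    	uint64_t size = num_pages << EFI_PAGE_SHIFT;

    	return (phys_addr % OS_PAGE_SIZE) || (size % OS_PAGE_SIZE);
    }

    int main(void)
    {
    	/* 16 EFI pages = 64 KiB, but starting at 0x81000: the start is not
    	 * 64 KiB aligned, so strict permissions would be skipped. */
    	printf("%d\n", misaligned(0x81000, 16));	/* prints 1 */
    	/* The same size starting at 0x80000 is fine. */
    	printf("%d\n", misaligned(0x80000, 16));	/* prints 0 */
    	return 0;
    }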
@@ -26,7 +26,7 @@ bool can_set_direct_map(void)
* mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
*/
return rodata_full || debug_pagealloc_enabled() ||
return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE);
}

@@ -102,7 +102,8 @@ static int change_memory_common(unsigned long addr, int numpages,
* If we are manipulating read-only permissions, apply the same
* change to the linear mapping of the pages that back this VM area.
*/
if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
if (rodata_enabled &&
rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
__change_memory_common((u64)page_address(area->pages[i]),
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/gpio-au1300.h>
@@ -9,6 +9,7 @@

#define DISABLE_BRANCH_PROFILING

#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -26,6 +26,6 @@ extern char *fw_getcmdline(void);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
extern void fw_init_early_console(char port);
extern void fw_init_early_console(void);

#endif /* __ASM_FW_H_ */
@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e,
* The branch offset must fit in the instruction's 26
* bit field.
*/
WARN_ON((offset >= BIT(25)) ||
WARN_ON((offset >= (long)BIT(25)) ||
(offset < -(long)BIT(25)));

insn.j_format.opcode = bc6_op;
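Illustrative aside (not part of the diff): why the (long) cast matters. BIT(25) expands to 1UL << 25, so comparing a signed offset against it converts the offset to unsigned long, and a perfectly valid negative branch offset would trip the warning. A standalone C illustration:

    #include <stdio.h>

    #define BIT(n)	(1UL << (n))	/* unsigned long, as in the kernel */

    int main(void)
    {
    	long offset = -4;	/* a legitimate backward branch offset */

    	/* Old check: offset is converted to unsigned long, so -4 becomes a
    	 * huge value and compares as >= BIT(25): spurious warning. */
    	printf("unsigned compare: %d\n", offset >= BIT(25));		/* 1 */

    	/* New check: casting BIT(25) to long keeps the comparison signed. */
    	printf("signed compare:   %d\n", offset >= (long)BIT(25));	/* 0 */
    	return 0;
    }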
@@ -145,8 +145,7 @@ LEAF(kexec_smp_wait)
* kexec_args[0..3] are used to prepare register values.
*/

kexec_args:
EXPORT(kexec_args)
EXPORT(kexec_args)
arg0: PTR_WD 0x0
arg1: PTR_WD 0x0
arg2: PTR_WD 0x0
@@ -159,8 +158,7 @@ arg3: PTR_WD 0x0
* their registers a0-a3. secondary_kexec_args[0..3] are used
* to prepare register values.
*/
secondary_kexec_args:
EXPORT(secondary_kexec_args)
EXPORT(secondary_kexec_args)
s_arg0: PTR_WD 0x0
s_arg1: PTR_WD 0x0
s_arg2: PTR_WD 0x0
@@ -171,19 +169,16 @@ kexec_flag:

#endif

kexec_start_address:
EXPORT(kexec_start_address)
EXPORT(kexec_start_address)
PTR_WD 0x0
.size kexec_start_address, PTRSIZE

kexec_indirection_page:
EXPORT(kexec_indirection_page)
EXPORT(kexec_indirection_page)
PTR_WD 0
.size kexec_indirection_page, PTRSIZE

relocate_new_kernel_end:

relocate_new_kernel_size:
EXPORT(relocate_new_kernel_size)
EXPORT(relocate_new_kernel_size)
PTR_WD relocate_new_kernel_end - relocate_new_kernel
.size relocate_new_kernel_size, PTRSIZE
@@ -16,6 +16,7 @@
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/bug.h>

#include <loongson.h>
#include <boot_param.h>
@@ -159,8 +160,17 @@ static int __init mips_reboot_setup(void)

#ifdef CONFIG_KEXEC
kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_argv))
return -ENOMEM;

kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kdump_argv))
return -ENOMEM;

kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_envp))
return -ENOMEM;

fw_arg1 = KEXEC_ARGV_ADDR;
memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE);
@@ -27,7 +27,7 @@
#define U_BRG(x) (UART_BASE(x) + 0x40)

static void __iomem *uart_base;
static char console_port = -1;
static int console_port = -1;

static int __init configure_uart_pins(int port)
{
@@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port)
return 0;
}

static void __init configure_uart(char port, int baud)
static void __init configure_uart(int port, int baud)
{
u32 pbclk;

@@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud)
uart_base + PIC32_SET(U_STA(port)));
}

static void __init setup_early_console(char port, int baud)
static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
@@ -130,16 +130,15 @@ _out:
return baud;
}

void __init fw_init_early_console(char port)
void __init fw_init_early_console(void)
{
char *arch_cmdline = pic32_getcmdline();
int baud = -1;
int baud, port;

uart_base = ioremap(PIC32_BASE_UART, 0xc00);

baud = get_baud_from_cmdline(arch_cmdline);
if (port == -1)
port = get_port_from_cmdline(arch_cmdline);
port = get_port_from_cmdline(arch_cmdline);

if (port == -1)
port = EARLY_CONSOLE_PORT;
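Illustrative aside (not part of the diff): whether a plain char is signed is an implementation/ABI choice, so keeping the UART port number in a char makes the -1 "not set" sentinel unreliable; switching the port variables to int avoids that. A small standalone illustration of the failing case, spelling the unsigned variant out explicitly:

    #include <stdio.h>

    int main(void)
    {
    	/* On targets where plain char is unsigned, the -1 initializer wraps
    	 * to 255 and the sentinel comparison never fires. */
    	unsigned char port_as_unsigned_char = -1;
    	int port_as_int = -1;

    	printf("char sentinel hit: %d\n", port_as_unsigned_char == -1); /* 0 */
    	printf("int  sentinel hit: %d\n", port_as_int == -1);           /* 1 */
    	return 0;
    }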
@@ -47,7 +47,7 @@ void __init plat_mem_setup(void)
strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);

#ifdef CONFIG_EARLY_PRINTK
fw_init_early_console(-1);
fw_init_early_console();
#endif
pic32_config_init();
}
@ -3,6 +3,8 @@
|
||||
|
||||
#include "fu540-c000.dtsi"
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
#include <dt-bindings/leds/common.h>
|
||||
#include <dt-bindings/pwm/pwm.h>
|
||||
|
||||
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
|
||||
#define RTCCLK_FREQ 1000000
|
||||
@ -42,6 +44,42 @@
|
||||
compatible = "gpio-restart";
|
||||
gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
|
||||
led-controller {
|
||||
compatible = "pwm-leds";
|
||||
|
||||
led-d1 {
|
||||
pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>;
|
||||
active-low;
|
||||
color = <LED_COLOR_ID_GREEN>;
|
||||
max-brightness = <255>;
|
||||
label = "d1";
|
||||
};
|
||||
|
||||
led-d2 {
|
||||
pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>;
|
||||
active-low;
|
||||
color = <LED_COLOR_ID_GREEN>;
|
||||
max-brightness = <255>;
|
||||
label = "d2";
|
||||
};
|
||||
|
||||
led-d3 {
|
||||
pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>;
|
||||
active-low;
|
||||
color = <LED_COLOR_ID_GREEN>;
|
||||
max-brightness = <255>;
|
||||
label = "d3";
|
||||
};
|
||||
|
||||
led-d4 {
|
||||
pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>;
|
||||
active-low;
|
||||
color = <LED_COLOR_ID_GREEN>;
|
||||
max-brightness = <255>;
|
||||
label = "d4";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&uart0 {
|
||||
|
@ -164,6 +164,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
|
||||
unsigned long tls = args->tls;
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
||||
memset(&p->thread.s, 0, sizeof(p->thread.s));
|
||||
|
||||
/* p->thread holds context to be restored by __switch_to() */
|
||||
if (unlikely(args->fn)) {
|
||||
/* Kernel thread */
|
||||
|
@ -283,6 +283,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
else
|
||||
pr_err("No DTB found in kernel mappings\n");
|
||||
#endif
|
||||
early_init_fdt_scan_reserved_mem();
|
||||
misc_mem_init();
|
||||
|
||||
init_resources();
|
||||
|
@ -28,9 +28,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
|
||||
|
||||
obj-y += vdso.o
|
||||
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
|
||||
ifneq ($(filter vgettimeofday, $(vdso-syms)),)
|
||||
CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
|
||||
endif
|
||||
|
||||
# Disable -pg to prevent insert call site
|
||||
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
|
||||
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
|
||||
|
||||
# Disable profiling and instrumentation for VDSO code
|
||||
GCOV_PROFILE := n
|
||||
|
@ -68,9 +68,11 @@ VERSION
|
||||
LINUX_4.15 {
|
||||
global:
|
||||
__vdso_rt_sigreturn;
|
||||
#ifdef HAS_VGETTIMEOFDAY
|
||||
__vdso_gettimeofday;
|
||||
__vdso_clock_gettime;
|
||||
__vdso_clock_getres;
|
||||
#endif
|
||||
__vdso_getcpu;
|
||||
__vdso_flush_icache;
|
||||
local: *;
|
||||
|
@ -262,7 +262,6 @@ static void __init setup_bootmem(void)
|
||||
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
|
||||
}
|
||||
|
||||
early_init_fdt_scan_reserved_mem();
|
||||
dma_contiguous_reserve(dma32_phys_limit);
|
||||
if (IS_ENABLED(CONFIG_64BIT))
|
||||
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
|
||||
|
@ -568,8 +568,7 @@ config EXPOLINE_FULL
|
||||
endchoice
|
||||
|
||||
config RELOCATABLE
|
||||
bool "Build a relocatable kernel"
|
||||
default y
|
||||
def_bool y
|
||||
help
|
||||
This builds a kernel image that retains relocation information
|
||||
so it can be loaded at an arbitrary address.
|
||||
@ -578,10 +577,11 @@ config RELOCATABLE
|
||||
bootup process.
|
||||
The relocations make the kernel image about 15% larger (compressed
|
||||
10%), but are discarded at runtime.
|
||||
Note: this option exists only for documentation purposes, please do
|
||||
not remove it.
|
||||
|
||||
config RANDOMIZE_BASE
|
||||
bool "Randomize the address of the kernel image (KASLR)"
|
||||
depends on RELOCATABLE
|
||||
default y
|
||||
help
|
||||
In support of Kernel Address Space Layout Randomization (KASLR),
|
||||
|
@ -14,10 +14,8 @@ KBUILD_AFLAGS_MODULE += -fPIC
|
||||
KBUILD_CFLAGS_MODULE += -fPIC
|
||||
KBUILD_AFLAGS += -m64
|
||||
KBUILD_CFLAGS += -m64
|
||||
ifeq ($(CONFIG_RELOCATABLE),y)
|
||||
KBUILD_CFLAGS += -fPIE
|
||||
LDFLAGS_vmlinux := -pie
|
||||
endif
|
||||
aflags_dwarf := -Wa,-gdwarf-2
|
||||
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
|
||||
ifndef CONFIG_AS_IS_LLVM
|
||||
|
@ -37,9 +37,8 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
|
||||
|
||||
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
|
||||
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
|
||||
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o
|
||||
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
|
||||
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
|
||||
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
|
||||
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
|
||||
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
|
||||
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
|
||||
|
@ -291,8 +291,7 @@ void startup_kernel(void)
|
||||
|
||||
clear_bss_section();
|
||||
copy_bootdata();
|
||||
if (IS_ENABLED(CONFIG_RELOCATABLE))
|
||||
handle_relocs(__kaslr_offset);
|
||||
handle_relocs(__kaslr_offset);
|
||||
|
||||
if (__kaslr_offset) {
|
||||
/*
|
||||
|
1
arch/s390/configs/btf.config
Normal file
1
arch/s390/configs/btf.config
Normal file
@ -0,0 +1 @@
|
||||
CONFIG_DEBUG_INFO_BTF=y
|
@ -723,52 +723,42 @@ CONFIG_CRYPTO_ECDSA=m
|
||||
CONFIG_CRYPTO_ECRDSA=m
|
||||
CONFIG_CRYPTO_SM2=m
|
||||
CONFIG_CRYPTO_CURVE25519=m
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_CHACHA20POLY1305=m
|
||||
CONFIG_CRYPTO_AEGIS128=m
|
||||
CONFIG_CRYPTO_SEQIV=y
|
||||
CONFIG_CRYPTO_CFB=m
|
||||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_ADIANTUM=m
|
||||
CONFIG_CRYPTO_HCTR2=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA512_S390=m
|
||||
CONFIG_CRYPTO_SHA1_S390=m
|
||||
CONFIG_CRYPTO_SHA256_S390=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_SHA3_256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_512_S390=m
|
||||
CONFIG_CRYPTO_SM3_GENERIC=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_ARC4=m
|
||||
CONFIG_CRYPTO_ARIA=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
CONFIG_CRYPTO_CAST5=m
|
||||
CONFIG_CRYPTO_CAST6=m
|
||||
CONFIG_CRYPTO_DES=m
|
||||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_FCRYPT=m
|
||||
CONFIG_CRYPTO_KHAZAD=m
|
||||
CONFIG_CRYPTO_CHACHA_S390=m
|
||||
CONFIG_CRYPTO_SEED=m
|
||||
CONFIG_CRYPTO_ARIA=m
|
||||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_SM4_GENERIC=m
|
||||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_ADIANTUM=m
|
||||
CONFIG_CRYPTO_ARC4=m
|
||||
CONFIG_CRYPTO_CFB=m
|
||||
CONFIG_CRYPTO_HCTR2=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_AEGIS128=m
|
||||
CONFIG_CRYPTO_CHACHA20POLY1305=m
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_SEQIV=y
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_SM3_GENERIC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_842=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
@ -779,6 +769,16 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
||||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
CONFIG_CRYPTO_STATS=y
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_CRYPTO_SHA512_S390=m
|
||||
CONFIG_CRYPTO_SHA1_S390=m
|
||||
CONFIG_CRYPTO_SHA256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_512_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_CHACHA_S390=m
|
||||
CONFIG_ZCRYPT=m
|
||||
CONFIG_PKEY=m
|
||||
CONFIG_CRYPTO_PAES_S390=m
|
||||
@ -797,7 +797,6 @@ CONFIG_CMA_SIZE_MBYTES=0
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_DEBUG_INFO_BTF=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
CONFIG_HEADERS_INSTALL=y
|
||||
CONFIG_DEBUG_SECTION_MISMATCH=y
|
||||
|
@ -707,53 +707,43 @@ CONFIG_CRYPTO_ECDSA=m
|
||||
CONFIG_CRYPTO_ECRDSA=m
|
||||
CONFIG_CRYPTO_SM2=m
|
||||
CONFIG_CRYPTO_CURVE25519=m
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_CHACHA20POLY1305=m
|
||||
CONFIG_CRYPTO_AEGIS128=m
|
||||
CONFIG_CRYPTO_SEQIV=y
|
||||
CONFIG_CRYPTO_CFB=m
|
||||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_OFB=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_ADIANTUM=m
|
||||
CONFIG_CRYPTO_HCTR2=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA512_S390=m
|
||||
CONFIG_CRYPTO_SHA1_S390=m
|
||||
CONFIG_CRYPTO_SHA256_S390=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_SHA3_256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_512_S390=m
|
||||
CONFIG_CRYPTO_SM3_GENERIC=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_AES_TI=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_ANUBIS=m
|
||||
CONFIG_CRYPTO_ARC4=m
|
||||
CONFIG_CRYPTO_ARIA=m
|
||||
CONFIG_CRYPTO_BLOWFISH=m
|
||||
CONFIG_CRYPTO_CAMELLIA=m
|
||||
CONFIG_CRYPTO_CAST5=m
|
||||
CONFIG_CRYPTO_CAST6=m
|
||||
CONFIG_CRYPTO_DES=m
|
||||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_FCRYPT=m
|
||||
CONFIG_CRYPTO_KHAZAD=m
|
||||
CONFIG_CRYPTO_CHACHA_S390=m
|
||||
CONFIG_CRYPTO_SEED=m
|
||||
CONFIG_CRYPTO_ARIA=m
|
||||
CONFIG_CRYPTO_SERPENT=m
|
||||
CONFIG_CRYPTO_SM4_GENERIC=m
|
||||
CONFIG_CRYPTO_TEA=m
|
||||
CONFIG_CRYPTO_TWOFISH=m
|
||||
CONFIG_CRYPTO_ADIANTUM=m
|
||||
CONFIG_CRYPTO_ARC4=m
|
||||
CONFIG_CRYPTO_CFB=m
|
||||
CONFIG_CRYPTO_HCTR2=m
|
||||
CONFIG_CRYPTO_KEYWRAP=m
|
||||
CONFIG_CRYPTO_LRW=m
|
||||
CONFIG_CRYPTO_OFB=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_AEGIS128=m
|
||||
CONFIG_CRYPTO_CHACHA20POLY1305=m
|
||||
CONFIG_CRYPTO_GCM=y
|
||||
CONFIG_CRYPTO_SEQIV=y
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
CONFIG_CRYPTO_SM3_GENERIC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_WP512=m
|
||||
CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_842=m
|
||||
CONFIG_CRYPTO_LZ4=m
|
||||
CONFIG_CRYPTO_LZ4HC=m
|
||||
@ -764,6 +754,16 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
|
||||
CONFIG_CRYPTO_USER_API_RNG=m
|
||||
CONFIG_CRYPTO_USER_API_AEAD=m
|
||||
CONFIG_CRYPTO_STATS=y
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_CRYPTO_SHA512_S390=m
|
||||
CONFIG_CRYPTO_SHA1_S390=m
|
||||
CONFIG_CRYPTO_SHA256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_256_S390=m
|
||||
CONFIG_CRYPTO_SHA3_512_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_CHACHA_S390=m
|
||||
CONFIG_ZCRYPT=m
|
||||
CONFIG_PKEY=m
|
||||
CONFIG_CRYPTO_PAES_S390=m
|
||||
@ -781,7 +781,6 @@ CONFIG_CMA_SIZE_MBYTES=0
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_DEBUG_INFO_BTF=y
|
||||
CONFIG_GDB_SCRIPTS=y
|
||||
CONFIG_DEBUG_SECTION_MISMATCH=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
|
3
arch/s390/configs/kasan.config
Normal file
3
arch/s390/configs/kasan.config
Normal file
@ -0,0 +1,3 @@
|
||||
CONFIG_KASAN=y
|
||||
CONFIG_KASAN_INLINE=y
|
||||
CONFIG_KASAN_VMALLOC=y
|
@ -74,7 +74,6 @@ CONFIG_PRINTK_TIME=y
|
||||
# CONFIG_SYMBOLIC_ERRNAME is not set
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_INFO_DWARF4=y
|
||||
CONFIG_DEBUG_INFO_BTF=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_PANIC_ON_OOPS=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -1207,6 +1207,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
|
||||
|
||||
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
|
||||
{
|
||||
struct kvm_s390_vm_tod_clock gtod;
|
||||
@ -1216,7 +1218,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
|
||||
|
||||
if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
|
||||
return -EINVAL;
|
||||
kvm_s390_set_tod_clock(kvm, >od);
|
||||
__kvm_s390_set_tod_clock(kvm, >od);
|
||||
|
||||
VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
|
||||
gtod.epoch_idx, gtod.tod);
|
||||
@ -1247,7 +1249,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
|
||||
sizeof(gtod.tod)))
|
||||
return -EFAULT;
|
||||
|
||||
kvm_s390_set_tod_clock(kvm, >od);
|
||||
__kvm_s390_set_tod_clock(kvm, >od);
|
||||
VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
|
||||
return 0;
|
||||
}
|
||||
@ -1259,6 +1261,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
|
||||
if (attr->flags)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
/*
|
||||
* For protected guests, the TOD is managed by the ultravisor, so trying
|
||||
* to change it will never bring the expected results.
|
||||
*/
|
||||
if (kvm_s390_pv_is_protected(kvm)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
switch (attr->attr) {
|
||||
case KVM_S390_VM_TOD_EXT:
|
||||
ret = kvm_s390_set_tod_ext(kvm, attr);
|
||||
@ -1273,6 +1285,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&kvm->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -4377,13 +4392,6 @@ static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_t
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
|
||||
{
|
||||
mutex_lock(&kvm->lock);
|
||||
__kvm_s390_set_tod_clock(kvm, gtod);
|
||||
mutex_unlock(&kvm->lock);
|
||||
}
|
||||
|
||||
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
|
||||
{
|
||||
if (!mutex_trylock(&kvm->lock))
|
||||
|
@ -363,7 +363,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
|
||||
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* implemented in kvm-s390.c */
|
||||
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
|
||||
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
|
||||
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
|
||||
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
|
@@ -126,7 +126,7 @@ int kvm_s390_pci_aen_init(u8 nisc)
return -EPERM;

mutex_lock(&aift->aift_lock);
aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev),
aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev *),
GFP_KERNEL);
if (!aift->kzdev) {
rc = -ENOMEM;
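Illustrative aside (not part of the diff, simplified types): kzdev is an array of pointers, so each element should be sized as a pointer; sizing it as the whole structure still works but over-allocates by a large factor.

    #include <stdlib.h>

    struct demo_dev { char payload[256]; };	/* stand-in for struct kvm_zdev */

    #define NR_SLOTS 64			/* stand-in for ZPCI_NR_DEVICES */

    static struct demo_dev **alloc_table(void)
    {
    	/* The old call sized each slot as a whole structure (64 * 256 bytes),
    	 * which over-allocates: the table only ever stores pointers. */
    	/* return calloc(NR_SLOTS, sizeof(struct demo_dev)); */

    	/* The fixed call sizes each slot as a pointer, matching the type. */
    	return calloc(NR_SLOTS, sizeof(struct demo_dev *));
    }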
@ -444,7 +444,7 @@ void __init hyperv_init(void)
|
||||
|
||||
if (hv_root_partition) {
|
||||
struct page *pg;
|
||||
void *src, *dst;
|
||||
void *src;
|
||||
|
||||
/*
|
||||
* For the root partition, the hypervisor will set up its
|
||||
@ -459,13 +459,11 @@ void __init hyperv_init(void)
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
|
||||
pg = vmalloc_to_page(hv_hypercall_pg);
|
||||
dst = kmap_local_page(pg);
|
||||
src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
|
||||
MEMREMAP_WB);
|
||||
BUG_ON(!(src && dst));
|
||||
memcpy(dst, src, HV_HYP_PAGE_SIZE);
|
||||
BUG_ON(!src);
|
||||
memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
|
||||
memunmap(src);
|
||||
kunmap_local(dst);
|
||||
} else {
|
||||
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
@ -537,6 +535,7 @@ common_free:
|
||||
void hyperv_cleanup(void)
|
||||
{
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
union hv_reference_tsc_msr tsc_msr;
|
||||
|
||||
unregister_syscore_ops(&hv_syscore_ops);
|
||||
|
||||
@ -552,12 +551,14 @@ void hyperv_cleanup(void)
|
||||
hv_hypercall_pg = NULL;
|
||||
|
||||
/* Reset the hypercall page */
|
||||
hypercall_msr.as_uint64 = 0;
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
hypercall_msr.as_uint64 = hv_get_register(HV_X64_MSR_HYPERCALL);
|
||||
hypercall_msr.enable = 0;
|
||||
hv_set_register(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
|
||||
/* Reset the TSC page */
|
||||
hypercall_msr.as_uint64 = 0;
|
||||
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
|
||||
tsc_msr.as_uint64 = hv_get_register(HV_X64_MSR_REFERENCE_TSC);
|
||||
tsc_msr.enable = 0;
|
||||
hv_set_register(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
|
||||
}
|
||||
|
||||
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
|
||||
|
@ -501,7 +501,12 @@ struct kvm_pmc {
|
||||
bool intr;
|
||||
};
|
||||
|
||||
/* More counters may conflict with other existing Architectural MSRs */
|
||||
#define KVM_INTEL_PMC_MAX_GENERIC 8
|
||||
#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
|
||||
#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
|
||||
#define KVM_PMC_MAX_FIXED 3
|
||||
#define KVM_AMD_PMC_MAX_GENERIC 6
|
||||
struct kvm_pmu {
|
||||
unsigned nr_arch_gp_counters;
|
||||
unsigned nr_arch_fixed_counters;
|
||||
@ -516,7 +521,7 @@ struct kvm_pmu {
|
||||
u64 reserved_bits;
|
||||
u64 raw_event_mask;
|
||||
u8 version;
|
||||
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
|
||||
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
|
||||
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
|
||||
struct irq_work irq_work;
|
||||
DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
|
||||
|
@ -535,6 +535,11 @@
|
||||
#define MSR_AMD64_CPUID_FN_1 0xc0011004
|
||||
#define MSR_AMD64_LS_CFG 0xc0011020
|
||||
#define MSR_AMD64_DC_CFG 0xc0011022
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xc0011029
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
|
||||
|
||||
#define MSR_AMD64_BU_CFG2 0xc001102a
|
||||
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
|
||||
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
|
||||
@ -640,9 +645,6 @@
|
||||
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
|
||||
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
|
||||
#define MSR_FAM10H_NODE_ID 0xc001100c
|
||||
#define MSR_F10H_DECFG 0xc0011029
|
||||
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
|
||||
#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
|
||||
|
||||
/* K8 MSRs */
|
||||
#define MSR_K8_TOP_MEM1 0xc001001a
|
||||
|
@ -13,7 +13,7 @@
|
||||
* Takes the guest view of SPEC_CTRL MSR as a parameter and also
|
||||
* the guest's version of VIRT_SPEC_CTRL, if emulated.
|
||||
*/
|
||||
extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
|
||||
extern void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool guest);
|
||||
|
||||
/**
|
||||
* x86_spec_ctrl_set_guest - Set speculation control registers for the guest
|
||||
@ -24,9 +24,9 @@ extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bo
|
||||
* Avoids writing to the MSR if the content/bits are the same
|
||||
*/
|
||||
static inline
|
||||
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
|
||||
void x86_spec_ctrl_set_guest(u64 guest_virt_spec_ctrl)
|
||||
{
|
||||
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
|
||||
x86_virt_spec_ctrl(guest_virt_spec_ctrl, true);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -38,9 +38,9 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
|
||||
* Avoids writing to the MSR if the content/bits are the same
|
||||
*/
|
||||
static inline
|
||||
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
|
||||
void x86_spec_ctrl_restore_host(u64 guest_virt_spec_ctrl)
|
||||
{
|
||||
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
|
||||
x86_virt_spec_ctrl(guest_virt_spec_ctrl, false);
|
||||
}
|
||||
|
||||
/* AMD specific Speculative Store Bypass MSR data */
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include <asm/suspend.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/tdx.h>
|
||||
#include "../kvm/vmx/vmx.h"
|
||||
|
||||
#ifdef CONFIG_XEN
|
||||
#include <xen/interface/xen.h>
|
||||
@ -108,9 +107,4 @@ static void __used common(void)
|
||||
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
|
||||
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
|
||||
OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KVM_INTEL)) {
|
||||
BLANK();
|
||||
OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
|
||||
}
|
||||
}
|
||||
|
@ -770,8 +770,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
|
||||
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
|
||||
}
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xC0011029
|
||||
|
||||
static void init_amd_ln(struct cpuinfo_x86 *c)
|
||||
{
|
||||
/*
|
||||
@ -965,8 +963,8 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
* msr_set_bit() uses the safe accessors, too, even if the MSR
|
||||
* is not present.
|
||||
*/
|
||||
msr_set_bit(MSR_F10H_DECFG,
|
||||
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
|
||||
msr_set_bit(MSR_AMD64_DE_CFG,
|
||||
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
|
||||
|
||||
/* A serializing LFENCE stops RDTSC speculation */
|
||||
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
|
||||
|
@ -196,22 +196,15 @@ void __init check_bugs(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* NOTE: This function is *only* called for SVM. VMX spec_ctrl handling is
|
||||
* done in vmenter.S.
|
||||
* NOTE: This function is *only* called for SVM, since Intel uses
|
||||
* MSR_IA32_SPEC_CTRL for SSBD.
|
||||
*/
|
||||
void
|
||||
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
|
||||
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
|
||||
{
|
||||
u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
|
||||
u64 guestval, hostval;
|
||||
struct thread_info *ti = current_thread_info();
|
||||
|
||||
if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
|
||||
if (hostval != guestval) {
|
||||
msrval = setguest ? guestval : hostval;
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
|
||||
* MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
|
||||
|
@ -326,8 +326,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
|
||||
* msr_set_bit() uses the safe accessors, too, even if the MSR
|
||||
* is not present.
|
||||
*/
|
||||
msr_set_bit(MSR_F10H_DECFG,
|
||||
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
|
||||
msr_set_bit(MSR_AMD64_DE_CFG,
|
||||
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
|
||||
|
||||
/* A serializing LFENCE stops RDTSC speculation */
|
||||
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <linux/context_tracking.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/kmsan.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
@ -301,6 +302,12 @@ static noinstr bool handle_bug(struct pt_regs *regs)
|
||||
{
|
||||
bool handled = false;
|
||||
|
||||
/*
|
||||
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
|
||||
* is a rare case that uses @regs without passing them to
|
||||
* irqentry_enter().
|
||||
*/
|
||||
kmsan_unpoison_entry_regs(regs);
|
||||
if (!is_valid_bugaddr(regs->ip))
|
||||
return handled;
|
||||
|
||||
|
2
arch/x86/kvm/.gitignore
vendored
Normal file
2
arch/x86/kvm/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/kvm-asm-offsets.s
|
||||
/kvm-asm-offsets.h
|
@ -34,3 +34,15 @@ endif
|
||||
obj-$(CONFIG_KVM) += kvm.o
|
||||
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
|
||||
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
|
||||
|
||||
AFLAGS_svm/vmenter.o := -iquote $(obj)
|
||||
$(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
|
||||
|
||||
AFLAGS_vmx/vmenter.o := -iquote $(obj)
|
||||
$(obj)/vmx/vmenter.o: $(obj)/kvm-asm-offsets.h
|
||||
|
||||
$(obj)/kvm-asm-offsets.h: $(obj)/kvm-asm-offsets.s FORCE
|
||||
$(call filechk,offsets,__KVM_ASM_OFFSETS_H__)
|
||||
|
||||
targets += kvm-asm-offsets.s
|
||||
clean-files += kvm-asm-offsets.h
|
||||
|
29
arch/x86/kvm/kvm-asm-offsets.c
Normal file
29
arch/x86/kvm/kvm-asm-offsets.c
Normal file
@ -0,0 +1,29 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Generate definitions needed by assembly language modules.
|
||||
* This code generates raw asm output which is post-processed to extract
|
||||
* and format the required data.
|
||||
*/
|
||||
#define COMPILE_OFFSETS
|
||||
|
||||
#include <linux/kbuild.h>
|
||||
#include "vmx/vmx.h"
|
||||
#include "svm/svm.h"
|
||||
|
||||
static void __used common(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_KVM_AMD)) {
|
||||
BLANK();
|
||||
OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
|
||||
OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
|
||||
OFFSET(SVM_spec_ctrl, vcpu_svm, spec_ctrl);
|
||||
OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
|
||||
OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
|
||||
OFFSET(SD_save_area_pa, svm_cpu_data, save_area_pa);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_KVM_INTEL)) {
|
||||
BLANK();
|
||||
OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
|
||||
}
|
||||
}
|
@ -6056,7 +6056,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
|
||||
|
||||
write_lock(&kvm->mmu_lock);
|
||||
|
||||
kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);
|
||||
kvm_mmu_invalidate_begin(kvm, 0, -1ul);
|
||||
|
||||
flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
|
||||
|
||||
@ -6070,7 +6070,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
|
||||
kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
|
||||
gfn_end - gfn_start);
|
||||
|
||||
kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);
|
||||
kvm_mmu_invalidate_end(kvm, 0, -1ul);
|
||||
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
|
||||
* code. Each pmc, stored in kvm_pmc.idx field, is unique across
|
||||
* all perf counters (both gp and fixed). The mapping relationship
|
||||
* between pmc and perf counters is as the following:
|
||||
* * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
|
||||
* * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
|
||||
* [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
|
||||
* * AMD: [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
|
||||
* and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
|
||||
|
@ -192,9 +192,10 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
|
||||
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
|
||||
int i;
|
||||
|
||||
BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);
|
||||
BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
|
||||
BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);
|
||||
|
||||
for (i = 0; i < AMD64_NUM_COUNTERS_CORE ; i++) {
|
||||
for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC ; i++) {
|
||||
pmu->gp_counters[i].type = KVM_PMC_GP;
|
||||
pmu->gp_counters[i].vcpu = vcpu;
|
||||
pmu->gp_counters[i].idx = i;
|
||||
@ -207,7 +208,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
|
||||
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
|
||||
for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
|
||||
struct kvm_pmc *pmc = &pmu->gp_counters[i];
|
||||
|
||||
pmc_stop_counter(pmc);
|
||||
|
@ -196,7 +196,7 @@ static void sev_asid_free(struct kvm_sev_info *sev)
|
||||
__set_bit(sev->asid, sev_reclaim_asid_bitmap);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
sd = per_cpu(svm_data, cpu);
|
||||
sd = per_cpu_ptr(&svm_data, cpu);
|
||||
sd->sev_vmcbs[sev->asid] = NULL;
|
||||
}
|
||||
|
||||
@ -605,7 +605,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
|
||||
save->dr6 = svm->vcpu.arch.dr6;
|
||||
|
||||
pr_debug("Virtual Machine Save Area (VMSA):\n");
|
||||
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
|
||||
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2600,7 +2600,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
|
||||
|
||||
void pre_sev_run(struct vcpu_svm *svm, int cpu)
|
||||
{
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
|
||||
int asid = sev_get_asid(svm->vcpu.kvm);
|
||||
|
||||
/* Assign the asid allocated with this SEV guest */
|
||||
|
@ -245,7 +245,7 @@ struct kvm_ldttss_desc {
|
||||
u32 zero1;
|
||||
} __attribute__((packed));
|
||||
|
||||
DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
|
||||
DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
|
||||
|
||||
/*
|
||||
* Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
|
||||
@ -581,12 +581,7 @@ static int svm_hardware_enable(void)
|
||||
pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
|
||||
return -EINVAL;
|
||||
}
|
||||
sd = per_cpu(svm_data, me);
|
||||
if (!sd) {
|
||||
pr_err("%s: svm_data is NULL on %d\n", __func__, me);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sd = per_cpu_ptr(&svm_data, me);
|
||||
sd->asid_generation = 1;
|
||||
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
|
||||
sd->next_asid = sd->max_asid + 1;
|
||||
@ -597,7 +592,7 @@ static int svm_hardware_enable(void)
|
||||
|
||||
wrmsrl(MSR_EFER, efer | EFER_SVME);
|
||||
|
||||
wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
|
||||
wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
|
||||
|
||||
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
|
||||
/*
|
||||
@ -646,42 +641,37 @@ static int svm_hardware_enable(void)
|
||||
|
||||
static void svm_cpu_uninit(int cpu)
|
||||
{
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
|
||||
|
||||
if (!sd)
|
||||
if (!sd->save_area)
|
||||
return;
|
||||
|
||||
per_cpu(svm_data, cpu) = NULL;
|
||||
kfree(sd->sev_vmcbs);
|
||||
__free_page(sd->save_area);
|
||||
kfree(sd);
|
||||
sd->save_area_pa = 0;
|
||||
sd->save_area = NULL;
|
||||
}
|
||||
|
||||
static int svm_cpu_init(int cpu)
|
||||
{
|
||||
struct svm_cpu_data *sd;
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
|
||||
int ret = -ENOMEM;
|
||||
|
||||
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
|
||||
if (!sd)
|
||||
return ret;
|
||||
sd->cpu = cpu;
|
||||
memset(sd, 0, sizeof(struct svm_cpu_data));
|
||||
sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||
if (!sd->save_area)
|
||||
goto free_cpu_data;
|
||||
return ret;
|
||||
|
||||
ret = sev_cpu_init(sd);
|
||||
if (ret)
|
||||
goto free_save_area;
|
||||
|
||||
per_cpu(svm_data, cpu) = sd;
|
||||
|
||||
sd->save_area_pa = __sme_page_pa(sd->save_area);
|
||||
return 0;
|
||||
|
||||
free_save_area:
|
||||
__free_page(sd->save_area);
|
||||
free_cpu_data:
|
||||
kfree(sd);
|
||||
sd->save_area = NULL;
|
||||
return ret;
|
||||
|
||||
}
|
||||
@ -730,6 +720,15 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
|
||||
u32 offset;
|
||||
u32 *msrpm;
|
||||
|
||||
/*
|
||||
* For non-nested case:
|
||||
* If the L01 MSR bitmap does not intercept the MSR, then we need to
|
||||
* save it.
|
||||
*
|
||||
* For nested case:
|
||||
* If the L02 MSR bitmap does not intercept the MSR, then we need to
|
||||
* save it.
|
||||
*/
|
||||
msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
|
||||
to_svm(vcpu)->msrpm;
|
||||
|
||||
@ -1425,7 +1424,7 @@ static void svm_clear_current_vmcb(struct vmcb *vmcb)
|
||||
int i;
|
||||
|
||||
for_each_online_cpu(i)
|
||||
cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
|
||||
cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
|
||||
}
|
||||
|
||||
static void svm_vcpu_free(struct kvm_vcpu *vcpu)
|
||||
@ -1450,7 +1449,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
|
||||
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
|
||||
|
||||
if (sev_es_guest(vcpu->kvm))
|
||||
sev_es_unmap_ghcb(svm);
|
||||
@ -1462,7 +1461,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
|
||||
* Save additional host state that will be restored on VMEXIT (sev-es)
|
||||
* or subsequent vmload of host save area.
|
||||
*/
|
||||
vmsave(__sme_page_pa(sd->save_area));
|
||||
vmsave(sd->save_area_pa);
|
||||
if (sev_es_guest(vcpu->kvm)) {
|
||||
struct sev_es_save_area *hostsa;
|
||||
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
|
||||
@ -1487,7 +1486,7 @@ static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
|
||||
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
|
||||
|
||||
if (sd->current_vmcb != svm->vmcb) {
|
||||
sd->current_vmcb = svm->vmcb;
|
||||
@ -2710,9 +2709,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
|
||||
msr->data = 0;
|
||||
|
||||
switch (msr->index) {
|
||||
case MSR_F10H_DECFG:
|
||||
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
|
||||
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
|
||||
case MSR_AMD64_DE_CFG:
|
||||
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
|
||||
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
|
||||
break;
|
||||
case MSR_IA32_PERF_CAPABILITIES:
|
||||
return 0;
|
||||
@ -2813,7 +2812,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
msr_info->data = 0x1E;
|
||||
}
|
||||
break;
|
||||
case MSR_F10H_DECFG:
|
||||
case MSR_AMD64_DE_CFG:
|
||||
msr_info->data = svm->msr_decfg;
|
||||
break;
|
||||
default:
|
||||
@ -3042,7 +3041,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
case MSR_VM_IGNNE:
|
||||
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
||||
break;
|
||||
case MSR_F10H_DECFG: {
|
||||
case MSR_AMD64_DE_CFG: {
|
||||
struct kvm_msr_entry msr_entry;
|
||||
|
||||
msr_entry.index = msr->index;
|
||||
@ -3443,7 +3442,7 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
|
||||
|
||||
static void reload_tss(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
|
||||
|
||||
sd->tss_desc->type = 9; /* available 32/64-bit TSS */
|
||||
load_TR_desc();
|
||||
@ -3451,7 +3450,7 @@ static void reload_tss(struct kvm_vcpu *vcpu)
|
||||
|
||||
static void pre_svm_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
|
||||
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
||||
/*
|
||||
@ -3911,30 +3910,16 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
|
||||
return EXIT_FASTPATH_NONE;
|
||||
}
|
||||
|
||||
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
unsigned long vmcb_pa = svm->current_vmcb->pa;
|
||||
|
||||
guest_state_enter_irqoff();
|
||||
|
||||
if (sev_es_guest(vcpu->kvm)) {
|
||||
__svm_sev_es_vcpu_run(vmcb_pa);
|
||||
} else {
|
||||
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
|
||||
|
||||
/*
|
||||
* Use a single vmcb (vmcb01 because it's always valid) for
|
||||
* context switching guest state via VMLOAD/VMSAVE, that way
|
||||
* the state doesn't need to be copied between vmcb01 and
|
||||
* vmcb02 when switching vmcbs for nested virtualization.
|
||||
*/
|
||||
vmload(svm->vmcb01.pa);
|
||||
__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
|
||||
vmsave(svm->vmcb01.pa);
|
||||
|
||||
vmload(__sme_page_pa(sd->save_area));
|
||||
}
|
||||
if (sev_es_guest(vcpu->kvm))
|
||||
__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
|
||||
else
|
||||
__svm_vcpu_run(svm, spec_ctrl_intercepted);
|
||||
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
@ -3942,6 +3927,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
|
||||
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
|
||||
|
||||
trace_kvm_entry(vcpu);
|
||||
|
||||
@ -3998,34 +3984,15 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
* being speculatively taken.
|
||||
*/
|
||||
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
|
||||
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
|
||||
x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
|
||||
|
||||
svm_vcpu_enter_exit(vcpu);
|
||||
|
||||
/*
|
||||
* We do not use IBRS in the kernel. If this vCPU has used the
|
||||
* SPEC_CTRL MSR it may have left it on; save the value and
|
||||
* turn it off. This is much more efficient than blindly adding
|
||||
* it to the atomic save/restore list. Especially as the former
|
||||
* (Saving guest MSRs on vmexit) doesn't even exist in KVM.
|
||||
*
|
||||
* For non-nested case:
|
||||
* If the L01 MSR bitmap does not intercept the MSR, then we need to
|
||||
* save it.
|
||||
*
|
||||
* For nested case:
|
||||
* If the L02 MSR bitmap does not intercept the MSR, then we need to
|
||||
* save it.
|
||||
*/
|
||||
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
|
||||
unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
|
||||
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
|
||||
svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
|
||||
|
||||
if (!sev_es_guest(vcpu->kvm))
|
||||
reload_tss(vcpu);
|
||||
|
||||
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
|
||||
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
|
||||
x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
|
||||
|
||||
if (!sev_es_guest(vcpu->kvm)) {
|
||||
vcpu->arch.cr2 = svm->vmcb->save.cr2;
|
||||
|
@ -209,7 +209,6 @@ struct vcpu_svm {
|
||||
struct vmcb *vmcb;
|
||||
struct kvm_vmcb_info vmcb01;
|
||||
struct kvm_vmcb_info *current_vmcb;
|
||||
struct svm_cpu_data *svm_data;
|
||||
u32 asid;
|
||||
u32 sysenter_esp_hi;
|
||||
u32 sysenter_eip_hi;
|
||||
@ -281,8 +280,6 @@ struct vcpu_svm {
|
||||
};
|
||||
|
||||
struct svm_cpu_data {
|
||||
int cpu;
|
||||
|
||||
u64 asid_generation;
|
||||
u32 max_asid;
|
||||
u32 next_asid;
|
||||
@ -290,13 +287,15 @@ struct svm_cpu_data {
|
||||
struct kvm_ldttss_desc *tss_desc;
|
||||
|
||||
struct page *save_area;
|
||||
unsigned long save_area_pa;
|
||||
|
||||
struct vmcb *current_vmcb;
|
||||
|
||||
/* index = sev_asid, value = vmcb pointer */
|
||||
struct vmcb **sev_vmcbs;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);
|
||||
DECLARE_PER_CPU(struct svm_cpu_data, svm_data);
|
||||
|
||||
void recalc_intercepts(struct vcpu_svm *svm);
|
||||
|
||||
@ -683,7 +682,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
|
||||
|
||||
/* vmenter.S */
|
||||
|
||||
void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
|
||||
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
|
||||
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
|
||||
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
|
||||
|
||||
#endif
|
||||
|
@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
|
||||
svm_asm1(vmsave, "a" (pa), "memory");
|
||||
}
|
||||
|
||||
static __always_inline void vmload(unsigned long pa)
|
||||
{
|
||||
svm_asm1(vmload, "a" (pa), "memory");
|
||||
}
|
||||
|
||||
#endif /* __KVM_X86_SVM_OPS_H */
|
||||
|
@ -4,35 +4,97 @@
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
ALTERNATIVE_2 "", \
"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
/*
* SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
* host's, write the MSR. This is kept out-of-line so that the common
* case does not have to jump.
*
* IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
* there must not be any returns or indirect branches between this code
* and vmentry.
*/
movl SVM_spec_ctrl(%_ASM_DI), %eax
cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
je 801b
mov $MSR_IA32_SPEC_CTRL, %ecx
xor %edx, %edx
wrmsr
jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
ALTERNATIVE_2 "", \
"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
/* Same for after vmexit. */
mov $MSR_IA32_SPEC_CTRL, %ecx

/*
* Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
* if it was not intercepted during guest execution.
*/
cmpb $0, (%_ASM_SP)
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

/* Now restore the host value of the MSR if different from the guest's. */
movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
cmp SVM_spec_ctrl(%_ASM_DI), %eax
je 901b
xor %edx, %edx
wrmsr
jmp 901b
.endm


/**
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
* @vmcb_pa: unsigned long
* @regs: unsigned long * (to guest registers)
* @svm: struct vcpu_svm *
* @spec_ctrl_intercepted: bool
*/
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
@ -47,49 +109,71 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif
push %_ASM_BX

/* Save @regs. */
/*
* Save variables needed after vmexit on the stack, in inverse
* order compared to when they are needed.
*/

/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
push %_ASM_ARG2

/* Save @vmcb. */
/* Needed to restore access to percpu variables. */
__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

/* Finally save @svm. */
push %_ASM_ARG1

/* Move @regs to RAX. */
mov %_ASM_ARG2, %_ASM_AX
.ifnc _ASM_ARG1, _ASM_DI
/*
* Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
* and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
*/
mov %_ASM_ARG1, %_ASM_DI
.endif

/* Clobbers RAX, RCX, RDX. */
RESTORE_GUEST_SPEC_CTRL

/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
* the state doesn't need to be copied between vmcb01 and
* vmcb02 when switching vmcbs for nested virtualization.
*/
mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1: vmload %_ASM_AX
2:

/* Get svm->current_vmcb->pa into RAX. */
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

/* Load guest registers. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
mov VCPU_RDX(%_ASM_AX), %_ASM_DX
mov VCPU_RBX(%_ASM_AX), %_ASM_BX
mov VCPU_RBP(%_ASM_AX), %_ASM_BP
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
mov VCPU_RCX(%_ASM_DI), %_ASM_CX
mov VCPU_RDX(%_ASM_DI), %_ASM_DX
mov VCPU_RBX(%_ASM_DI), %_ASM_BX
mov VCPU_RBP(%_ASM_DI), %_ASM_BP
mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_AX), %r8
mov VCPU_R9 (%_ASM_AX), %r9
mov VCPU_R10(%_ASM_AX), %r10
mov VCPU_R11(%_ASM_AX), %r11
mov VCPU_R12(%_ASM_AX), %r12
mov VCPU_R13(%_ASM_AX), %r13
mov VCPU_R14(%_ASM_AX), %r14
mov VCPU_R15(%_ASM_AX), %r15
mov VCPU_R8 (%_ASM_DI), %r8
mov VCPU_R9 (%_ASM_DI), %r9
mov VCPU_R10(%_ASM_DI), %r10
mov VCPU_R11(%_ASM_DI), %r11
mov VCPU_R12(%_ASM_DI), %r12
mov VCPU_R13(%_ASM_DI), %r13
mov VCPU_R14(%_ASM_DI), %r14
mov VCPU_R15(%_ASM_DI), %r15
#endif

/* "POP" @vmcb to RAX. */
pop %_ASM_AX
mov VCPU_RDI(%_ASM_DI), %_ASM_DI

/* Enter guest mode */
sti

1: vmrun %_ASM_AX
3: vmrun %_ASM_AX
4:
cli

2: cli

#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

/* "POP" @regs to RAX. */
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX

/* Save all guest registers. */
@ -110,6 +194,26 @@ SYM_FUNC_START(__svm_vcpu_run)
mov %r15, VCPU_R15(%_ASM_AX)
#endif

/* @svm can stay in RDI from now on. */
mov %_ASM_AX, %_ASM_DI

mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5: vmsave %_ASM_AX
6:

/* Restores GSBASE among other things, allowing access to percpu data. */
pop %_ASM_AX
7: vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

/* Clobbers RAX, RCX, RDX. */
RESTORE_HOST_SPEC_CTRL

/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
* untrained as soon as we exit the VM and are back to the
@ -145,6 +249,9 @@ SYM_FUNC_START(__svm_vcpu_run)
xor %r15d, %r15d
#endif

/* "Pop" @spec_ctrl_intercepted. */
pop %_ASM_BX

pop %_ASM_BX

#ifdef CONFIG_X86_64
@ -159,17 +266,33 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %_ASM_BP
RET

3: cmpb $0, kvm_rebooting
RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY

10: cmpb $0, kvm_rebooting
jne 2b
ud2
30: cmpb $0, kvm_rebooting
jne 4b
ud2
50: cmpb $0, kvm_rebooting
jne 6b
ud2
70: cmpb $0, kvm_rebooting
jne 8b
ud2

_ASM_EXTABLE(1b, 3b)
_ASM_EXTABLE(1b, 10b)
_ASM_EXTABLE(3b, 30b)
_ASM_EXTABLE(5b, 50b)
_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
* __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
* @vmcb_pa: unsigned long
* @svm: struct vcpu_svm *
* @spec_ctrl_intercepted: bool
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
push %_ASM_BP
@ -184,8 +307,31 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
#endif
push %_ASM_BX

/* Move @vmcb to RAX. */
mov %_ASM_ARG1, %_ASM_AX
/*
* Save variables needed after vmexit on the stack, in inverse
* order compared to when they are needed.
*/

/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
push %_ASM_ARG2

/* Save @svm. */
push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
/*
* Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
* and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
*/
mov %_ASM_ARG1, %_ASM_DI
.endif

/* Clobbers RAX, RCX, RDX. */
RESTORE_GUEST_SPEC_CTRL

/* Get svm->current_vmcb->pa into RAX. */
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

/* Enter guest mode */
sti
@ -194,11 +340,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)

2: cli

/* Pop @svm to RDI, guest registers have been saved already. */
pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

/* Clobbers RAX, RCX, RDX. */
RESTORE_HOST_SPEC_CTRL

/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
* untrained as soon as we exit the VM and are back to the
@ -208,6 +360,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
*/
UNTRAIN_RET

/* "Pop" @spec_ctrl_intercepted. */
pop %_ASM_BX

pop %_ASM_BX

#ifdef CONFIG_X86_64
@ -222,6 +377,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
pop %_ASM_BP
RET

RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY

3: cmpb $0, kvm_rebooting
jne 2b
ud2
@ -617,7 +617,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
@ -643,7 +643,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
struct kvm_pmc *pmc = NULL;
int i;

for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
pmc = &pmu->gp_counters[i];

pmc_stop_counter(pmc);
@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/percpu.h>
#include <asm/segment.h>
#include "kvm-asm-offsets.h"
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)
@ -1438,32 +1438,27 @@ static const u32 msrs_to_save_all[] = {
MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

/* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,

/* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,

MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};

@ -1562,7 +1557,7 @@ static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,

MSR_F10H_DECFG,
MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_PERF_CAPABILITIES,
@ -7041,14 +7036,14 @@ static void kvm_init_msr_list(void)
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
break;
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
continue;
break;
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
continue;
break;
case MSR_IA32_XFD:
@ -6,6 +6,7 @@

#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/instrumented.h>

#include <asm/tlbflush.h>

@ -44,7 +45,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
* called from other contexts.
*/
pagefault_disable();
instrument_copy_from_user_before(to, from, n);
ret = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, ret);
pagefault_enable();

return ret;
@ -37,8 +37,12 @@ int pmd_huge(pmd_t pmd)
*/
int pud_huge(pud_t pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
return !pud_none(pud) &&
(pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
#else
return 0;
#endif
}

#ifdef CONFIG_HUGETLB_PAGE
@ -11,7 +11,6 @@
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
@ -389,18 +388,6 @@ out:
return ret;
}

int __init bpf_arch_init_dispatcher_early(void *ip)
{
const u8 *nop_insn = x86_nops[5];

if (is_endbr(*(u32 *)ip))
ip += ENDBR_INSN_SIZE;

if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
return 0;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
@ -519,6 +519,7 @@ static void pm_save_spec_msr(void)
MSR_TSX_FORCE_ABORT,
MSR_IA32_MCU_OPT_CTRL,
MSR_AMD64_LS_CFG,
MSR_AMD64_DE_CFG,
};

msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
@ -23,6 +23,7 @@
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
@ -113,7 +114,7 @@ static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);
static int __init parse_xen_msr_safe(char *str)
{
if (str)
return strtobool(str, &xen_msr_safe);
return kstrtobool(str, &xen_msr_safe);
return -EINVAL;
}
early_param("xen_msr_safe", parse_xen_msr_safe);
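For context on the strtobool -> kstrtobool switch in the Xen hunks above and below: kstrtobool() (declared in <linux/kstrtox.h>) is the maintained boolean parser and accepts the usual "0"/"1", "y"/"n", "on"/"off" spellings. The snippet below is only an illustrative sketch of how such an early_param hook is typically wired up; the parameter and variable names are hypothetical and not part of this patch.

#include <linux/init.h>
#include <linux/kstrtox.h>
#include <linux/types.h>

/* Hypothetical module-local flag, parsed the same way as xen_msr_safe above. */
static bool demo_feature_enabled = true;

static int __init parse_demo_feature(char *str)
{
	/* kstrtobool() returns 0 on success and fills in the bool result. */
	if (str)
		return kstrtobool(str, &demo_feature_enabled);
	return -EINVAL;
}
early_param("demo_feature", parse_demo_feature);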
@ -7,6 +7,7 @@

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
@ -85,7 +86,7 @@ static void __init xen_parse_512gb(void)
arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
if (!arg)
val = true;
else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
return;

xen_512gb_limit = val;
@ -610,6 +610,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
struct bfq_group *bfqg;

while (blkg) {
if (!blkg->online) {
blkg = blkg->parent;
continue;
}
bfqg = blkg_to_bfqg(blkg);
if (bfqg->online) {
bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
@ -87,8 +87,8 @@ struct opal_dev {
u64 lowest_lba;

size_t pos;
u8 cmd[IO_BUFFER_LENGTH];
u8 resp[IO_BUFFER_LENGTH];
u8 *cmd;
u8 *resp;

struct parsed_resp parsed;
size_t prev_d_len;
@ -2175,6 +2175,8 @@ void free_opal_dev(struct opal_dev *dev)
return;

clean_opal_dev(dev);
kfree(dev->resp);
kfree(dev->cmd);
kfree(dev);
}
EXPORT_SYMBOL(free_opal_dev);
@ -2187,6 +2189,18 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
if (!dev)
return NULL;

/*
* Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
* sure the allocated buffer is DMA-safe in that regard.
*/
dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->cmd)
goto err_free_dev;

dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->resp)
goto err_free_cmd;

INIT_LIST_HEAD(&dev->unlk_lst);
mutex_init(&dev->dev_lock);
dev->flags = 0;
@ -2194,11 +2208,21 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
dev->send_recv = send_recv;
if (check_opal_support(dev) != 0) {
pr_debug("Opal is not supported on this device\n");
kfree(dev);
return NULL;
goto err_free_resp;
}

return dev;

err_free_resp:
kfree(dev->resp);

err_free_cmd:
kfree(dev->cmd);

err_free_dev:
kfree(dev);

return NULL;
}
EXPORT_SYMBOL(init_opal_dev);
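The init_opal_dev() hunk above moves the command/response buffers from embedded arrays to kmalloc'ed allocations (so they are DMA-safe) and unwinds them with goto labels on failure. Below is a generic, minimal sketch of that allocate-then-unwind shape; the names are hypothetical illustrations, not the driver's API.

#include <linux/slab.h>

struct demo_dev {
	u8 *cmd;
	u8 *resp;
};

/* Allocate the device and both buffers, releasing what was taken on any failure. */
static struct demo_dev *demo_dev_create(size_t buf_len)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	dev->cmd = kmalloc(buf_len, GFP_KERNEL);
	if (!dev->cmd)
		goto err_free_dev;

	dev->resp = kmalloc(buf_len, GFP_KERNEL);
	if (!dev->resp)
		goto err_free_cmd;

	return dev;

err_free_cmd:
	kfree(dev->cmd);
err_free_dev:
	kfree(dev);
	return NULL;
}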
@ -3264,6 +3264,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@ -3922,6 +3923,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return ata_scsi_write_same_xlat;

case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
@ -3962,9 +3964,19 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)

int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;

/*
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
* therefore we must check if EH is pending, while holding ap->lock.
*/
if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
return SCSI_MLQUEUE_DEVICE_BUSY;

if (unlikely(!scmd->cmd_len))
goto bad_cdb_len;

@ -4145,6 +4157,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
fallthrough;

/* no-op's, complete with success */
@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent,
pm_runtime_enable(dev);
pm_runtime_forbid(dev);

transport_add_device(dev);
error = transport_add_device(dev);
if (error)
goto tport_transport_add_err;
transport_configure_device(dev);

error = ata_tlink_add(&ap->link);
@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent,

tport_link_err:
transport_remove_device(dev);
tport_transport_add_err:
device_del(dev);

tport_err:
transport_destroy_device(dev);
put_device(dev);
ata_host_put(ap->host);
return error;
}

@ -456,7 +458,9 @@ int ata_tlink_add(struct ata_link *link)
goto tlink_err;
}

transport_add_device(dev);
error = transport_add_device(dev);
if (error)
goto tlink_transport_err;
transport_configure_device(dev);

ata_for_each_dev(ata_dev, link, ALL) {
@ -471,6 +475,7 @@ int ata_tlink_add(struct ata_link *link)
ata_tdev_delete(ata_dev);
}
transport_remove_device(dev);
tlink_transport_err:
device_del(dev);
tlink_err:
transport_destroy_device(dev);
@ -708,7 +713,13 @@ static int ata_tdev_add(struct ata_device *ata_dev)
return error;
}

transport_add_device(dev);
error = transport_add_device(dev);
if (error) {
device_del(dev);
ata_tdev_free(ata_dev);
return error;
}

transport_configure_device(dev);
return 0;
}
@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@ -395,25 +396,25 @@ static u64 notrace read_hv_sched_clock_tsc(void)

static void suspend_hv_clock_tsc(struct clocksource *arg)
{
u64 tsc_msr;
union hv_reference_tsc_msr tsc_msr;

/* Disable the TSC page */
tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= ~BIT_ULL(0);
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.enable = 0;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}


static void resume_hv_clock_tsc(struct clocksource *arg)
{
phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
u64 tsc_msr;
union hv_reference_tsc_msr tsc_msr;

/* Re-enable the TSC page */
tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = HVPFN_DOWN(phys_addr);
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
}

#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@ -495,7 +496,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock) {}

static bool __init hv_init_tsc_clocksource(void)
{
u64 tsc_msr;
union hv_reference_tsc_msr tsc_msr;
phys_addr_t phys_addr;

if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
@ -530,10 +531,10 @@ static bool __init hv_init_tsc_clocksource(void)
* (which already has at least the low 12 bits set to zero since
* it is page aligned). Also set the "enable" bit, which is bit 0.
*/
tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr &= GENMASK_ULL(11, 0);
tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = HVPFN_DOWN(phys_addr);
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);

clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
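The hyperv_timer.c hunks above replace open-coded masking and bit twiddling of the reference-TSC MSR with a typed union so the enable bit and page frame number are set by name. The sketch below only illustrates that general pattern; the field widths and names are assumptions for illustration, not the authoritative hyperv-tlfs.h definition.

#include <linux/types.h>

/* Illustrative layout only; the real union lives in the Hyper-V TLFS headers. */
union demo_reference_tsc_msr {
	u64 as_uint64;
	struct {
		u64 enable:1;		/* bit 0: TSC page enable */
		u64 reserved:11;	/* bits 1..11 */
		u64 pfn:52;		/* bits 12..63: page frame number */
	};
};

static u64 demo_enable_tsc_page(u64 old_msr, u64 phys_addr)
{
	union demo_reference_tsc_msr msr = { .as_uint64 = old_msr };

	msr.enable = 1;			/* replaces OR-ing in BIT_ULL(0) */
	msr.pfn = phys_addr >> 12;	/* replaces masking with GENMASK_ULL(11, 0) */
	return msr.as_uint64;
}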
@ -493,7 +493,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &ad->channels[index].chan;
|
||||
return dma_get_slave_channel(&ad->channels[index].chan);
|
||||
}
|
||||
|
||||
static int admac_drain_reports(struct admac_data *ad, int channo)
|
||||
|
@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
|
||||
ATC_SPIP_BOUNDARY(first->boundary));
|
||||
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
|
||||
ATC_DPIP_BOUNDARY(first->boundary));
|
||||
/* Don't allow CPU to reorder channel enable. */
|
||||
wmb();
|
||||
dma_writel(atdma, CHER, atchan->mask);
|
||||
|
||||
vdbg_dump_regs(atchan);
|
||||
@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
struct at_desc *desc_first = atc_first_active(atchan);
|
||||
struct at_desc *desc;
|
||||
int ret;
|
||||
u32 ctrla, dscr, trials;
|
||||
u32 ctrla, dscr;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* If the cookie doesn't match to the currently running transfer then
|
||||
@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
dscr = channel_readl(atchan, DSCR);
|
||||
rmb(); /* ensure DSCR is read before CTRLA */
|
||||
ctrla = channel_readl(atchan, CTRLA);
|
||||
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
|
||||
for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
|
||||
u32 new_dscr;
|
||||
|
||||
rmb(); /* ensure DSCR is read after CTRLA */
|
||||
@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
rmb(); /* ensure DSCR is read before CTRLA */
|
||||
ctrla = channel_readl(atchan, CTRLA);
|
||||
}
|
||||
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
|
||||
if (unlikely(i == ATC_MAX_DSCR_TRIALS))
|
||||
return -ETIMEDOUT;
|
||||
|
||||
/* for the first descriptor we can be more accurate */
|
||||
@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
||||
if (!atc_chan_is_cyclic(atchan))
|
||||
dma_cookie_complete(txd);
|
||||
|
||||
/* If the transfer was a memset, free our temporary buffer */
|
||||
if (desc->memset_buffer) {
|
||||
dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
|
||||
desc->memset_paddr);
|
||||
desc->memset_buffer = false;
|
||||
}
|
||||
|
||||
/* move children to free_list */
|
||||
list_splice_init(&desc->tx_list, &atchan->free_list);
|
||||
/* move myself to free_list */
|
||||
list_move(&desc->desc_node, &atchan->free_list);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
dma_descriptor_unmap(txd);
|
||||
@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_complete_all - finish work for all transactions
|
||||
* @atchan: channel to complete transactions for
|
||||
*
|
||||
* Eventually submit queued descriptors if any
|
||||
*
|
||||
* Assume channel is idle while calling this function
|
||||
* Called with atchan->lock held and bh disabled
|
||||
*/
|
||||
static void atc_complete_all(struct at_dma_chan *atchan)
|
||||
{
|
||||
struct at_desc *desc, *_desc;
|
||||
LIST_HEAD(list);
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
|
||||
/*
|
||||
* Submit queued descriptors ASAP, i.e. before we go through
|
||||
* the completed ones.
|
||||
*/
|
||||
if (!list_empty(&atchan->queue))
|
||||
atc_dostart(atchan, atc_first_queued(atchan));
|
||||
/* empty active_list now it is completed */
|
||||
list_splice_init(&atchan->active_list, &list);
|
||||
/* empty queue list by moving descriptors (if any) to active_list */
|
||||
list_splice_init(&atchan->queue, &atchan->active_list);
|
||||
|
||||
/* move children to free_list */
|
||||
list_splice_init(&desc->tx_list, &atchan->free_list);
|
||||
/* add myself to free_list */
|
||||
list_add(&desc->desc_node, &atchan->free_list);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
/* If the transfer was a memset, free our temporary buffer */
|
||||
if (desc->memset_buffer) {
|
||||
dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
|
||||
desc->memset_paddr);
|
||||
desc->memset_buffer = false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
|
||||
*/
|
||||
static void atc_advance_work(struct at_dma_chan *atchan)
|
||||
{
|
||||
struct at_desc *desc;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
ret = atc_chan_is_enabled(atchan);
|
||||
if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
|
||||
return spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
desc = atc_first_active(atchan);
|
||||
/* Remove the transfer node from the active list. */
|
||||
list_del_init(&desc->desc_node);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
if (list_empty(&atchan->active_list) ||
|
||||
list_is_singular(&atchan->active_list))
|
||||
return atc_complete_all(atchan);
|
||||
|
||||
atc_chain_complete(atchan, atc_first_active(atchan));
|
||||
atc_chain_complete(atchan, desc);
|
||||
|
||||
/* advance work */
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
atc_dostart(atchan, atc_first_active(atchan));
|
||||
if (!list_empty(&atchan->active_list)) {
|
||||
desc = atc_first_queued(atchan);
|
||||
list_move_tail(&desc->desc_node, &atchan->active_list);
|
||||
atc_dostart(atchan, desc);
|
||||
}
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
|
||||
static void atc_handle_error(struct at_dma_chan *atchan)
|
||||
{
|
||||
struct at_desc *bad_desc;
|
||||
struct at_desc *desc;
|
||||
struct at_desc *child;
|
||||
unsigned long flags;
|
||||
|
||||
@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
|
||||
bad_desc = atc_first_active(atchan);
|
||||
list_del_init(&bad_desc->desc_node);
|
||||
|
||||
/* As we are stopped, take advantage to push queued descriptors
|
||||
* in active_list */
|
||||
list_splice_init(&atchan->queue, atchan->active_list.prev);
|
||||
|
||||
/* Try to restart the controller */
|
||||
if (!list_empty(&atchan->active_list))
|
||||
atc_dostart(atchan, atc_first_active(atchan));
|
||||
if (!list_empty(&atchan->active_list)) {
|
||||
desc = atc_first_queued(atchan);
|
||||
list_move_tail(&desc->desc_node, &atchan->active_list);
|
||||
atc_dostart(atchan, desc);
|
||||
}
|
||||
|
||||
/*
|
||||
* KERN_CRITICAL may seem harsh, but since this only happens
|
||||
@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
cookie = dma_cookie_assign(tx);
|
||||
|
||||
if (list_empty(&atchan->active_list)) {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
|
||||
desc->txd.cookie);
|
||||
atc_dostart(atchan, desc);
|
||||
list_add_tail(&desc->desc_node, &atchan->active_list);
|
||||
} else {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
|
||||
desc->txd.cookie);
|
||||
list_add_tail(&desc->desc_node, &atchan->queue);
|
||||
}
|
||||
|
||||
list_add_tail(&desc->desc_node, &atchan->queue);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
|
||||
desc->txd.cookie);
|
||||
return cookie;
|
||||
}
|
||||
|
||||
@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_dma *atdma = to_at_dma(chan->device);
|
||||
int chan_id = atchan->chan_common.chan_id;
|
||||
struct at_desc *desc, *_desc;
|
||||
unsigned long flags;
|
||||
|
||||
LIST_HEAD(list);
|
||||
|
||||
dev_vdbg(chan2dev(chan), "%s\n", __func__);
|
||||
|
||||
/*
|
||||
@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
|
||||
cpu_relax();
|
||||
|
||||
/* active_list entries will end up before queued entries */
|
||||
list_splice_init(&atchan->queue, &list);
|
||||
list_splice_init(&atchan->active_list, &list);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
list_splice_tail_init(&atchan->queue, &atchan->free_list);
|
||||
list_splice_tail_init(&atchan->active_list, &atchan->free_list);
|
||||
|
||||
clear_bit(ATC_IS_PAUSED, &atchan->status);
|
||||
/* if channel dedicated to cyclic operations, free it */
|
||||
clear_bit(ATC_IS_CYCLIC, &atchan->status);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_issue_pending - try to finish work
|
||||
* atc_issue_pending - takes the first transaction descriptor in the pending
|
||||
* queue and starts the transfer.
|
||||
* @chan: target DMA channel
|
||||
*/
|
||||
static void atc_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_desc *desc;
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "issue_pending\n");
|
||||
|
||||
/* Not needed for cyclic transfers */
|
||||
if (atc_chan_is_cyclic(atchan))
|
||||
return;
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
|
||||
return spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
atc_advance_work(atchan);
|
||||
desc = atc_first_queued(atchan);
|
||||
list_move_tail(&desc->desc_node, &atchan->active_list);
|
||||
atc_dostart(atchan, desc);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
||||
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
|
||||
plat_dat->nr_channels);
|
||||
|
||||
dma_async_device_register(&atdma->dma_common);
|
||||
err = dma_async_device_register(&atdma->dma_common);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Unable to register: %d.\n", err);
|
||||
goto err_dma_async_device_register;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not return an error if the dmac node is not present in order to
|
||||
@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
||||
|
||||
err_of_dma_controller_register:
|
||||
dma_async_device_unregister(&atdma->dma_common);
|
||||
err_dma_async_device_register:
|
||||
dma_pool_destroy(atdma->memset_pool);
|
||||
err_memset_pool_create:
|
||||
dma_pool_destroy(atdma->dma_desc_pool);
|
||||
|
@ -186,13 +186,13 @@
|
||||
/* LLI == Linked List Item; aka DMA buffer descriptor */
|
||||
struct at_lli {
|
||||
/* values that are not changed by hardware */
|
||||
dma_addr_t saddr;
|
||||
dma_addr_t daddr;
|
||||
u32 saddr;
|
||||
u32 daddr;
|
||||
/* value that may get written back: */
|
||||
u32 ctrla;
|
||||
u32 ctrla;
|
||||
/* more values that are not changed by hardware */
|
||||
u32 ctrlb;
|
||||
dma_addr_t dscr; /* chain to next lli */
|
||||
u32 ctrlb;
|
||||
u32 dscr; /* chain to next lli */
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
|
||||
if (idxd->state != IDXD_DEV_ENABLED)
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* User type WQ is enabled only when SVA is enabled for two reasons:
|
||||
* - If no IOMMU or IOMMU Passthrough without SVA, userspace
|
||||
* can directly access physical address through the WQ.
|
||||
* - The IDXD cdev driver does not provide any ways to pin
|
||||
* user pages and translate the address from user VA to IOVA or
|
||||
* PA without IOMMU SVA. Therefore the application has no way
|
||||
* to instruct the device to perform DMA function. This makes
|
||||
* the cdev not usable for normal application usage.
|
||||
*/
|
||||
if (!device_user_pasid_enabled(idxd)) {
|
||||
idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
|
||||
dev_dbg(&idxd->pdev->dev,
|
||||
"User type WQ cannot be enabled without SVA.\n");
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
mutex_lock(&wq->wq_lock);
|
||||
wq->type = IDXD_WQT_USER;
|
||||
rc = drv_enable_wq(wq);
|
||||
|
@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
|
||||
clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
|
||||
memset(wq->name, 0, WQ_NAME_SIZE);
|
||||
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
|
||||
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
|
||||
idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
|
||||
if (wq->opcap_bmap)
|
||||
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
|
||||
}
|
||||
@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
|
||||
|
||||
void idxd_device_clear_state(struct idxd_device *idxd)
|
||||
{
|
||||
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
|
||||
return;
|
||||
/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
|
||||
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
|
||||
/*
|
||||
* Clearing wq state is protected by wq lock.
|
||||
* So no need to be protected by device lock.
|
||||
*/
|
||||
idxd_device_wqs_clear_state(idxd);
|
||||
|
||||
spin_lock(&idxd->dev_lock);
|
||||
idxd_groups_clear_state(idxd);
|
||||
idxd_engines_clear_state(idxd);
|
||||
} else {
|
||||
spin_lock(&idxd->dev_lock);
|
||||
}
|
||||
|
||||
idxd_device_wqs_clear_state(idxd);
|
||||
spin_lock(&idxd->dev_lock);
|
||||
idxd_groups_clear_state(idxd);
|
||||
idxd_engines_clear_state(idxd);
|
||||
idxd->state = IDXD_DEV_DISABLED;
|
||||
spin_unlock(&idxd->dev_lock);
|
||||
}
|
||||
@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
|
||||
|
||||
/* bytes 12-15 */
|
||||
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
|
||||
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
|
||||
idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
|
||||
|
||||
/* bytes 32-63 */
|
||||
if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
|
||||
@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
|
||||
wq->priority = wq->wqcfg->priority;
|
||||
|
||||
wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
|
||||
wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
|
||||
idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
|
||||
|
||||
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
|
||||
wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
|
||||
|
@ -548,6 +548,38 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
|
||||
return wq->client_count;
|
||||
};
|
||||
|
||||
/*
|
||||
* Intel IAA does not support batch processing.
|
||||
* The max batch size of device, max batch size of wq and
|
||||
* max batch shift of wqcfg should be always 0 on IAA.
|
||||
*/
|
||||
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
|
||||
u32 max_batch_size)
|
||||
{
|
||||
if (idxd_type == IDXD_TYPE_IAX)
|
||||
idxd->max_batch_size = 0;
|
||||
else
|
||||
idxd->max_batch_size = max_batch_size;
|
||||
}
|
||||
|
||||
static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
|
||||
u32 max_batch_size)
|
||||
{
|
||||
if (idxd_type == IDXD_TYPE_IAX)
|
||||
wq->max_batch_size = 0;
|
||||
else
|
||||
wq->max_batch_size = max_batch_size;
|
||||
}
|
||||
|
||||
static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
|
||||
u32 max_batch_shift)
|
||||
{
|
||||
if (idxd_type == IDXD_TYPE_IAX)
|
||||
wqcfg->max_batch_shift = 0;
|
||||
else
|
||||
wqcfg->max_batch_shift = max_batch_shift;
|
||||
}
|
||||
|
||||
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
|
||||
struct module *module, const char *mod_name);
|
||||
#define idxd_driver_register(driver) \
|
||||
|
@ -183,7 +183,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
|
||||
init_completion(&wq->wq_dead);
|
||||
init_completion(&wq->wq_resurrect);
|
||||
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
|
||||
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
|
||||
idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
|
||||
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
|
||||
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
|
||||
if (!wq->wqcfg) {
|
||||
@ -418,7 +418,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
|
||||
|
||||
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
|
||||
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
|
||||
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
|
||||
idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
|
||||
dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
|
||||
if (idxd->hw.gen_cap.config_en)
|
||||
set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
|
||||
|
@ -1046,7 +1046,7 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
|
||||
if (batch_size > idxd->max_batch_size)
|
||||
return -EINVAL;
|
||||
|
||||
wq->max_batch_size = (u32)batch_size;
|
||||
idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -893,6 +893,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
|
||||
tasklet_kill(&xor_dev->irq_tasklet);
|
||||
|
||||
clk_disable_unprepare(xor_dev->clk);
|
||||
clk_disable_unprepare(xor_dev->reg_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1247,14 +1247,14 @@ static int pxad_init_phys(struct platform_device *op,
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < nb_phy_chans; i++)
|
||||
if (platform_get_irq(op, i) > 0)
|
||||
if (platform_get_irq_optional(op, i) > 0)
|
||||
nr_irq++;
|
||||
|
||||
for (i = 0; i < nb_phy_chans; i++) {
|
||||
phy = &pdev->phys[i];
|
||||
phy->base = pdev->base;
|
||||
phy->idx = i;
|
||||
irq = platform_get_irq(op, i);
|
||||
irq = platform_get_irq_optional(op, i);
|
||||
if ((nr_irq > 1) && (irq > 0))
|
||||
ret = devm_request_irq(&op->dev, irq,
|
||||
pxad_chan_handler,
|
||||
|
@ -675,6 +675,8 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
|
||||
|
||||
chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
|
||||
|
||||
chan->status = DMA_PAUSED;
|
||||
|
||||
dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
|
||||
}
|
||||
|
||||
@ -789,9 +791,7 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
|
||||
if (status & STM32_DMA_TCI) {
|
||||
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
|
||||
if (scr & STM32_DMA_SCR_TCIE) {
|
||||
if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
|
||||
stm32_dma_handle_chan_paused(chan);
|
||||
else
|
||||
if (chan->status != DMA_PAUSED)
|
||||
stm32_dma_handle_chan_done(chan, scr);
|
||||
}
|
||||
status &= ~STM32_DMA_TCI;
|
||||
@ -838,13 +838,11 @@ static int stm32_dma_pause(struct dma_chan *c)
|
||||
return -EPERM;
|
||||
|
||||
spin_lock_irqsave(&chan->vchan.lock, flags);
|
||||
|
||||
ret = stm32_dma_disable_chan(chan);
|
||||
/*
|
||||
* A transfer complete flag is set to indicate the end of transfer due to the stream
|
||||
* interruption, so wait for interrupt
|
||||
*/
|
||||
if (!ret)
|
||||
chan->status = DMA_PAUSED;
|
||||
stm32_dma_handle_chan_paused(chan);
|
||||
|
||||
spin_unlock_irqrestore(&chan->vchan.lock, flags);
|
||||
|
||||
return ret;
|
||||
|
@ -1539,6 +1539,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(&config, 0, sizeof(config));
|
||||
config.request = dma_spec->args[0];
|
||||
config.priority_level = dma_spec->args[1];
|
||||
config.transfer_config = dma_spec->args[2];
|
||||
|
@ -299,6 +299,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
|
||||
ret = device_register(&tx_chn->common.chan_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Channel Device registration failed %d\n", ret);
|
||||
put_device(&tx_chn->common.chan_dev);
|
||||
tx_chn->common.chan_dev.parent = NULL;
|
||||
goto err;
|
||||
}
|
||||
@ -917,6 +918,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
|
||||
ret = device_register(&rx_chn->common.chan_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Channel Device registration failed %d\n", ret);
|
||||
put_device(&rx_chn->common.chan_dev);
|
||||
rx_chn->common.chan_dev.parent = NULL;
|
||||
goto err;
|
||||
}
|
||||
@ -1048,6 +1050,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
|
||||
ret = device_register(&rx_chn->common.chan_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Channel Device registration failed %d\n", ret);
|
||||
put_device(&rx_chn->common.chan_dev);
|
||||
rx_chn->common.chan_dev.parent = NULL;
|
||||
goto err;
|
||||
}
|
||||
|
@ -82,7 +82,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
|
||||
lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o
|
||||
|
||||
lib-$(CONFIG_ARM) += arm32-stub.o
|
||||
lib-$(CONFIG_ARM64) += arm64-stub.o
|
||||
lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o
|
||||
lib-$(CONFIG_X86) += x86-stub.o
|
||||
lib-$(CONFIG_RISCV) += riscv-stub.o
|
||||
lib-$(CONFIG_LOONGARCH) += loongarch-stub.o
|
||||
|
@ -15,6 +15,21 @@
|
||||
|
||||
#include "efistub.h"
|
||||
|
||||
static bool system_needs_vamap(void)
|
||||
{
|
||||
const u8 *type1_family = efi_get_smbios_string(1, family);
|
||||
|
||||
/*
|
||||
* Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
|
||||
* has not been called prior.
|
||||
*/
|
||||
if (!type1_family || strcmp(type1_family, "Altra"))
|
||||
return false;
|
||||
|
||||
efi_warn("Working around broken SetVirtualAddressMap()\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
efi_status_t check_platform_features(void)
|
||||
{
|
||||
u64 tg;
|
||||
@ -24,7 +39,7 @@ efi_status_t check_platform_features(void)
|
||||
* UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
|
||||
* unnecessary.
|
||||
*/
|
||||
if (VA_BITS_MIN >= 48)
|
||||
if (VA_BITS_MIN >= 48 && !system_needs_vamap())
|
||||
efi_novamap = true;
|
||||
|
||||
/* UEFI mandates support for 4 KB granularity, no need to check */
|
||||
|
@ -975,4 +975,32 @@ efi_enable_reset_attack_mitigation(void) { }
|
||||
|
||||
void efi_retrieve_tpm2_eventlog(void);
|
||||
|
||||
struct efi_smbios_record {
|
||||
u8 type;
|
||||
u8 length;
|
||||
u16 handle;
|
||||
};
|
||||
|
||||
struct efi_smbios_type1_record {
|
||||
struct efi_smbios_record header;
|
||||
|
||||
u8 manufacturer;
|
||||
u8 product_name;
|
||||
u8 version;
|
||||
u8 serial_number;
|
||||
efi_guid_t uuid;
|
||||
u8 wakeup_type;
|
||||
u8 sku_number;
|
||||
u8 family;
|
||||
};
|
||||
|
||||
#define efi_get_smbios_string(__type, __name) ({ \
|
||||
int size = sizeof(struct efi_smbios_type ## __type ## _record); \
|
||||
int off = offsetof(struct efi_smbios_type ## __type ## _record, \
|
||||
__name); \
|
||||
__efi_get_smbios_string(__type, off, size); \
|
||||
})
|
||||
|
||||
const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
|
||||
|
||||
#endif
|
||||
|
48
drivers/firmware/efi/libstub/smbios.c
Normal file
48
drivers/firmware/efi/libstub/smbios.c
Normal file
@ -0,0 +1,48 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
// Copyright 2022 Google LLC
|
||||
// Author: Ard Biesheuvel <ardb@google.com>
|
||||
|
||||
#include <linux/efi.h>
|
||||
|
||||
#include "efistub.h"
|
||||
|
||||
typedef struct efi_smbios_protocol efi_smbios_protocol_t;
|
||||
|
||||
struct efi_smbios_protocol {
|
||||
efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t,
|
||||
u16 *, struct efi_smbios_record *);
|
||||
efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *,
|
||||
unsigned long *, u8 *);
|
||||
efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16);
|
||||
efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *,
|
||||
struct efi_smbios_record **,
|
||||
efi_handle_t *);
|
||||
|
||||
u8 major_version;
|
||||
u8 minor_version;
|
||||
};
|
||||
|
||||
const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
|
||||
{
|
||||
struct efi_smbios_record *record;
|
||||
efi_smbios_protocol_t *smbios;
|
||||
efi_status_t status;
|
||||
u16 handle = 0xfffe;
|
||||
const u8 *strtable;
|
||||
|
||||
status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
|
||||
(void **)&smbios) ?:
|
||||
efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
|
||||
if (status != EFI_SUCCESS)
|
||||
return NULL;
|
||||
|
||||
strtable = (u8 *)record + recsize;
|
||||
for (int i = 1; i < ((u8 *)record)[offset]; i++) {
|
||||
int len = strlen(strtable);
|
||||
|
||||
if (!len)
|
||||
return NULL;
|
||||
strtable += len + 1;
|
||||
}
|
||||
return strtable;
|
||||
}
|
@ -992,6 +992,7 @@ out_free_user_pages:
|
||||
kvfree(e->user_pages);
|
||||
e->user_pages = NULL;
|
||||
}
|
||||
mutex_unlock(&p->bo_list->bo_list_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
|
||||
* happens while holding this lock anywhere to prevent deadlocks when
|
||||
* an MMU notifier runs in reclaim-FS context.
|
||||
*/
|
||||
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
|
||||
{
|
||||
mutex_lock(&vm->eviction_lock);
|
||||
vm->saved_flags = memalloc_noreclaim_save();
|
||||
}
|
||||
|
||||
static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
|
||||
{
|
||||
if (mutex_trylock(&vm->eviction_lock)) {
|
||||
vm->saved_flags = memalloc_noreclaim_save();
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
|
||||
{
|
||||
memalloc_noreclaim_restore(vm->saved_flags);
|
||||
mutex_unlock(&vm->eviction_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_bo_evicted - vm_bo is evicted
|
||||
*
|
||||
|
@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
|
||||
*/
|
||||
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
|
||||
{
|
||||
unsigned long flags;
|
||||
spinlock_t *lock;
|
||||
|
||||
/*
|
||||
* Workaround to stop racing between the fence signaling and handling
|
||||
* the cb. The lock is static after initially setting it up, just make
|
||||
* sure that the dma_fence structure isn't freed up.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
lock = vm->last_tlb_flush->lock;
|
||||
rcu_read_unlock();
|
||||
|
||||
spin_lock_irqsave(lock, flags);
|
||||
spin_unlock_irqrestore(lock, flags);
|
||||
|
||||
return atomic64_read(&vm->tlb_seq);
|
||||
}
|
||||
|
||||
/*
|
||||
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
|
||||
* happens while holding this lock anywhere to prevent deadlocks when
|
||||
* an MMU notifier runs in reclaim-FS context.
|
||||
*/
|
||||
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
|
||||
{
|
||||
mutex_lock(&vm->eviction_lock);
|
||||
vm->saved_flags = memalloc_noreclaim_save();
|
||||
}
|
||||
|
||||
static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
|
||||
{
|
||||
if (mutex_trylock(&vm->eviction_lock)) {
|
||||
vm->saved_flags = memalloc_noreclaim_save();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
|
||||
{
|
||||
memalloc_noreclaim_restore(vm->saved_flags);
|
||||
mutex_unlock(&vm->eviction_lock);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
|
||||
if (entry->bo)
|
||||
return 0;
|
||||
|
||||
amdgpu_vm_eviction_unlock(vm);
|
||||
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
|
||||
amdgpu_vm_eviction_lock(vm);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -435,7 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
if (place->flags & TTM_PL_FLAG_TOPDOWN)
|
||||
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
|
||||
|
||||
if (fpfn || lpfn != man->size)
|
||||
if (fpfn || lpfn != mgr->mm.size)
|
||||
/* Allocate blocks in desired range */
|
||||
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
|
||||
|
||||
|
@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep,
|
||||
{
|
||||
int ret;
|
||||
uint32_t num_devices, num_bos, num_objects;
|
||||
uint64_t priv_size, priv_offset = 0;
|
||||
uint64_t priv_size, priv_offset = 0, bo_priv_offset;
|
||||
|
||||
if (!args->devices || !args->bos || !args->priv_data)
|
||||
return -EINVAL;
|
||||
@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep,
|
||||
if (ret)
|
||||
goto exit_unlock;
|
||||
|
||||
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
|
||||
(uint8_t __user *)args->priv_data, &priv_offset);
|
||||
if (ret)
|
||||
goto exit_unlock;
|
||||
/* Leave room for BOs in the private data. They need to be restored
|
||||
* before events, but we checkpoint them last to simplify the error
|
||||
* handling.
|
||||
*/
|
||||
bo_priv_offset = priv_offset;
|
||||
priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
|
||||
|
||||
if (num_objects) {
|
||||
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
|
||||
&priv_offset);
|
||||
if (ret)
|
||||
goto close_bo_fds;
|
||||
goto exit_unlock;
|
||||
|
||||
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
|
||||
&priv_offset);
|
||||
if (ret)
|
||||
goto close_bo_fds;
|
||||
goto exit_unlock;
|
||||
|
||||
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
|
||||
if (ret)
|
||||
goto close_bo_fds;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
close_bo_fds:
|
||||
if (ret) {
|
||||
/* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */
|
||||
uint32_t i;
|
||||
struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
|
||||
|
||||
for (i = 0; i < num_bos; i++) {
|
||||
if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
|
||||
close_fd(bo_buckets[i].dmabuf_fd);
|
||||
}
|
||||
}
|
||||
/* This must be the last thing in this function that can fail.
|
||||
* Otherwise we leak dmabuf file descriptors.
|
||||
*/
|
||||
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
|
||||
(uint8_t __user *)args->priv_data, &bo_priv_offset);
|
||||
|
||||
exit_unlock:
|
||||
mutex_unlock(&p->mutex);
|
||||
|
@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd,
|
||||
ret = create_other_event(p, ev, &ev_priv->event_id);
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&p->event_mutex);
|
||||
|
||||
exit:
|
||||
if (ret)
|
||||
@ -513,8 +514,6 @@ exit:
|
||||
|
||||
kfree(ev_priv);
|
||||
|
||||
mutex_unlock(&p->event_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -7622,9 +7622,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
				    new_crtc_state,
				    &bundle->flip_addrs[planes_count]);
		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			fill_dc_dirty_rects(plane, old_plane_state,
					    new_plane_state, new_crtc_state,
					    &bundle->flip_addrs[planes_count]);

		/*
		 * Only allow immediate flips for fast updates that don't

@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = {
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 12.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 12.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 12.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.sr_exit_time_us = 12.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
	}

@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = {
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 16.5,
			.sr_enter_plus_exit_time_us = 18.5,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 16.5,
			.sr_enter_plus_exit_time_us = 18.5,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 16.5,
			.sr_enter_plus_exit_time_us = 18.5,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.sr_exit_time_us = 16.5,
			.sr_enter_plus_exit_time_us = 18.5,
			.valid = true,
		},
	}

@ -853,6 +853,7 @@ struct dc_debug_options {
	bool enable_dp_dig_pixel_rate_div_policy;
	enum lttpr_mode lttpr_mode_override;
	unsigned int dsc_delay_factor_wa_x1000;
	unsigned int min_prefetch_in_strobe_ns;
};

struct gpu_info_soc_bounding_box_v1_0;

@ -49,18 +49,30 @@
#define CTX \
	enc1->base.ctx

static void enc314_reset_fifo(struct stream_encoder *enc, bool reset)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
	uint32_t reset_val = reset ? 1 : 0;
	uint32_t is_symclk_on;

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
	REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);

	if (is_symclk_on)
		REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
	else
		udelay(10);
}

static void enc314_enable_fifo(struct stream_encoder *enc)
{
	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

	/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
	REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
	REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);

	enc314_reset_fifo(enc, true);
	enc314_reset_fifo(enc, false);

	REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}

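enc314_reset_fifo() above polls DIG_FIFO_RESET_DONE only when DIG_SYMCLK_FE_ON reports the front-end symbol clock running; with the clock gated the done bit can never advance, so the code falls back to a short fixed delay instead of a register wait that is guaranteed to time out. A rough, self-contained sketch of that conditional-wait idea, using a fake register and hypothetical helpers in place of the dc REG_* macros:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register standing in for DIG_FIFO_CTRL0; the real driver goes through MMIO. */
static uint32_t fifo_ctrl;

static void delay_us(unsigned int us) { (void)us; /* no-op in this sketch */ }

/* Poll a bit until it matches 'want' or the retry budget runs out. */
static bool poll_bit(const uint32_t *reg, uint32_t mask, bool want,
		     unsigned int retries, unsigned int us)
{
	while (retries--) {
		if (!!(*reg & mask) == want)
			return true;
		delay_us(us);
	}
	return false;
}

static void fifo_reset(bool assert_reset, bool clock_running)
{
	const uint32_t RESET = 1u << 0, RESET_DONE = 1u << 1;

	fifo_ctrl = assert_reset ? (fifo_ctrl | RESET) : (fifo_ctrl & ~RESET);
	/* Pretend the hardware acknowledges only while the clock runs. */
	if (clock_running)
		fifo_ctrl = assert_reset ? (fifo_ctrl | RESET_DONE) : (fifo_ctrl & ~RESET_DONE);

	if (clock_running)
		poll_bit(&fifo_ctrl, RESET_DONE, assert_reset, 10, 5000);
	else
		delay_us(10);	/* done bit cannot advance with the clock gated */
}

int main(void)
{
	fifo_reset(true, true);
	fifo_reset(false, true);
	printf("ctrl=0x%x\n", fifo_ctrl);
	return 0;
}
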
@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.enable_dp_dig_pixel_rate_div_policy = 1,
	.allow_sw_cursor_fallback = false,
	.alloc_extra_way_for_cursor = true,
	.min_prefetch_in_strobe_ns = 60000, // 60us
};

static const struct dc_debug_options debug_defaults_diags = {

@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = {
	.enable_dp_dig_pixel_rate_div_policy = 1,
	.allow_sw_cursor_fallback = false,
	.alloc_extra_way_for_cursor = true,
	.min_prefetch_in_strobe_ns = 60000, // 60us
};

static const struct dc_debug_options debug_defaults_diags = {

@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
		},
	},
	.num_states = 5,
	.sr_exit_time_us = 9.0,
	.sr_enter_plus_exit_time_us = 11.0,
	.sr_exit_time_us = 16.5,
	.sr_enter_plus_exit_time_us = 18.5,
	.sr_exit_z8_time_us = 442.0,
	.sr_enter_plus_exit_z8_time_us = 560.0,
	.writeback_latency_us = 12.0,

@ -2364,6 +2364,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
	/* DML DSC delay factor workaround */
	dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;

	dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;

	/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
	dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;

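The two new debug knobs above feed DML as plain floating-point values: dsc_delay_factor_wa_x1000 stores a factor scaled by 1000 and min_prefetch_in_strobe_ns is in nanoseconds, so both are divided by 1000.0 before use (the 60000 ns default in the earlier resource hunks becomes 60.0 us). A tiny illustrative sketch of that conversion, with a hypothetical stand-in struct rather than the real dc_debug_options:

#include <stdio.h>

int main(void)
{
	/* Stand-in for the two dc_debug_options fields; 60000 mirrors the default
	 * set in the resource hunks above, 1150 is a made-up example value.
	 */
	struct {
		unsigned int dsc_delay_factor_wa_x1000;	/* factor scaled by 1000 */
		unsigned int min_prefetch_in_strobe_ns;	/* nanoseconds */
	} debug = { 1150, 60000 };

	double dsc_delay_factor_wa = debug.dsc_delay_factor_wa_x1000 / 1000.0;		/* 1.15 */
	double min_prefetch_in_strobe_us = debug.min_prefetch_in_strobe_ns / 1000.0;	/* 60.0 us */

	printf("factor=%.3f, prefetch=%.1f us\n", dsc_delay_factor_wa, min_prefetch_in_strobe_us);
	return 0;
}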