Linux 5.15
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmF/AjYeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG1hkIAJ6sFDbvb4M4LMwf
Slh2NVL9o5sLMBDzVwnVlyMSKDbMn1WBKreGssaLgZjGDc74lxsdSmw5l9MZm0JN
xlq95Q6XFiuu+0qDHPWwfDz3JFO4TqW2ZLLPWk9NnkNbRXqccSrlVRi1RpgE1t3/
NUtS8CQLu6A2BYMc6mkk3aV6IwSNKOkWbM5eBHSvU4j8B6lLbNQop0AfO/wyY1xB
U6LiVE1RpN/b7Yv+75ITtNzuHzVIBx6305FvSnOlKbMKKvIClt96Vd2OeuoEkK+6
wGU8JraB1+fc0GckAhynNrjWQWdvi0MAhFWWEJxjS20OGcV1rXDduNfkVNauO1Zn
+dNyJ3s=
=g9fz
-----END PGP SIGNATURE-----

Merge tag 'v5.15' into rdma.git for-next

Pull in the accepted for-rc patches as the next merge needs a newer base.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit a2a2a69d14

 .mailmap | 2 ++
@@ -33,6 +33,8 @@ Al Viro <viro@zenIV.linux.org.uk>
 Andi Kleen <ak@linux.intel.com> <ak@suse.de>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrej Shadura <andrew.shadura@collabora.co.uk>
+Andrej Shadura <andrew@shadura.me> <andrew@beldisplaytech.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
@@ -32,13 +32,13 @@ properties:
   "#size-cells":
     const: 1

-  pinctrl:
-    $ref: ../pinctrl/brcm,ns-pinmux.yaml
-
 patternProperties:
   '^clock-controller@[a-f0-9]+$':
     $ref: ../clock/brcm,iproc-clocks.yaml

+  '^pin-controller@[a-f0-9]+$':
+    $ref: ../pinctrl/brcm,ns-pinmux.yaml
+
   '^thermal@[a-f0-9]+$':
     $ref: ../thermal/brcm,ns-thermal.yaml

@@ -73,9 +73,10 @@ examples:
                          "iprocfast", "sata1", "sata2";
         };

-        pinctrl {
+        pin-controller@1c0 {
             compatible = "brcm,bcm4708-pinmux";
-            offset = <0x1c0>;
+            reg = <0x1c0 0x24>;
+            reg-names = "cru_gpio_control";
         };

         thermal@2c0 {
@@ -17,9 +17,6 @@ description:

   A list of pins varies across chipsets so few bindings are available.

-  Node of the pinmux must be nested in the CRU (Central Resource Unit) "syscon"
-  node.
-
 properties:
   compatible:
     enum:

@@ -27,10 +24,11 @@ properties:
       - brcm,bcm4709-pinmux
       - brcm,bcm53012-pinmux

-  offset:
-    description: offset of pin registers in the CRU block
-    $ref: /schemas/types.yaml#/definitions/uint32-array
+  reg:
+    maxItems: 1
+
+  reg-names:
+    const: cru_gpio_control

 patternProperties:
   '-pins$':

@@ -72,23 +70,20 @@ allOf:
               uart1_grp ]

 required:
-  - offset
+  - reg
+  - reg-names

 additionalProperties: false

 examples:
   - |
-    cru@1800c100 {
-        compatible = "syscon", "simple-mfd";
-        reg = <0x1800c100 0x1a4>;
+    pin-controller@1800c1c0 {
+        compatible = "brcm,bcm4708-pinmux";
+        reg = <0x1800c1c0 0x24>;
+        reg-names = "cru_gpio_control";

-        pinctrl {
-            compatible = "brcm,bcm4708-pinmux";
-            offset = <0xc0>;
-
-            spi-pins {
-                function = "spi";
-                groups = "spi_grp";
-            };
+        spi-pins {
+            function = "spi";
+            groups = "spi_grp";
         };
     };
@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
         PHY, link, etc.
     * - ``fw.mgmt.api``
       - running
-      - 1.5
-      - 2-digit version number of the API exported over the AdminQ by the
-        management firmware. Used by the driver to identify what commands
-        are supported.
+      - 1.5.1
+      - 3-digit version number (major.minor.patch) of the API exported over
+        the AdminQ by the management firmware. Used by the driver to
+        identify what commands are supported. Historical versions of the
+        kernel only displayed a 2-digit version number (major.minor).
     * - ``fw.mgmt.build``
       - running
      - 0x305d955f
@@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
     };

     struct sockaddr_mctp {
-            unsigned short int   smctp_family;
-            int                  smctp_network;
-            struct mctp_addr     smctp_addr;
-            __u8                 smctp_type;
-            __u8                 smctp_tag;
+            __kernel_sa_family_t smctp_family;
+            unsigned int         smctp_network;
+            struct mctp_addr     smctp_addr;
+            __u8                 smctp_type;
+            __u8                 smctp_tag;
     };

     #define MCTP_NET_ANY        0x0
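For context, the new layout above can be exercised from userspace once this release's UAPI headers are installed. A minimal sketch of binding an MCTP socket follows; the message-type value is an illustrative assumption, and AF_MCTP (45 in this release) may not yet be defined by older libcs, hence the fallback:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/mctp.h>

    #ifndef AF_MCTP
    #define AF_MCTP 45  /* added in Linux 5.15; fallback for older libcs */
    #endif

    int main(void)
    {
        struct sockaddr_mctp addr;
        int sd;

        sd = socket(AF_MCTP, SOCK_DGRAM, 0);
        if (sd < 0) {
            perror("socket(AF_MCTP)");
            return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.smctp_family = AF_MCTP;            /* now __kernel_sa_family_t */
        addr.smctp_network = MCTP_NET_ANY;      /* now unsigned int */
        addr.smctp_addr.s_addr = MCTP_ADDR_ANY; /* receive on any local EID */
        addr.smctp_type = 1;                    /* illustrative message type */

        if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("bind");
            return 1;
        }
        return 0;
    }

Note that ``smctp_network`` is now unsigned, so ``MCTP_NET_ANY`` (0x0) rather than a negative value is the wildcard network ID.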
@@ -104,6 +104,7 @@ Code  Seq#    Include File                   Comments
 '8'   all           SNP8023 advanced NIC card
                     <mailto:mcr@solidum.com>
 ';'   64-7F  linux/vfio.h
+'='   00-3f  uapi/linux/ptp_clock.h         <mailto:richardcochran@gmail.com>
 '@'   00-0F  linux/radeonfb.h               conflict!
 '@'   00-0F  drivers/video/aty/aty128fb.c   conflict!
 'A'   00-1F  linux/apm_bios.h               conflict!
 MAINTAINERS | 18
@@ -5458,6 +5458,19 @@ F: include/net/devlink.h
 F: include/uapi/linux/devlink.h
 F: net/core/devlink.c

+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
+L: kernel@dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/imx6*-dhcom-*
+
+DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+M: Marek Vasut <marex@denx.de>
+L: kernel@dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/stm32mp1*-dhcom-*
+F: arch/arm/boot/dts/stm32mp1*-dhcor-*
+
 DIALOG SEMICONDUCTOR DRIVERS
 M: Support Opensource <support.opensource@diasemi.com>
 S: Supported
@@ -6147,8 +6160,7 @@ T: git git://anongit.freedesktop.org/drm/drm
 F: Documentation/devicetree/bindings/display/
 F: Documentation/devicetree/bindings/gpu/
 F: Documentation/gpu/
-F: drivers/gpu/drm/
-F: drivers/gpu/vga/
+F: drivers/gpu/
 F: include/drm/
 F: include/linux/vga*
 F: include/uapi/drm/
@@ -11278,7 +11290,6 @@ F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
 F: drivers/net/ethernet/marvell/octeontx2/af/

 MARVELL PRESTERA ETHERNET SWITCH DRIVER
-M: Vadym Kochan <vkochan@marvell.com>
 M: Taras Chornyi <tchornyi@marvell.com>
 S: Supported
 W: https://github.com/Marvell-switching/switchdev-prestera
@@ -20336,6 +20347,7 @@ X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M: Thomas Gleixner <tglx@linutronix.de>
 M: Ingo Molnar <mingo@redhat.com>
 M: Borislav Petkov <bp@alien8.de>
+M: Dave Hansen <dave.hansen@linux.intel.com>
 M: x86@kernel.org
 R: "H. Peter Anvin" <hpa@zytor.com>
 L: linux-kernel@vger.kernel.org
 Makefile | 4 ++--
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
-NAME = Opossums on Parade
+EXTRAVERSION =
+NAME = Trick or Treat

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -92,6 +92,7 @@ config ARM
     select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
     select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
     select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+    select HAVE_FUTEX_CMPXCHG if FUTEX
     select HAVE_GCC_PLUGINS
     select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
     select HAVE_IRQ_TIME_ACCOUNTING
@@ -47,7 +47,10 @@ extern char * strchrnul(const char *, int);
 #endif

 #ifdef CONFIG_KERNEL_XZ
+/* Prevent KASAN override of string helpers in decompressor */
+#undef memmove
 #define memmove memmove
+#undef memcpy
 #define memcpy memcpy
 #include "../../../../lib/decompress_unxz.c"
 #endif
@@ -112,7 +112,7 @@
     pinctrl-names = "default";
     pinctrl-0 = <&gmac_rgmii_pins>;
     phy-handle = <&phy1>;
-    phy-mode = "rgmii";
+    phy-mode = "rgmii-id";
     status = "okay";
 };
@@ -176,6 +176,7 @@ extern int __get_user_64t_4(void *);
         register unsigned long __l asm("r1") = __limit;        \
         register int __e asm("r0");                            \
         unsigned int __ua_flags = uaccess_save_and_enable();   \
+        int __tmp_e;                                           \
         switch (sizeof(*(__p))) {                              \
         case 1:                                                \
             if (sizeof((x)) >= 8)                              \

@@ -203,9 +204,10 @@ extern int __get_user_64t_4(void *);
             break;                                             \
         default: __e = __get_user_bad(); break;                \
         }                                                      \
+        __tmp_e = __e;                                         \
         uaccess_restore(__ua_flags);                           \
         x = (typeof(*(p))) __r2;                               \
-        __e;                                                   \
+        __tmp_e;                                               \
     })

 #define get_user(x, p)                                         \
@@ -253,7 +253,7 @@ __create_page_tables:
     add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
     ldr r6, =(_end - 1)
     adr_l   r5, kernel_sec_start    @ _pa(kernel_sec_start)
-#ifdef CONFIG_CPU_ENDIAN_BE8
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
     str r8, [r5, #4]            @ Save physical start of kernel (BE)
 #else
     str r8, [r5]                @ Save physical start of kernel (LE)

@@ -266,7 +266,7 @@ __create_page_tables:
     bls 1b
     eor r3, r3, r7              @ Remove the MMU flags
     adr_l   r5, kernel_sec_end  @ _pa(kernel_sec_end)
-#ifdef CONFIG_CPU_ENDIAN_BE8
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
     str r3, [r5, #4]            @ Save physical end of kernel (BE)
 #else
     str r3, [r5]                @ Save physical end of kernel (LE)
@@ -136,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
     for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
         if (p >= bottom && p < top) {
             unsigned long val;
-            if (get_kernel_nofault(val, (unsigned long *)p))
+            if (!get_kernel_nofault(val, (unsigned long *)p))
                 sprintf(str + i * 9, " %08lx", val);
             else
                 sprintf(str + i * 9, " ????????");
@@ -40,6 +40,10 @@ SECTIONS
         ARM_DISCARD
         *(.alt.smp.init)
         *(.pv_table)
+#ifndef CONFIG_ARM_UNWIND
+        *(.ARM.exidx) *(.ARM.exidx.*)
+        *(.ARM.extab) *(.ARM.extab.*)
+#endif
     }

     . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);

@@ -172,7 +176,7 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
 ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
 #endif

-#ifdef CONFIG_ARM_MPU
+#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
 /*
  * Due to PMSAv7 restriction on base address and size we have to
  * enforce minimal alignment restrictions. It was seen that weaker
@@ -340,6 +340,7 @@ ENTRY(\name\()_cache_fns)

 .macro define_tlb_functions name:req, flags_up:req, flags_smp
     .type   \name\()_tlb_fns, #object
+    .align 2
 ENTRY(\name\()_tlb_fns)
     .long   \name\()_flush_user_tlb_range
     .long   \name\()_flush_kern_tlb_range
@@ -439,7 +439,7 @@ static struct undef_hook kprobes_arm_break_hook = {

 #endif /* !CONFIG_THUMB2_KERNEL */

-int __init arch_init_kprobes()
+int __init arch_init_kprobes(void)
 {
     arm_probes_decode_init();
 #ifdef CONFIG_THUMB2_KERNEL
@@ -75,7 +75,7 @@
     pinctrl-0 = <&emac_rgmii_pins>;
     phy-supply = <&reg_gmac_3v3>;
     phy-handle = <&ext_rgmii_phy>;
-    phy-mode = "rgmii";
+    phy-mode = "rgmii-id";
     status = "okay";
 };
@@ -70,7 +70,9 @@
         regulator-name = "rst-usb-eth2";
         pinctrl-names = "default";
         pinctrl-0 = <&pinctrl_usb_eth2>;
-        gpio = <&gpio3 2 GPIO_ACTIVE_LOW>;
+        gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>;
+        enable-active-high;
+        regulator-always-on;
     };

     reg_vdd_5v: regulator-5v {

@@ -95,7 +97,7 @@
     clocks = <&osc_can>;
     interrupt-parent = <&gpio4>;
     interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
-    spi-max-frequency = <100000>;
+    spi-max-frequency = <10000000>;
     vdd-supply = <&reg_vdd_3v3>;
     xceiver-supply = <&reg_vdd_5v>;
 };

@@ -111,7 +113,7 @@
 &fec1 {
     pinctrl-names = "default";
     pinctrl-0 = <&pinctrl_enet>;
-    phy-connection-type = "rgmii";
+    phy-connection-type = "rgmii-rxid";
     phy-handle = <&ethphy>;
     status = "okay";
@@ -91,10 +91,12 @@
             reg_vdd_soc: BUCK1 {
                 regulator-name = "buck1";
                 regulator-min-microvolt = <800000>;
-                regulator-max-microvolt = <900000>;
+                regulator-max-microvolt = <850000>;
                 regulator-boot-on;
                 regulator-always-on;
                 regulator-ramp-delay = <3125>;
+                nxp,dvs-run-voltage = <850000>;
+                nxp,dvs-standby-voltage = <800000>;
             };

             reg_vdd_arm: BUCK2 {

@@ -111,7 +113,7 @@
             reg_vdd_dram: BUCK3 {
                 regulator-name = "buck3";
                 regulator-min-microvolt = <850000>;
-                regulator-max-microvolt = <900000>;
+                regulator-max-microvolt = <950000>;
                 regulator-boot-on;
                 regulator-always-on;
             };

@@ -150,7 +152,7 @@

             reg_vdd_snvs: LDO2 {
                 regulator-name = "ldo2";
-                regulator-min-microvolt = <850000>;
+                regulator-min-microvolt = <800000>;
                 regulator-max-microvolt = <900000>;
                 regulator-boot-on;
                 regulator-always-on;
@@ -2590,9 +2590,10 @@
             power-domains = <&dispcc MDSS_GDSC>;

             clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+                     <&gcc GCC_DISP_HF_AXI_CLK>,
                      <&gcc GCC_DISP_SF_AXI_CLK>,
                      <&dispcc DISP_CC_MDSS_MDP_CLK>;
-            clock-names = "iface", "nrt_bus", "core";
+            clock-names = "iface", "bus", "nrt_bus", "core";

             assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
             assigned-clock-rates = <460000000>;
@@ -24,6 +24,7 @@ struct hyp_pool {

 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;

 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-    return hyp_alloc_pages(&host_s2_pool, get_order(size));
+    void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+    hyp_split_page(hyp_virt_to_page(addr));
+
+    /*
+     * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+     * so there should be no need to free any of the tail pages to make the
+     * allocation exact.
+     */
+    WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+    return addr;
 }

 static void *host_s2_zalloc_page(void *pool)
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)

 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+    BUG_ON(!p->refcount);
     p->refcount--;
     return (p->refcount == 0);
 }

@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
     hyp_spin_unlock(&pool->lock);
 }

+void hyp_split_page(struct hyp_page *p)
+{
+    unsigned short order = p->order;
+    unsigned int i;
+
+    p->order = 0;
+    for (i = 1; i < (1 << order); i++) {
+        struct hyp_page *tail = p + i;
+
+        tail->order = 0;
+        hyp_set_page_refcounted(tail);
+    }
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
     unsigned short i = order;
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
          * when updating the PG_mte_tagged page flag, see
          * sanitise_mte_tags for more details.
          */
-        if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-            return -EINVAL;
+        if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+            ret = -EINVAL;
+            break;
+        }

         if (vma->vm_flags & VM_PFNMAP) {
             /* IO region dirty page logging not allowed */
@@ -1136,6 +1136,11 @@ out:
     return prog;
 }

+u64 bpf_jit_alloc_exec_limit(void)
+{
+    return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
     return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
@@ -6,7 +6,7 @@

 #ifndef CONFIG_DYNAMIC_FTRACE
 extern void (*ftrace_trace_function)(unsigned long, unsigned long,
-                                     struct ftrace_ops*, struct pt_regs*);
+                                     struct ftrace_ops*, struct ftrace_regs*);
 extern void ftrace_graph_caller(void);

 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
@@ -9,7 +9,7 @@

 static inline unsigned long arch_local_save_flags(void)
 {
-    return RDCTL(CTL_STATUS);
+    return RDCTL(CTL_FSTATUS);
 }

 /*

@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-    WRCTL(CTL_STATUS, flags);
+    WRCTL(CTL_FSTATUS, flags);
 }

 static inline void arch_local_irq_disable(void)
@@ -11,7 +11,7 @@
 #endif

 /* control register numbers */
-#define CTL_STATUS   0
+#define CTL_FSTATUS  0
 #define CTL_ESTATUS  1
 #define CTL_BSTATUS  2
 #define CTL_IENABLE  3
@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR

 config NIOS2_DTB_SOURCE_BOOL
     bool "Compile and link device tree into kernel image"
+    depends on !COMPILE_TEST
     help
       This allows you to specify a dts (device tree source) file
       which will be compiled and linked into the kernel image.
@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
 /*
  * This is the sequence required to execute idle instructions, as
  * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
- *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
  */
 #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)           \
     /* Magic NAP/SLEEP/WINKLE mode enter sequence */    \
-    std  r2,0(r1);                                      \
+    std  r2,-8(r1);                                     \
     ptesync;                                            \
-    ld   r2,0(r1);                                      \
+    ld   r2,-8(r1);                                     \
 236: cmpd cr0,r2,r2;                                    \
     bne  236b;                                          \
     IDLE_INST;                                          \
@@ -1730,8 +1730,6 @@ void __cpu_die(unsigned int cpu)

 void arch_cpu_idle_dead(void)
 {
-    sched_preempt_enable_no_resched();
-
     /*
      * Disable on the down path. This will be re-enabled by
      * start_secondary() via start_secondary_resume() below
@@ -1302,6 +1302,12 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
         struct property *default_win;
         int reset_win_ext;

+        /* DDW + IOMMU on single window may fail if there is any allocation */
+        if (iommu_table_in_use(tbl)) {
+            dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
+            goto out_failed;
+        }
+
         default_win = of_find_property(pdn, "ibm,dma-window", NULL);
         if (!default_win)
             goto out_failed;

@@ -1356,12 +1362,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
               query.largest_available_block,
               1ULL << page_shift);

-        /* DDW + IOMMU on single window may fail if there is any allocation */
-        if (default_win_removed && iommu_table_in_use(tbl)) {
-            dev_dbg(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
-            goto out_failed;
-        }
-
         len = order_base_2(query.largest_available_block << page_shift);
         win_name = DMA64_PROPNAME;
     } else {

@@ -1411,18 +1411,19 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
     } else {
         struct iommu_table *newtbl;
         int i;
+        unsigned long start = 0, end = 0;

         for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
             const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;

             /* Look for MMIO32 */
-            if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM)
+            if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
+                start = pci->phb->mem_resources[i].start;
+                end = pci->phb->mem_resources[i].end;
                 break;
+            }
         }

         if (i == ARRAY_SIZE(pci->phb->mem_resources))
             goto out_del_list;

         /* New table for using DDW instead of the default DMA window */
         newtbl = iommu_pseries_alloc_table(pci->phb->node);
         if (!newtbl) {

@@ -1432,15 +1433,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)

         iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
                                     1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
-        iommu_init_table(newtbl, pci->phb->node, pci->phb->mem_resources[i].start,
-                         pci->phb->mem_resources[i].end);
+        iommu_init_table(newtbl, pci->phb->node, start, end);

         pci->table_group->tables[1] = newtbl;

         /* Keep default DMA window stuct if removed */
         if (default_win_removed) {
             tbl->it_size = 0;
-            kfree(tbl->it_map);
+            vfree(tbl->it_map);
             tbl->it_map = NULL;
         }

         set_iommu_table_base(&dev->dev, newtbl);
@@ -163,6 +163,12 @@ config PAGE_OFFSET
     default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
     default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB

+config KASAN_SHADOW_OFFSET
+    hex
+    depends on KASAN_GENERIC
+    default 0xdfffffc800000000 if 64BIT
+    default 0xffffffff if 32BIT
+
 config ARCH_FLATMEM_ENABLE
     def_bool !NUMA
@@ -30,8 +30,7 @@
 #define KASAN_SHADOW_SIZE   (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_SHADOW_START  KERN_VIRT_START
 #define KASAN_SHADOW_END    (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
-                                (64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

 void kasan_init(void);
 asmlinkage void kasan_early_init(void);
@@ -193,6 +193,7 @@ setup_trap_vector:
     csrw CSR_SCRATCH, zero
     ret

+.align 2
 .Lsecondary_park:
     /* We lack SMP support or have too many harts, so park this hart */
     wfi
@@ -17,6 +17,9 @@ asmlinkage void __init kasan_early_init(void)
     uintptr_t i;
     pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

+    BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+                 KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
     for (i = 0; i < PTRS_PER_PTE; ++i)
         set_pte(kasan_early_shadow_pte + i,
                 mk_pte(virt_to_page(kasan_early_shadow_page),

@@ -172,21 +175,10 @@ void __init kasan_init(void)
     phys_addr_t p_start, p_end;
     u64 i;

     /*
      * Populate all kernel virtual address space with kasan_early_shadow_page
      * except for the linear mapping and the modules/kernel/BPF mapping.
      */
-    kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-                                (void *)kasan_mem_to_shadow((void *)
-                                                            VMEMMAP_END));
-    if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
-        kasan_shallow_populate(
-            (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-            (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-    else
-        kasan_populate_early_shadow(
-            (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-            (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

     /* Populate the linear mapping */
     for_each_mem_range(i, &p_start, &p_end) {
@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

     if (i == NR_JIT_ITERATIONS) {
         pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-        bpf_jit_binary_free(jit_data->header);
+        if (jit_data->header)
+            bpf_jit_binary_free(jit_data->header);
         prog = orig_prog;
         goto out_offset;
     }

@@ -166,6 +167,11 @@ out:
     return prog;
 }

+u64 bpf_jit_alloc_exec_limit(void)
+{
+    return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
     return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
@@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,

 /**
  * guest_translate_address - translate guest logical into guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @gpa: Guest physical address
+ * @mode: Translation access mode
  *
  * Parameter semantics are the same as the ones from guest_translate.
  * The memory contents at the guest address are not changed.

@@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @length: Length of test range
+ * @mode: Translation access mode
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
                     unsigned long length, enum gacc_mode mode)

@@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

 /**
  * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @vcpu: virtual cpu
  * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set

@@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  * @pgt: pointer to the beginning of the page table for the given address if
  *       successful (return value 0), or to the first invalid DAT entry in
  *       case of exceptions (return value > 0)
+ * @dat_protection: referenced memory is write protected
  * @fake: pgt references contiguous guest memory block, not a pgtable
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
@@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)

 /**
  * handle_external_interrupt - used for external interruption interceptions
+ * @vcpu: virtual cpu
  *
  * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
  * the new PSW does not have external interrupts disabled. In the first case,

@@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 }

 /**
- * Handle MOVE PAGE partial execution interception.
+ * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
+ * @vcpu: virtual cpu
  *
  * This interception can only happen for guests with DAT disabled and
  * addresses that are currently not mapped in the host. Thus we try to
@@ -3053,13 +3053,14 @@ static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
     int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
     struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
     struct kvm_vcpu *vcpu;
+    u8 vcpu_isc_mask;

     for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
         vcpu = kvm_get_vcpu(kvm, vcpu_idx);
         if (psw_ioint_disabled(vcpu))
             continue;
-        deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
-        if (deliverable_mask) {
+        vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+        if (deliverable_mask & vcpu_isc_mask) {
             /* lately kicked but not yet running */
             if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
                 return;
@@ -3363,6 +3363,7 @@ out_free_sie_block:

 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+    clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
     return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
@@ -78,7 +78,7 @@
     vpxor tmp0, x, x;


-.section    .rodata.cst164, "aM", @progbits, 164
+.section    .rodata.cst16, "aM", @progbits, 16
 .align 16

 /*

@@ -133,6 +133,10 @@
 .L0f0f0f0f:
     .long 0x0f0f0f0f

+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+    .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+

 .text
 .align 16

@@ -93,7 +93,7 @@
     vpxor tmp0, x, x;


-.section    .rodata.cst164, "aM", @progbits, 164
+.section    .rodata.cst16, "aM", @progbits, 16
 .align 16

 /*

@@ -148,6 +148,10 @@
 .L0f0f0f0f:
     .long 0x0f0f0f0f

+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+    .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+
 .text
 .align 16
@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {

     struct kvm_pio_request pio;
     void *pio_data;
-    void *guest_ins_data;
+    void *sev_pio_data;
+    unsigned sev_pio_count;

     u8 event_exit_inst_len;

@@ -1097,7 +1098,7 @@ struct kvm_arch {
     u64 cur_tsc_generation;
     int nr_vcpus_matched_tsc;

-    spinlock_t pvclock_gtod_sync_lock;
+    raw_spinlock_t pvclock_gtod_sync_lock;
     bool use_master_clock;
     u64 master_kernel_ns;
     u64 master_cycle_now;
@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
     struct kvm_lapic *apic = vcpu->arch.apic;
+    u64 msr_val;
     int i;

     if (!init_event) {
-        vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-                               MSR_IA32_APICBASE_ENABLE;
+        msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
         if (kvm_vcpu_is_reset_bsp(vcpu))
-            vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+            msr_val |= MSR_IA32_APICBASE_BSP;
+        kvm_lapic_set_base(vcpu, msr_val);
     }

     if (!apic)

@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
     /* Stop the timer in case it's a reset to an active apic */
     hrtimer_cancel(&apic->lapic_timer.timer);

-    if (!init_event) {
-        apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+    /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+    if (!init_event)
         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
-    }
     kvm_apic_set_version(apic->vcpu);

     for (i = 0; i < KVM_APIC_LVT_NUM; i++)

@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
         lapic_timer_advance_dynamic = false;
     }

+    /*
+     * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+     * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+     */
+    vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
     static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
     kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 void kvm_lapic_exit(void)
 {
     static_key_deferred_flush(&apic_hw_disabled);
+    WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
     static_key_deferred_flush(&apic_sw_disabled);
+    WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }
@@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
     unsigned bit;
     bool wp;

-    if (!is_cr4_pke(mmu)) {
-        mmu->pkru_mask = 0;
+    mmu->pkru_mask = 0;
+
+    if (!is_cr4_pke(mmu))
         return;
-    }

     wp = is_cr0_wp(mmu);
@@ -618,7 +618,12 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
     vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
     vmsa.address = __sme_pa(svm->vmsa);
     vmsa.len = PAGE_SIZE;
-    return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+    ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+    if (ret)
+        return ret;
+
+    vcpu->arch.guest_state_protected = true;
+    return 0;
 }

 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)

@@ -1479,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
         goto e_free_trans;
     }

+    /*
+     * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+     * encrypts the written data with the guest's key, and the cache may
+     * contain dirty, unencrypted data.
+     */
+    sev_clflush_pages(guest_page, n);
+
     /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
     data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
     data.guest_address |= sev_me_mask;

@@ -2579,11 +2591,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)

 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 {
-    if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
+    int count;
+    int bytes;
+
+    if (svm->vmcb->control.exit_info_2 > INT_MAX)
         return -EINVAL;

-    return kvm_sev_es_string_io(&svm->vcpu, size, port,
-                                svm->ghcb_sa, svm->ghcb_sa_len, in);
+    count = svm->vmcb->control.exit_info_2;
+    if (unlikely(check_mul_overflow(count, size, &bytes)))
+        return -EINVAL;
+
+    if (!setup_vmgexit_scratch(svm, in, bytes))
+        return -EINVAL;
+
+    return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
 }

 void sev_es_init_vmcb(struct vcpu_svm *svm)
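The hardening above leans on check_mul_overflow() from the kernel's <linux/overflow.h>, which rejects the GHCB scratch size whenever count * size cannot be represented. A standalone sketch of the same guard pattern, using the compiler builtin that backs the kernel helper (the function name and values here are illustrative, not kernel code):

    #include <stdio.h>

    /* Mirrors the guard in sev_es_string_io(): refuse the I/O when
     * count * size would overflow the signed byte count. */
    static int scratch_bytes(unsigned int count, unsigned int size, int *bytes)
    {
        if (__builtin_mul_overflow(count, size, bytes))
            return -1;  /* would overflow: treat as -EINVAL */
        return 0;
    }

    int main(void)
    {
        int bytes;

        if (scratch_bytes(0x40000000u, 4, &bytes))
            printf("rejected: 0x40000000 * 4 overflows int\n");
        if (!scratch_bytes(512, 4, &bytes))
            printf("ok: %d bytes\n", bytes);
        return 0;
    }

The point of the pattern is that the multiplication is checked before any allocation or copy sizing happens, so a guest-controlled exit_info_2 can no longer produce a truncated scratch buffer.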
@@ -191,7 +191,7 @@ struct vcpu_svm {

     /* SEV-ES scratch area support */
     void *ghcb_sa;
-    u64 ghcb_sa_len;
+    u32 ghcb_sa_len;
     bool ghcb_sa_sync;
     bool ghcb_sa_free;
@@ -5562,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)

 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
 {
-    vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
-    vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
-    return 0;
+    /*
+     * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+     * VM-Exits. Unconditionally set the flag here and leave the handling to
+     * vmx_handle_exit().
+     */
+    to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+    return 1;
 }

 /*

@@ -6051,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
     int ret = __vmx_handle_exit(vcpu, exit_fastpath);

     /*
-     * Even when current exit reason is handled by KVM internally, we
-     * still need to exit to user space when bus lock detected to inform
-     * that there is a bus lock in guest.
+     * Exit to user space when bus lock detected to inform that there is
+     * a bus lock in guest.
      */
     if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
         if (ret > 0)

@@ -6302,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)

         /*
          * If we are running L2 and L1 has a new pending interrupt
-         * which can be injected, we should re-evaluate
-         * what should be done with this new L1 interrupt.
-         * If L1 intercepts external-interrupts, we should
-         * exit from L2 to L1. Otherwise, interrupt should be
-         * delivered directly to L2.
+         * which can be injected, this may cause a vmexit or it may
+         * be injected into L2. Either way, this interrupt will be
+         * processed via KVM_REQ_EVENT, not RVI, because we do not use
+         * virtual interrupt delivery to inject L1 interrupts into L2.
          */
-        if (is_guest_mode(vcpu) && max_irr_updated) {
-            if (nested_exit_on_intr(vcpu))
-                kvm_vcpu_exiting_guest_mode(vcpu);
-            else
-                kvm_make_request(KVM_REQ_EVENT, vcpu);
-        }
+        if (is_guest_mode(vcpu) && max_irr_updated)
+            kvm_make_request(KVM_REQ_EVENT, vcpu);
     } else {
         max_irr = kvm_lapic_find_highest_irr(vcpu);
     }
@@ -2542,7 +2542,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
     kvm_vcpu_write_tsc_offset(vcpu, offset);
     raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

-    spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+    raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
     if (!matched) {
         kvm->arch.nr_vcpus_matched_tsc = 0;
     } else if (!already_matched) {

@@ -2550,7 +2550,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
     }

     kvm_track_tsc_matching(vcpu);
-    spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+    raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }

 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,

@@ -2780,9 +2780,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
     kvm_make_mclock_inprogress_request(kvm);

     /* no guest entries from this point */
-    spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
     pvclock_update_vm_gtod_copy(kvm);
-    spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

     kvm_for_each_vcpu(i, vcpu, kvm)
         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

@@ -2800,15 +2800,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
     unsigned long flags;
     u64 ret;

-    spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
     if (!ka->use_master_clock) {
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
         return get_kvmclock_base_ns() + ka->kvmclock_offset;
     }

     hv_clock.tsc_timestamp = ka->master_cycle_now;
     hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-    spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

     /* both __this_cpu_read() and rdtsc() should be on the same cpu */
     get_cpu();

@@ -2902,13 +2902,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
      * If the host uses TSC clock, then passthrough TSC as stable
      * to the guest.
      */
-    spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
     use_master_clock = ka->use_master_clock;
     if (use_master_clock) {
         host_tsc = ka->master_cycle_now;
         kernel_ns = ka->master_kernel_ns;
     }
-    spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+    raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

     /* Keep irq disabled to prevent changes to the clock */
     local_irq_save(flags);

@@ -6100,13 +6100,13 @@ set_pit2_out:
          * is slightly ahead) here we risk going negative on unsigned
          * 'system_time' when 'user_ns.clock' is very small.
          */
-        spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+        raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
         if (kvm->arch.use_master_clock)
             now_ns = ka->master_kernel_ns;
         else
             now_ns = get_kvmclock_base_ns();
         ka->kvmclock_offset = user_ns.clock - now_ns;
-        spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+        raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);

         kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
         break;
@@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }

 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-                               unsigned short port, void *val,
+                               unsigned short port,
                                unsigned int count, bool in)
 {
     vcpu->arch.pio.port = port;

@@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
     vcpu->arch.pio.count = count;
     vcpu->arch.pio.size = size;

-    if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-        vcpu->arch.pio.count = 0;
+    if (!kernel_pio(vcpu, vcpu->arch.pio_data))
         return 1;
-    }

     vcpu->run->exit_reason = KVM_EXIT_IO;
     vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;

@@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
     return 0;
 }

+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                             unsigned short port, unsigned int count)
+{
+    WARN_ON(vcpu->arch.pio.count);
+    memset(vcpu->arch.pio_data, 0, size * count);
+    return emulator_pio_in_out(vcpu, size, port, count, true);
+}
+
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+{
+    int size = vcpu->arch.pio.size;
+    unsigned count = vcpu->arch.pio.count;
+    memcpy(val, vcpu->arch.pio_data, size * count);
+    trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+    vcpu->arch.pio.count = 0;
+}
+
 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                            unsigned short port, void *val, unsigned int count)
 {
-    int ret;
-
-    if (vcpu->arch.pio.count)
-        goto data_avail;
-
-    memset(vcpu->arch.pio_data, 0, size * count);
-
-    ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-    if (ret) {
-data_avail:
-        memcpy(val, vcpu->arch.pio_data, size * count);
-        trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-        vcpu->arch.pio.count = 0;
-        return 1;
+    if (vcpu->arch.pio.count) {
+        /* Complete previous iteration. */
+    } else {
+        int r = __emulator_pio_in(vcpu, size, port, count);
+        if (!r)
+            return r;
+
+        /* Results already available, fall through. */
     }

-    return 0;
+    WARN_ON(count != vcpu->arch.pio.count);
+    complete_emulator_pio_in(vcpu, val);
+    return 1;
 }

 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
                             unsigned short port, const void *val,
                             unsigned int count)
 {
+    int ret;
+
     memcpy(vcpu->arch.pio_data, val, size * count);
     trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-    return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+    ret = emulator_pio_in_out(vcpu, size, port, count, false);
+    if (ret)
+        vcpu->arch.pio.count = 0;
+
+    return ret;
 }

 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
@@ -8139,9 +8156,9 @@ static void kvm_hyperv_tsc_notifier(void)
     list_for_each_entry(kvm, &vm_list, vm_list) {
         struct kvm_arch *ka = &kvm->arch;

-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         pvclock_update_vm_gtod_copy(kvm);
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

         kvm_for_each_vcpu(cpu, vcpu, kvm)
             kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -8783,9 +8800,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)

     kvm_run->cr8 = kvm_get_cr8(vcpu);
     kvm_run->apic_base = kvm_get_apic_base(vcpu);
+
+    /*
+     * The call to kvm_ready_for_interrupt_injection() may end up in
+     * kvm_xen_has_interrupt() which may require the srcu lock to be
+     * held, to protect against changes in the vcpu_info address.
+     */
+    vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
     kvm_run->ready_for_interrupt_injection =
         pic_in_kernel(vcpu->kvm) ||
         kvm_vcpu_ready_for_interrupt_injection(vcpu);
+    srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

     if (is_smm(vcpu))
         kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9643,14 +9668,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
             break;

+        if (vcpu->arch.apicv_active)
+            static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
         if (unlikely(kvm_vcpu_exit_request(vcpu))) {
             exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
             break;
         }
-
-        if (vcpu->arch.apicv_active)
-            static_call(kvm_x86_sync_pir_to_irr)(vcpu);
     }
 }

 /*
 * Do this here before restoring debug registers on the host. And
@@ -11182,7 +11207,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

     raw_spin_lock_init(&kvm->arch.tsc_write_lock);
     mutex_init(&kvm->arch.apic_map_lock);
-    spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+    raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

     kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
     pvclock_update_vm_gtod_copy(kvm);
@@ -11392,7 +11417,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
         int level = i + 1;
         int lpages = __kvm_mmu_slot_lpages(slot, npages, level);

-        WARN_ON(slot->arch.rmap[i]);
+        if (slot->arch.rmap[i])
+            continue;

         slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
         if (!slot->arch.rmap[i]) {
@@ -12367,44 +12393,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
-{
-    memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-           vcpu->arch.pio.count * vcpu->arch.pio.size);
-    vcpu->arch.pio.count = 0;
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+                           unsigned int port);

+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
+{
+    int size = vcpu->arch.pio.size;
+    int port = vcpu->arch.pio.port;
+
+    vcpu->arch.pio.count = 0;
+    if (vcpu->arch.sev_pio_count)
+        return kvm_sev_es_outs(vcpu, size, port);
     return 1;
 }

 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-                           unsigned int port, void *data, unsigned int count)
+                           unsigned int port)
 {
-    int ret;
+    for (;;) {
+        unsigned int count =
+            min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+        int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

-    ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                    data, count);
-    if (ret)
-        return ret;
+        /* memcpy done already by emulator_pio_out. */
+        vcpu->arch.sev_pio_count -= count;
+        vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+        if (!ret)
+            break;

-    vcpu->arch.pio.count = 0;
+        /* Emulation done by the kernel. */
+        if (!vcpu->arch.sev_pio_count)
+            return 1;
+    }

+    vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
     return 0;
 }

 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-                          unsigned int port, void *data, unsigned int count)
+                          unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
 {
-    int ret;
+    unsigned count = vcpu->arch.pio.count;
+    complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+    vcpu->arch.sev_pio_count -= count;
+    vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}

-    ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                   data, count);
-    if (ret) {
-        vcpu->arch.pio.count = 0;
-    } else {
-        vcpu->arch.guest_ins_data = data;
-        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
-    }
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+    int size = vcpu->arch.pio.size;
+    int port = vcpu->arch.pio.port;

+    advance_sev_es_emulated_ins(vcpu);
+    if (vcpu->arch.sev_pio_count)
+        return kvm_sev_es_ins(vcpu, size, port);
     return 1;
 }

+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+                          unsigned int port)
+{
+    for (;;) {
+        unsigned int count =
+            min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+        if (!__emulator_pio_in(vcpu, size, port, count))
+            break;
+
+        /* Emulation done by the kernel. */
+        advance_sev_es_emulated_ins(vcpu);
+        if (!vcpu->arch.sev_pio_count)
+            return 1;
+    }
+
+    vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+    return 0;
+}
@@ -12412,8 +12475,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                          unsigned int port, void *data, unsigned int count,
                          int in)
 {
-    return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-              : kvm_sev_es_outs(vcpu, size, port, data, count);
+    vcpu->arch.sev_pio_data = data;
+    vcpu->arch.sev_pio_count = count;
+    return in ? kvm_sev_es_ins(vcpu, size, port)
+              : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
|
@ -190,6 +190,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
|
||||
|
||||
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
|
||||
{
|
||||
int err;
|
||||
u8 rc = 0;
|
||||
|
||||
/*
|
||||
@ -216,13 +217,29 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
|
||||
if (likely(slots->generation == ghc->generation &&
|
||||
!kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
|
||||
/* Fast path */
|
||||
__get_user(rc, (u8 __user *)ghc->hva + offset);
|
||||
} else {
|
||||
/* Slow path */
|
||||
kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
|
||||
sizeof(rc));
|
||||
pagefault_disable();
|
||||
err = __get_user(rc, (u8 __user *)ghc->hva + offset);
|
||||
pagefault_enable();
|
||||
if (!err)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Slow path */
|
||||
|
||||
/*
|
||||
* This function gets called from kvm_vcpu_block() after setting the
|
||||
* task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
|
||||
* from a HLT. So we really mustn't sleep. If the page ended up absent
|
||||
* at that point, just return 1 in order to trigger an immediate wake,
|
||||
* and we'll end up getting called again from a context where we *can*
|
||||
* fault in the page and wait for it.
|
||||
*/
|
||||
if (in_atomic() || !task_is_running(current))
|
||||
return 1;
|
||||
|
||||
kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
|
||||
sizeof(rc));
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
|
||||
{
|
||||
int rwd = blk_cgroup_io_type(bio), cpu;
|
||||
struct blkg_iostat_set *bis;
|
||||
unsigned long flags;
|
||||
|
||||
cpu = get_cpu();
|
||||
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
|
||||
u64_stats_update_begin(&bis->sync);
|
||||
flags = u64_stats_update_begin_irqsave(&bis->sync);
|
||||
|
||||
/*
|
||||
* If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
|
||||
@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
|
||||
}
|
||||
bis->cur.ios[rwd]++;
|
||||
|
||||
u64_stats_update_end(&bis->sync);
|
||||
u64_stats_update_end_irqrestore(&bis->sync, flags);
|
||||
if (cgroup_subsys_on_dfl(io_cgrp_subsys))
|
||||
cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
|
||||
put_cpu();
|
||||
|
@ -1325,6 +1325,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
|
||||
int errors, queued;
|
||||
blk_status_t ret = BLK_STS_OK;
|
||||
LIST_HEAD(zone_list);
|
||||
bool needs_resource = false;
|
||||
|
||||
if (list_empty(list))
|
||||
return false;
|
||||
@ -1370,6 +1371,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
|
||||
queued++;
|
||||
break;
|
||||
case BLK_STS_RESOURCE:
|
||||
needs_resource = true;
|
||||
fallthrough;
|
||||
case BLK_STS_DEV_RESOURCE:
|
||||
blk_mq_handle_dev_resource(rq, list);
|
||||
goto out;
|
||||
@ -1380,6 +1383,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
|
||||
* accept.
|
||||
*/
|
||||
blk_mq_handle_zone_resource(rq, &zone_list);
|
||||
needs_resource = true;
|
||||
break;
|
||||
default:
|
||||
errors++;
|
||||
@ -1406,7 +1410,6 @@ out:
|
||||
/* For non-shared tags, the RESTART check will suffice */
|
||||
bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
|
||||
(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
|
||||
bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
|
||||
|
||||
if (nr_budgets)
|
||||
blk_mq_release_budgets(q, list);
|
||||
@ -1447,14 +1450,16 @@ out:
|
||||
* If driver returns BLK_STS_RESOURCE and SCHED_RESTART
|
||||
* bit is set, run queue after a delay to avoid IO stalls
|
||||
* that could otherwise occur if the queue is idle. We'll do
|
||||
* similar if we couldn't get budget and SCHED_RESTART is set.
|
||||
* similar if we couldn't get budget or couldn't lock a zone
|
||||
* and SCHED_RESTART is set.
|
||||
*/
|
||||
needs_restart = blk_mq_sched_needs_restart(hctx);
|
||||
if (prep == PREP_DISPATCH_NO_BUDGET)
|
||||
needs_resource = true;
|
||||
if (!needs_restart ||
|
||||
(no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
|
||||
blk_mq_run_hw_queue(hctx, true);
|
||||
else if (needs_restart && (ret == BLK_STS_RESOURCE ||
|
||||
no_budget_avail))
|
||||
else if (needs_restart && needs_resource)
|
||||
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
|
||||
|
||||
blk_mq_update_dispatch_busy(hctx, true);
|
||||
|
@ -842,6 +842,24 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
|
||||
|
||||
static bool disk_has_partitions(struct gendisk *disk)
|
||||
{
|
||||
unsigned long idx;
|
||||
struct block_device *part;
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock();
|
||||
xa_for_each(&disk->part_tbl, idx, part) {
|
||||
if (bdev_is_partition(part)) {
|
||||
ret = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_queue_set_zoned - configure a disk queue zoned model.
|
||||
* @disk: the gendisk of the queue to configure
|
||||
@ -876,7 +894,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
|
||||
* we do nothing special as far as the block layer is concerned.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
|
||||
!xa_empty(&disk->part_tbl))
|
||||
disk_has_partitions(disk))
|
||||
model = BLK_ZONED_NONE;
|
||||
break;
|
||||
case BLK_ZONED_NONE:
|
||||
|
@ -588,16 +588,6 @@ void del_gendisk(struct gendisk *disk)
|
||||
* Prevent new I/O from crossing bio_queue_enter().
|
||||
*/
|
||||
blk_queue_start_drain(q);
|
||||
blk_mq_freeze_queue_wait(q);
|
||||
|
||||
rq_qos_exit(q);
|
||||
blk_sync_queue(q);
|
||||
blk_flush_integrity();
|
||||
/*
|
||||
* Allow using passthrough request again after the queue is torn down.
|
||||
*/
|
||||
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
|
||||
__blk_mq_unfreeze_queue(q, true);
|
||||
|
||||
if (!(disk->flags & GENHD_FL_HIDDEN)) {
|
||||
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
|
||||
@ -620,6 +610,18 @@ void del_gendisk(struct gendisk *disk)
|
||||
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
|
||||
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
|
||||
device_del(disk_to_dev(disk));
|
||||
|
||||
blk_mq_freeze_queue_wait(q);
|
||||
|
||||
rq_qos_exit(q);
|
||||
blk_sync_queue(q);
|
||||
blk_flush_integrity();
|
||||
/*
|
||||
* Allow using passthrough request again after the queue is torn down.
|
||||
*/
|
||||
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
|
||||
__blk_mq_unfreeze_queue(q, true);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(del_gendisk);
|
||||
|
||||
|
@@ -423,6 +423,7 @@ out_del:
 	device_del(pdev);
 out_put:
 	put_device(pdev);
+	return ERR_PTR(err);
 out_put_disk:
 	put_disk(disk);
 	return ERR_PTR(err);
@@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
 	list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
 		mutex_lock(&resource->resource_lock);
 
-		/*
-		 * Turn off power resources in an unknown state too, because the
-		 * platform firmware on some system expects the OS to turn off
-		 * power resources without any users unconditionally.
-		 */
 		if (!resource->ref_count &&
-		    resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+		    resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
 			acpi_handle_debug(resource->device.handle, "Turning OFF\n");
 			__acpi_power_off(resource);
 		}
@@ -21,6 +21,7 @@
 #include <linux/earlycpio.h>
 #include <linux/initrd.h>
 #include <linux/security.h>
+#include <linux/kmemleak.h>
 #include "internal.h"
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
 	 */
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
+	kmemleak_ignore_phys(acpi_tables_addr);
+
 	/*
 	 * early_ioremap only can remap 256k one time. If we map all
 	 * tables one time, we will hit the limit. Need to map chunks
@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 
 	default:
-		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
-		return 1;
+		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
+		return -EINVAL;
 	}
 
 	hpriv->hp_flags = hp_flags;
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (!blk)
 		return -ENOMEM;
 
+	rbnode->block = blk;
+
 	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
 		present = krealloc(rbnode->cache_present,
 				   BITS_TO_LONGS(blklen) * sizeof(*present),
 				   GFP_KERNEL);
-		if (!present) {
-			kfree(blk);
+		if (!present)
 			return -ENOMEM;
-		}
 
 		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
 		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	}
 
 	/* update the rbnode block, its size and the base register */
-	rbnode->block = blk;
 	rbnode->blklen = blklen;
 	rbnode->base_reg = base_reg;
 	rbnode->cache_present = present;
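Context for the reshuffle above: blk comes from krealloc(rbnode->block, ...), so the old rbnode->block pointer is already invalid by the time the bitmap krealloc() can fail; kfree(blk) on that path left the node pointing at freed memory, which the cache teardown would then free a second time. Committing each reallocated pointer back to its owner immediately makes a mid-sequence failure safe. The same ownership idiom in plain C:

#include <stdlib.h>
#include <string.h>

struct node {
	char *block;
	unsigned long *present;
	size_t len;
};

/* Grow both buffers; publish each new pointer to the owner immediately so
 * the owner's teardown frees live memory exactly once even if the second
 * allocation fails midway. */
static int node_grow(struct node *n, size_t newlen)
{
	char *blk = realloc(n->block, newlen);
	unsigned long *present;

	if (!blk)
		return -1;
	n->block = blk;			/* commit now: the old pointer is dead */

	present = realloc(n->present, newlen * sizeof(*present));
	if (!present)
		return -1;		/* no free(blk): the node still owns it */
	n->present = present;

	memset(n->present + n->len, 0, (newlen - n->len) * sizeof(*present));
	n->len = newlen;
	return 0;
}

static void node_destroy(struct node *n)
{
	free(n->block);			/* freed exactly once, whatever happened */
	free(n->present);
}

int main(void)
{
	struct node n = { 0 };

	node_grow(&n, 8);
	node_destroy(&n);
	return 0;
}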
@@ -58,11 +58,8 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
 	long rate;
 	int i;
 
-	if (rate_hw && rate_ops && rate_ops->determine_rate) {
-		__clk_hw_set_clk(rate_hw, hw);
-		return rate_ops->determine_rate(rate_hw, req);
-	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
-		   mux_hw && mux_ops && mux_ops->set_parent) {
+	if (rate_hw && rate_ops && rate_ops->round_rate &&
+	    mux_hw && mux_ops && mux_ops->set_parent) {
 		req->best_parent_hw = NULL;
 
 		if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
@@ -107,6 +104,9 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
 
 		req->rate = best_rate;
 		return 0;
+	} else if (rate_hw && rate_ops && rate_ops->determine_rate) {
+		__clk_hw_set_clk(rate_hw, hw);
+		return rate_ops->determine_rate(rate_hw, req);
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
 		__clk_hw_set_clk(mux_hw, hw);
 		return mux_ops->determine_rate(mux_hw, req);
@@ -256,6 +256,11 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
 			NULL,
 			0);
 
+	if (ret) {
+		dev_err(dev, "bgpio_init failed\n");
+		return ret;
+	}
+
 	gc->direction_input = mlxbf2_gpio_direction_input;
 	gc->direction_output = mlxbf2_gpio_direction_output;
 	gc->ngpio = npins;
@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 	}
 
 	chip->gc.label = dev_name(dev);
-	if (of_property_read_u32(dn, "ngpios", &num_gpios))
+	if (!of_property_read_u32(dn, "ngpios", &num_gpios))
 		chip->gc.ngpio = num_gpios;
 
 	irq = platform_get_irq(pdev, 0);
@@ -1257,7 +1257,7 @@ static int nv_common_early_init(void *handle)
 			AMD_PG_SUPPORT_VCN_DPG |
 			AMD_PG_SUPPORT_JPEG;
 		if (adev->pdev->device == 0x1681)
-			adev->external_rev_id = adev->rev_id + 0x19;
+			adev->external_rev_id = 0x20;
 		else
 			adev->external_rev_id = adev->rev_id + 0x01;
 		break;
@@ -263,7 +263,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -487,7 +487,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -639,7 +639,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
 	if (!wr_buf)
 		return -ENOSPC;
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -914,7 +914,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   &param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1211,7 +1211,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1396,7 +1396,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1581,7 +1581,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1766,7 +1766,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -1944,7 +1944,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
@@ -2382,7 +2382,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
 		return -ENOSPC;
 	}
 
-	if (parse_write_buffer_into_params(wr_buf, size,
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
 					   (long *)param, buf,
 					   max_param_num,
 					   &param_nums)) {
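All ten hunks in this debugfs file make the same one-token fix: the parser was handed the user's write length (size) instead of the size of the buffer that was actually allocated (wr_buf_size), so an oversized write could drive the parser past the allocation. The general rule is to pass the destination's capacity, never the request length; a trimmed illustration in which parse_params() is a hypothetical stand-in for the driver's parser:

#include <stdio.h>
#include <string.h>

#define WR_BUF_SIZE 40

/* Hypothetical parser: may scan up to buf_size bytes of wr_buf. */
static int parse_params(const char *wr_buf, size_t buf_size)
{
	return (int)strnlen(wr_buf, buf_size);
}

static void debugfs_write(const char *user_data, size_t size)
{
	char wr_buf[WR_BUF_SIZE] = { 0 };
	size_t n = size < sizeof(wr_buf) - 1 ? size : sizeof(wr_buf) - 1;

	memcpy(wr_buf, user_data, n);	/* copy at most the capacity... */
	/* ...and tell the parser the capacity, not the request length */
	printf("parsed %d bytes\n", parse_params(wr_buf, sizeof(wr_buf)));
}

int main(void)
{
	debugfs_write("0x14 0x4", 8);
	return 0;
}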
@@ -366,32 +366,32 @@ static struct wm_table lpddr5_wm_table = {
 			.wm_inst = WM_A,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 5.32,
-			.sr_enter_plus_exit_time_us = 6.38,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_B,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.82,
-			.sr_enter_plus_exit_time_us = 11.196,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_C,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.89,
-			.sr_enter_plus_exit_time_us = 11.24,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_D,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.748,
-			.sr_enter_plus_exit_time_us = 11.102,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 	}
@@ -518,14 +518,21 @@ static unsigned int find_clk_for_voltage(
 		unsigned int voltage)
 {
 	int i;
+	int max_voltage = 0;
+	int clock = 0;
 
 	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
-		if (clock_table->SocVoltage[i] == voltage)
+		if (clock_table->SocVoltage[i] == voltage) {
 			return clocks[i];
+		} else if (clock_table->SocVoltage[i] >= max_voltage &&
+			   clock_table->SocVoltage[i] < voltage) {
+			max_voltage = clock_table->SocVoltage[i];
+			clock = clocks[i];
+		}
 	}
 
-	ASSERT(0);
-	return 0;
+	ASSERT(clock);
+	return clock;
 }
 
 void dcn31_clk_mgr_helper_populate_bw_params(
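The rewritten helper above no longer asserts when no SocVoltage entry matches exactly; it falls back to the entry with the highest voltage still below the request. That search pattern, exact match with a closest-lower fallback, extracted into a standalone form (the table values below are invented):

#include <stdio.h>

#define NUM_LEVELS 4

static unsigned int find_clk_for_voltage(const unsigned int voltages[],
					 const unsigned int clocks[],
					 unsigned int voltage)
{
	unsigned int max_voltage = 0, clock = 0;

	for (int i = 0; i < NUM_LEVELS; i++) {
		if (voltages[i] == voltage)
			return clocks[i];		/* exact match wins */
		if (voltages[i] >= max_voltage && voltages[i] < voltage) {
			max_voltage = voltages[i];	/* best lower candidate */
			clock = clocks[i];
		}
	}
	return clock;					/* 0 if nothing fits */
}

int main(void)
{
	unsigned int v[NUM_LEVELS] = { 600, 700, 800, 900 };
	unsigned int c[NUM_LEVELS] = { 400, 533, 667, 800 };

	printf("%u\n", find_clk_for_voltage(v, c, 750));	/* 533 */
	return 0;
}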
@@ -76,10 +76,6 @@ void dcn31_init_hw(struct dc *dc)
 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
 
-	// Initialize the dccg
-	if (res_pool->dccg->funcs->dccg_init)
-		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 
 		REG_WRITE(REFCLK_CNTL, 0);
@@ -106,6 +102,9 @@ void dcn31_init_hw(struct dc *dc)
 		hws->funcs.bios_golden_init(dc);
 		hws->funcs.disable_vga(dc->hwseq);
 	}
+	// Initialize the dccg
+	if (res_pool->dccg->funcs->dccg_init)
+		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
 
 	if (dc->debug.enable_mem_low_power.bits.dmcu) {
 		// Force ERAM to shutdown if DMCU is not enabled
@@ -217,8 +217,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
 	.num_states = 5,
 	.sr_exit_time_us = 9.0,
 	.sr_enter_plus_exit_time_us = 11.0,
-	.sr_exit_z8_time_us = 402.0,
-	.sr_enter_plus_exit_z8_time_us = 520.0,
+	.sr_exit_z8_time_us = 442.0,
+	.sr_enter_plus_exit_z8_time_us = 560.0,
 	.writeback_latency_us = 12.0,
 	.dram_channel_width_bytes = 4,
 	.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,
 	.performance_trace = false,
-	.max_downscale_src_width = 3840,/*upto 4K*/
+	.max_downscale_src_width = 4096,/*upto true 4K*/
 	.disable_pplib_wm_range = false,
 	.scl_reset_length10 = true,
 	.sanity_checks = false,
@@ -1590,6 +1590,13 @@ static int dcn31_populate_dml_pipes_from_context(
 		pipe = &res_ctx->pipe_ctx[i];
 		timing = &pipe->stream->timing;
 
+		/*
+		 * Immediate flip can be set dynamically after enabling the plane.
+		 * We need to require support for immediate flip or underflow can be
+		 * intermittently experienced depending on peak b/w requirements.
+		 */
+		pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
 		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
 		pipes[pipe_cnt].pipe.src.gpuvm = true;
 		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
@@ -5398,9 +5398,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
 			v->MaximumReadBandwidthWithPrefetch =
 				v->MaximumReadBandwidthWithPrefetch
-				+ dml_max4(
-					v->VActivePixelBandwidth[i][j][k],
-					v->VActiveCursorBandwidth[i][j][k]
+				+ dml_max3(
+					v->VActivePixelBandwidth[i][j][k]
+					+ v->VActiveCursorBandwidth[i][j][k]
 					+ v->NoOfDPP[i][j][k]
 						* (v->meta_row_bandwidth[i][j][k]
 						+ v->dpte_row_bandwidth[i][j][k]),
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP		146
 
 #define YELLOW_CARP_A0			0x01
-#define YELLOW_CARP_B0			0x1A
+#define YELLOW_CARP_B0			0x20
 #define YELLOW_CARP_UNKNOWN		0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
@@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
 	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
 
 	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	mutex_unlock(&psp->dtm_context.mutex);
 
 	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
 		status = remove_display_from_topology_v2(hdcp, index);
@@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
 		HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
 	}
 
-	mutex_unlock(&psp->dtm_context.mutex);
-
 	return status;
 }
 
@@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3(
 	dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
 
 	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	mutex_unlock(&psp->dtm_context.mutex);
 
 	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
 		status = add_display_to_topology_v2(hdcp, display);
@@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3(
 		HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
 	}
 
-	mutex_unlock(&psp->dtm_context.mutex);
-
 	return status;
 }
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
 	return flags;
 }
 
-static enum drm_connector_status ast_connector_detect(struct drm_connector
-						      *connector, bool force)
-{
-	int r;
-
-	r = ast_get_modes(connector);
-	if (r <= 0)
-		return connector_status_disconnected;
-
-	return connector_status_connected;
-}
-
 static void ast_connector_destroy(struct drm_connector *connector)
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
 
 static const struct drm_connector_funcs ast_connector_funcs = {
 	.reset = drm_atomic_helper_connector_reset,
-	.detect = ast_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = ast_connector_destroy,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-			    DRM_CONNECTOR_POLL_DISCONNECT;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
 	drm_connector_attach_encoder(connector, encoder);
 
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
 
 	drm_mode_config_reset(dev);
 
-	drm_kms_helper_poll_init(dev);
-
 	return 0;
 }
@@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
 		},
 		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* AYA NEO 2021 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* GPD MicroPC (generic strings, also match on bios date) */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -185,6 +191,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
 		},
 		.driver_data = (void *)&gpd_win2,
+	}, {	/* GPD Win 3 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+		},
+		.driver_data = (void *)&lcd720x1280_rightside_up,
 	}, {	/* I.T.Works TW891 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -1916,6 +1916,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	if (!crtc_state)
+		return;
+
 	/*
 	 * Don't clobber DPCD if it's been already read out during output
 	 * setup (eDP) or detect.
@@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
 
 	timeline->hwsp_map = vaddr;
 	timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
-	clflush(vaddr + ofs);
+	drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
 
 	return 0;
 }
@@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl)
 
 	memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
 	WRITE_ONCE(*hwsp_seqno, tl->seqno);
-	clflush(hwsp_seqno);
+	drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
 }
 
 void intel_timeline_enter(struct intel_timeline *tl)
@@ -11048,12 +11048,6 @@ enum skl_power_gate {
 #define  DC_STATE_DEBUG_MASK_CORES	(1 << 0)
 #define  DC_STATE_DEBUG_MASK_MEMORY_UP	(1 << 1)
 
-#define BXT_P_CR_MC_BIOS_REQ_0_0_0	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
-#define  BXT_REQ_DATA_MASK		0x3F
-#define  BXT_DRAM_CHANNEL_ACTIVE_SHIFT	12
-#define  BXT_DRAM_CHANNEL_ACTIVE_MASK	(0xF << 12)
-#define  BXT_MEMORY_FREQ_MULTIPLIER_HZ	133333333
-
 #define BXT_D_CR_DRP0_DUNIT8		0x1000
 #define BXT_D_CR_DRP0_DUNIT9		0x1200
 #define  BXT_D_CR_DRP0_DUNIT_START	8
@@ -11084,9 +11078,7 @@ enum skl_power_gate {
 #define  BXT_DRAM_TYPE_LPDDR4		(0x2 << 22)
 #define  BXT_DRAM_TYPE_DDR4		(0x4 << 22)
 
-#define SKL_MEMORY_FREQ_MULTIPLIER_HZ	266666666
 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define  SKL_REQ_DATA_MASK		(0xF << 0)
 #define  DG1_GEAR_TYPE			REG_BIT(16)
 
 #define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
@@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request,
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u64, ctx)
-			     __field(u32, guc_id)
 			     __field(u16, class)
 			     __field(u16, instance)
 			     __field(u32, seqno)
@@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request,
 			   __entry->dev = rq->engine->i915->drm.primary->index;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->uabi_instance;
-			   __entry->guc_id = rq->context->guc_id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->tail = rq->tail;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->guc_id, __entry->ctx, __entry->seqno,
-		      __entry->tail)
+		      __entry->ctx, __entry->seqno, __entry->tail)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
@@ -244,7 +244,6 @@ static int
 skl_get_dram_info(struct drm_i915_private *i915)
 {
 	struct dram_info *dram_info = &i915->dram_info;
-	u32 mem_freq_khz, val;
 	int ret;
 
 	dram_info->type = skl_get_dram_type(i915);
@@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
 	if (ret)
 		return ret;
 
-	val = intel_uncore_read(&i915->uncore,
-				SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
-	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
-				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
-	if (dram_info->num_channels * mem_freq_khz == 0) {
-		drm_info(&i915->drm,
-			 "Couldn't get system memory bandwidth\n");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
 static int bxt_get_dram_info(struct drm_i915_private *i915)
 {
 	struct dram_info *dram_info = &i915->dram_info;
-	u32 dram_channels;
-	u32 mem_freq_khz, val;
-	u8 num_active_channels, valid_ranks = 0;
+	u32 val;
+	u8 valid_ranks = 0;
 	int i;
 
-	val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
-	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
-				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
-	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
-	num_active_channels = hweight32(dram_channels);
-
-	if (mem_freq_khz * num_active_channels == 0) {
-		drm_info(&i915->drm,
-			 "Couldn't get system memory bandwidth\n");
-		return -EINVAL;
-	}
-
 	/*
 	 * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
 	 */
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
 	.disable_vblank = kmb_crtc_disable_vblank,
 };
 
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+			      struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
 	unsigned int val = 0;
 
 	/* Initialize mipi */
-	kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+	kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
 	drm_info(dev,
 		 "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
 		 m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
 	struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
 
 	clk_prepare_enable(kmb->kmb_clk.clk_lcd);
-	kmb_crtc_set_mode(crtc);
+	kmb_crtc_set_mode(crtc, state);
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
 	spin_unlock_irq(&crtc->dev->event_lock);
 }
 
+static enum drm_mode_status
+		kmb_crtc_mode_valid(struct drm_crtc *crtc,
+				    const struct drm_display_mode *mode)
+{
+	int refresh;
+	struct drm_device *dev = crtc->dev;
+	int vfp = mode->vsync_start - mode->vdisplay;
+
+	if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+		drm_dbg(dev, "height = %d less than %d",
+			mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+		return MODE_BAD_VVALUE;
+	}
+	if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+		drm_dbg(dev, "width = %d less than %d",
+			mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+		return MODE_BAD_HVALUE;
+	}
+	refresh = drm_mode_vrefresh(mode);
+	if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+		drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+			refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+		return MODE_BAD;
+	}
+
+	if (vfp < KMB_CRTC_MIN_VFP) {
+		drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
 static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
 	.atomic_begin = kmb_crtc_atomic_begin,
 	.atomic_enable = kmb_crtc_atomic_enable,
 	.atomic_disable = kmb_crtc_atomic_disable,
 	.atomic_flush = kmb_crtc_atomic_flush,
+	.mode_valid = kmb_crtc_mode_valid,
 };
 
 int kmb_setup_crtc(struct drm_device *drm)
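The new mode_valid hook pins the driver to its single supported timing: 1080p at 59-60 Hz with a minimum vertical front porch. The refresh value it compares comes from the standard relation refresh = pixel_clock / (htotal * vtotal); drm_mode_vrefresh() computes essentially the following, with the mode clock kept in kHz:

#include <stdio.h>

/* clock in kHz, as in struct drm_display_mode; rounds to nearest Hz. */
static int vrefresh(int clock, int htotal, int vtotal)
{
	int den = htotal * vtotal;

	return (clock * 1000 + den / 2) / den;
}

int main(void)
{
	/* CEA-861 1920x1080@60: 148.5 MHz pixel clock, htotal 2200, vtotal 1125 */
	printf("%d Hz\n", vrefresh(148500, 2200, 1125));	/* prints 60 Hz */
	return 0;
}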
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
 		if (val & LAYER3_DMA_FIFO_UNDERFLOW)
 			drm_dbg(&kmb->drm,
 				"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
-		if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+		if (val & LAYER3_DMA_FIFO_OVERFLOW)
 			drm_dbg(&kmb->drm,
 				"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
 	}
@@ -20,11 +20,18 @@
 #define DRIVER_MAJOR			1
 #define DRIVER_MINOR			1
 
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP		4
 #define KMB_CRTC_MAX_WIDTH		1920 /* max width in pixels */
 #define KMB_CRTC_MAX_HEIGHT		1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH		1920
+#define KMB_CRTC_MIN_HEIGHT		1080
+#define KMB_FB_MAX_WIDTH		1920
+#define KMB_FB_MAX_HEIGHT		1080
+#define KMB_FB_MIN_WIDTH		1
+#define KMB_FB_MIN_HEIGHT		1
 
+#define KMB_MIN_VREFRESH		59	/*vertical refresh in Hz */
+#define KMB_MAX_VREFRESH		60	/*vertical refresh in Hz */
 #define KMB_LCD_DEFAULT_CLK		200000000
 #define KMB_SYS_CLK_MHZ			500
 
@@ -50,6 +57,7 @@ struct kmb_drm_private {
 	spinlock_t			irq_lock;
 	int				irq_lcd;
 	int				sys_clk_mhz;
+	struct disp_cfg			init_disp_cfg[KMB_MAX_PLANES];
 	struct layer_status		plane_status[KMB_MAX_PLANES];
 	int				kmb_under_flow;
 	int				kmb_flush_done;
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
 	return 0;
 }
 
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500 500
+
 static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
 				struct mipi_tx_frame_timing_cfg *fg_cfg)
 {
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
 	/* 500 Mhz system clock minus 50 to account for the difference in
 	 * MIPI clock speed in RTL tests
 	 */
-	sysclk = kmb_dsi->sys_clk_mhz - 50;
+	if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+		sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+	} else {
+		/* 700 Mhz clk*/
+		sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+	}
 
 	/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
 	 * Frame genartor timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
 	return 0;
 }
 
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+				struct drm_atomic_state *old_state)
 {
 	struct regmap *msscam;
 
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 		dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
 		return;
 	}
 
+	drm_atomic_bridge_chain_enable(adv_bridge, old_state);
 	/* DISABLE MIPI->CIF CONNECTION */
 	regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
 
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 }
 
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-		     int sys_clk_mhz)
+		     int sys_clk_mhz, struct drm_atomic_state *old_state)
 {
 	u64 data_rate;
 
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
 		mipi_tx_init_cfg.lane_rate_mbps = data_rate;
 	}
 
-	kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
 	/* Initialize mipi controller */
 	mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
 
 	/* Dphy initialization */
 	mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
 
-	connect_lcd_to_mipi(kmb_dsi);
+	connect_lcd_to_mipi(kmb_dsi, old_state);
 	dev_info(kmb_dsi->dev, "mipi hw initialized");
 
 	return 0;
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
 struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
 void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-		     int sys_clk_mhz);
+		     int sys_clk_mhz, struct drm_atomic_state *old_state);
 int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
 
 static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
 {
+	struct kmb_drm_private *kmb;
+	struct kmb_plane *kmb_plane = to_kmb_plane(plane);
 	int i;
+	int plane_id = kmb_plane->id;
+	struct disp_cfg init_disp_cfg;
 
+	kmb = to_kmb(plane->dev);
+	init_disp_cfg = kmb->init_disp_cfg[plane_id];
+	/* Due to HW limitations, changing pixel format after initial
+	 * plane configuration is not supported.
+	 */
+	if (init_disp_cfg.format && init_disp_cfg.format != format) {
+		drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+		return -EINVAL;
+	}
 	for (i = 0; i < plane->format_count; i++) {
 		if (plane->format_types[i] == format)
 			return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 										 plane);
+	struct kmb_drm_private *kmb;
+	struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+	int plane_id = kmb_plane->id;
+	struct disp_cfg init_disp_cfg;
 	struct drm_framebuffer *fb;
 	int ret;
 	struct drm_crtc_state *crtc_state;
 	bool can_position;
 
+	kmb = to_kmb(plane->dev);
+	init_disp_cfg = kmb->init_disp_cfg[plane_id];
 	fb = new_plane_state->fb;
 	if (!fb || !new_plane_state->crtc)
 		return 0;
@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 	    new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
 	    new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
 		return -EINVAL;
+
+	/* Due to HW limitations, changing plane height or width after
+	 * initial plane configuration is not supported.
+	 */
+	if ((init_disp_cfg.width && init_disp_cfg.height) &&
+	    (init_disp_cfg.width != fb->width ||
+	    init_disp_cfg.height != fb->height)) {
+		drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+		return -EINVAL;
+	}
 	can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
 	crtc_state =
 		drm_atomic_get_existing_crtc_state(state,
@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 	unsigned char plane_id;
 	int num_planes;
 	static dma_addr_t addr[MAX_SUB_PLANES];
+	struct disp_cfg *init_disp_cfg;
 
 	if (!plane || !new_plane_state || !old_plane_state)
 		return;
@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 	}
 	spin_unlock_irq(&kmb->irq_lock);
 
-	src_w = (new_plane_state->src_w >> 16);
+	init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+	src_w = new_plane_state->src_w >> 16;
 	src_h = new_plane_state->src_h >> 16;
 	crtc_x = new_plane_state->crtc_x;
 	crtc_y = new_plane_state->crtc_y;
@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 
 	/* Enable DMA */
 	kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+	/* Save initial display config */
+	if (!init_disp_cfg->width ||
+	    !init_disp_cfg->height ||
+	    !init_disp_cfg->format) {
+		init_disp_cfg->width = width;
+		init_disp_cfg->height = height;
+		init_disp_cfg->format = fb->format->format;
+	}
+
 	drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
 		kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
@@ -63,6 +63,12 @@ struct layer_status {
 	u32 ctrl;
 };
 
+struct disp_cfg {
+	unsigned int width;
+	unsigned int height;
+	unsigned int format;
+};
+
 struct kmb_plane *kmb_plane_init(struct drm_device *drm);
 void kmb_plane_destroy(struct drm_plane *plane);
 #endif /* __KMB_PLANE_H__ */
@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 	    adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
 		adreno_gpu->base.hw_apriv = true;
 
+	/*
+	 * For now only clamp to idle freq for devices where this is known not
+	 * to cause power supply issues:
+	 */
+	if (info && (info->revn == 618))
+		gpu->clamp_to_idle = true;
+
 	a6xx_llc_slices_init(pdev, a6xx_gpu);
 
 	ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
@@ -203,6 +203,10 @@ struct msm_gpu {
 	uint32_t suspend_count;
 
 	struct msm_gpu_state *crashstate;
+
+	/* Enable clamping to idle freq when inactive: */
+	bool clamp_to_idle;
+
 	/* True if the hardware supports expanded apriv (a650 and newer) */
 	bool hw_apriv;
@@ -200,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 
 	idle_freq = get_freq(gpu);
 
-	msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+	if (gpu->clamp_to_idle)
+		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
 
 	df->idle_time = ktime_get();
 	df->idle_freq = idle_freq;
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
 	struct mxsfb_drm_private *mxsfb = drm->dev_private;
 
 	mxsfb_enable_axi_clk(mxsfb);
-	mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+	/* Disable and clear VBLANK IRQ */
+	writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
 	mxsfb_disable_axi_clk(mxsfb);
 }
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
 	.clock		= 69700,
 
 	.hdisplay	= 800,
-	.hsync_start	= 800 + 6,
-	.hsync_end	= 800 + 6 + 15,
-	.htotal		= 800 + 6 + 15 + 16,
+	.hsync_start	= 800 + 52,
+	.hsync_end	= 800 + 52 + 8,
+	.htotal		= 800 + 52 + 8 + 48,
 
 	.vdisplay	= 1280,
-	.vsync_start	= 1280 + 8,
-	.vsync_end	= 1280 + 8 + 48,
-	.vtotal		= 1280 + 8 + 48 + 52,
+	.vsync_start	= 1280 + 16,
+	.vsync_end	= 1280 + 16 + 6,
+	.vtotal		= 1280 + 16 + 6 + 15,
 
 	.width_mm	= 135,
 	.height_mm	= 217,
@@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state)
 	mock_device.driver = &mock_driver;
 	mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
 	mock_plane.dev = &mock_device;
+	mock_obj_props.count = 0;
 	mock_plane.base.properties = &mock_obj_props;
 	mock_prop.base.id = 1; /* 0 is an invalid id */
 	mock_prop.dev = &mock_device;
@@ -190,6 +190,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 	struct ttm_transfer_obj *fbo;
 
 	fbo = container_of(bo, struct ttm_transfer_obj, base);
+	dma_resv_fini(&fbo->base.base._resv);
 	ttm_bo_put(fbo->bo);
 	kfree(fbo);
 }
@@ -13,6 +13,7 @@
 #define _HYPERV_VMBUS_H
 
 #include <linux/list.h>
+#include <linux/bitops.h>
 #include <asm/sync_bitops.h>
 #include <asm/hyperv-tlfs.h>
 #include <linux/atomic.h>
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
 	/* Construct the family header first */
 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-	       LS_DEVICE_NAME_MAX);
+	strscpy_pad(header->device_name,
+		    dev_name(&query->port->agent->device->dev),
+		    LS_DEVICE_NAME_MAX);
 	header->port_num = query->port->port_num;
 
 	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
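The memcpy() removed above always copied LS_DEVICE_NAME_MAX bytes, reading past the end of the device-name string and leaking whatever bytes followed it into the netlink message; strscpy_pad() stops at the terminator and zero-fills the remainder of the field. A userspace approximation of that contract (the real helper also reports truncation with -E2BIG, which this sketch omits; size must be nonzero):

#include <stdio.h>
#include <string.h>

static size_t strscpy_pad_like(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size - 1);	/* leave room for the NUL */

	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* terminator + zero padding */
	return len;
}

int main(void)
{
	char field[16];

	memset(field, 0xAA, sizeof(field));	/* simulate stale stack data */
	strscpy_pad_like(field, "mlx5_0", sizeof(field));
	for (size_t i = 0; i < sizeof(field); i++)
		printf("%02x ", (unsigned char)field[i]);	/* tail is 00 */
	printf("\n");
	return 0;
}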
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
 	u64 reg;
 	struct pio_buf *pbuf;
+	LIST_HEAD(wake_list);
 
 	if (!sc)
 		return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
 	spin_unlock(&sc->release_lock);
 
 	write_seqlock(&sc->waitlock);
-	while (!list_empty(&sc->piowait)) {
+	if (!list_empty(&sc->piowait))
+		list_move(&sc->piowait, &wake_list);
+	write_sequnlock(&sc->waitlock);
+	while (!list_empty(&wake_list)) {
 		struct iowait *wait;
 		struct rvt_qp *qp;
 		struct hfi1_qp_priv *priv;
 
-		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		wait = list_first_entry(&wake_list, struct iowait, list);
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
 		priv->s_iowait.lock = NULL;
 		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
 	}
-	write_sequnlock(&sc->waitlock);
 
 	spin_unlock_irq(&sc->alloc_lock);
 }
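The restructuring above is a standard deadlock-avoidance idiom: hfi1_qp_wakeup() can take further locks, so instead of waking QPs while holding the waitlock, the whole piowait list is spliced onto a private wake_list under the lock, the lock is dropped, and the wakeups run unlocked. The move-then-notify pattern in a minimal pthread sketch:

#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	int id;
};

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *piowait;		/* guarded by waitlock */

static void wake(struct waiter *w)	/* may take other locks */
{
	printf("waking %d\n", w->id);
}

static void disable_context(void)
{
	struct waiter *wake_list;

	pthread_mutex_lock(&waitlock);
	wake_list = piowait;		/* splice the whole list out */
	piowait = NULL;
	pthread_mutex_unlock(&waitlock);

	while (wake_list) {		/* notify without holding waitlock */
		struct waiter *w = wake_list;

		wake_list = w->next;
		wake(w);
	}
}

int main(void)
{
	struct waiter b = { NULL, 2 }, a = { &b, 1 };

	piowait = &a;
	disable_context();
	return 0;
}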
@@ -1035,12 +1035,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
 		if (cq->avoid_mem_cflct) {
 			ext_cqe = (__le64 *)((u8 *)cqe + 32);
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 		} else {
 			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
 			ext_cqe = cq->cq_base[peek_head].buf;
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 			if (!peek_head)
 				polarity ^= 1;
 		}
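qword3 belongs to the base CQE, so testing it for the extended CQE's valid bit could treat a stale extended entry as fresh. For background, this style of queue marks entries with an alternating valid (polarity) bit rather than head/tail registers: the phase a consumer expects flips each time it wraps the ring, so an entry is new only when its bit matches the current phase. A toy consumer showing the scheme:

#include <stdio.h>

#define RING_SIZE 4

struct cqe { unsigned valid : 1; unsigned data : 31; };

static struct cqe ring[RING_SIZE];
static unsigned head;		/* consumer index */
static unsigned polarity = 1;	/* phase expected of fresh entries */

static int poll_one(unsigned *data)
{
	struct cqe *e = &ring[head];

	if (e->valid != polarity)
		return 0;			/* nothing new */
	*data = e->data;
	if (++head == RING_SIZE) {		/* wrap: flip expected phase */
		head = 0;
		polarity ^= 1;
	}
	return 1;
}

int main(void)
{
	unsigned d;
	int ok;

	ring[0] = (struct cqe){ .valid = 1, .data = 42 };
	ok = poll_one(&d);
	printf("got=%d data=%u\n", ok, ok ? d : 0);
	ok = poll_one(&d);			/* ring[1] is still stale */
	printf("got=%d\n", ok);
	return 0;
}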
@@ -3379,9 +3379,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
 	}
 
 	if (cq_poll_info->ud_vlan_valid) {
-		entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-		entry->wc_flags |= IB_WC_WITH_VLAN;
+		u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
 		entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+		if (vlan) {
+			entry->vlan_id = vlan;
+			entry->wc_flags |= IB_WC_WITH_VLAN;
+		}
 	} else {
 		entry->sl = 0;
 	}
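The rework above always derives the service level from the 802.1Q priority bits and reports a VLAN only when the 12-bit VID is nonzero, since VID 0 means the frame was priority-tagged but carries no VLAN. The 16-bit tag control information layout drives both masks:

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff	/* bits 0-11: VLAN ID */
#define VLAN_PRIO_SHIFT	13	/* bits 13-15: priority code point */

int main(void)
{
	uint16_t tci = (uint16_t)(5u << VLAN_PRIO_SHIFT);	/* prio 5, VID 0 */
	uint16_t vid = tci & VLAN_VID_MASK;
	uint8_t sl = (uint8_t)(tci >> VLAN_PRIO_SHIFT);

	printf("sl=%u vid=%u report_vlan=%d\n", sl, vid, vid != 0);
	return 0;
}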
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 
 		tc_node->enable = true;
 		ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
-		if (ret)
+		if (ret) {
+			vsi->unregister_qset(vsi, tc_node);
 			goto reg_err;
+		}
 	}
 	ibdev_dbg(to_ibdev(vsi->dev),
 		  "WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 	}
 	goto exit;
 
+reg_err:
+	irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+	list_del(&tc_node->siblings);
+	irdma_free_node(vsi, tc_node);
 leaf_add_err:
 	if (list_empty(&vsi_node->child_list_head)) {
 		if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
 exit:
 	mutex_unlock(&vsi->dev->ws_mutex);
 	return ret;
-
-reg_err:
-	mutex_unlock(&vsi->dev->ws_mutex);
-	irdma_ws_remove(vsi, user_pri);
-	return ret;
 }
 
 /**
@@ -1334,7 +1334,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
-	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
 	kvfree(in);
@@ -1528,6 +1527,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 		ib_umem_release(&odp->umem);
 		return ERR_CAST(mr);
 	}
+	xa_init(&mr->implicit_children);
 
 	odp->private = mr;
 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
Some files were not shown because too many files have changed in this diff