hyperv-fixes for 5.19-rc3

-----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEIbPD0id6easf0xsudhRwX5BBoF4FAmKsjCwTHHdlaS5saXVA
 a2VybmVsLm9yZwAKCRB2FHBfkEGgXpAqB/0akBXX+rxp/MzKKiuzOa53rV5IqPe6
 dbDhJtsyCeX2xiNtiDYqCsCmKRt+rmQgzrKykRLCeQatrLmjbUzEZYo1QmpmdFjK
 icZtYdso1FEW6v4h1j+p7dl/l3nqXWcG9gjCDecma9ZxXnpzNw5O9kBCpUyWGzwS
 85RJy2X5n7gyqiAyrDD5MSiUAL2n2FG2lHwK4EYjXsfPJEesx/gh8DEzVkMJWSqa
 nPyvtnPxGSDr22KTymF/Z2BJSYMcxaU528Ztto+DE8NhVWKk99eiMpyXagH3LygD
 gOHmnRS23eJhw/Mq44IHGf7t94bFuUBQraVvaGnaXGLoLv946oNMfLhl
 =miu9
 -----END PGP SIGNATURE-----

Merge tag 'hyperv-fixes-signed-20220617' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

 - Unexport the __init-annotated hv_init_clocksource() (Masahiro Yamada)

 - Two bug fixes for the vmbus driver (Saurabh Sengar)

 - Fix SEV GHCB protocol negotiation in Isolation VMs (Tianyu Lan)

 - Fix syntax errors in code comments (Xiang Wang)

 - One minor endianness fix to the HID driver (Michael Kelley)

* tag 'hyperv-fixes-signed-20220617' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  x86/Hyper-V: Add SEV negotiate protocol support in Isolation VM
  Drivers: hv: vmbus: Release cpu lock in error case
  HID: hyperv: Correctly access fields declared as __le16
  clocksource: hyper-v: unexport __init-annotated hv_init_clocksource()
  Drivers: hv: Fix syntax errors in comments
  Drivers: hv: vmbus: Don't assign VMbus channel interrupts to isolated CPUs
commit 2d806a688f (merged by Linus Torvalds, 2022-06-17 13:39:12 -05:00)
8 changed files with 109 additions and 15 deletions

diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
+#include <asm/sev.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -405,6 +406,11 @@ void __init hyperv_init(void)
 	}
 
 	if (hv_isolation_type_snp()) {
+		/* Negotiate GHCB Version. */
+		if (!hv_ghcb_negotiate_protocol())
+			hv_ghcb_terminate(SEV_TERM_SET_GEN,
+					  GHCB_SEV_ES_PROT_UNSUPPORTED);
+
 		hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
 		if (!hv_ghcb_pg)
 			goto free_vp_assist_page;

diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -53,6 +53,8 @@ union hv_ghcb {
 	} hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
+static u16 hv_ghcb_version __ro_after_init;
+
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 {
 	union hv_ghcb *hv_ghcb;
@@ -96,12 +98,85 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 	return status;
 }
 
+static inline u64 rd_ghcb_msr(void)
+{
+	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static inline void wr_ghcb_msr(u64 val)
+{
+	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
+}
+
+static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
+				      u64 exit_info_1, u64 exit_info_2)
+{
+	/* Fill in protocol and format specifiers */
+	ghcb->protocol_version = hv_ghcb_version;
+	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+	ghcb_set_sw_exit_code(ghcb, exit_code);
+	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
+	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
+
+	VMGEXIT();
+
+	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
+		return ES_VMM_ERROR;
+	else
+		return ES_OK;
+}
+
+void hv_ghcb_terminate(unsigned int set, unsigned int reason)
+{
+	u64 val = GHCB_MSR_TERM_REQ;
+
+	/* Tell the hypervisor what went wrong. */
+	val |= GHCB_SEV_TERM_REASON(set, reason);
+
+	/* Request Guest Termination from Hypervisor */
+	wr_ghcb_msr(val);
+	VMGEXIT();
+
+	while (true)
+		asm volatile("hlt\n" : : : "memory");
+}
+
+bool hv_ghcb_negotiate_protocol(void)
+{
+	u64 ghcb_gpa;
+	u64 val;
+
+	/* Save ghcb page gpa. */
+	ghcb_gpa = rd_ghcb_msr();
+
+	/* Do the GHCB protocol version negotiation */
+	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
+	VMGEXIT();
+	val = rd_ghcb_msr();
+
+	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
+		return false;
+
+	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
+	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
+		return false;
+
+	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
+				GHCB_PROTOCOL_MAX);
+
+	/* Write ghcb page back after negotiating protocol. */
+	wr_ghcb_msr(ghcb_gpa);
+	VMGEXIT();
+
+	return true;
+}
+
 void hv_ghcb_msr_write(u64 msr, u64 value)
 {
 	union hv_ghcb *hv_ghcb;
 	void **ghcb_base;
 	unsigned long flags;
-	struct es_em_ctxt ctxt;
 
 	if (!hv_ghcb_pg)
 		return;
@@ -120,8 +195,7 @@ void hv_ghcb_msr_write(u64 msr, u64 value)
 	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
 	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
 
-	if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
-				SVM_EXIT_MSR, 1, 0))
+	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
 		pr_warn("Fail to write msr via ghcb %llx.\n", msr);
 
 	local_irq_restore(flags);
@@ -133,7 +207,6 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
 	union hv_ghcb *hv_ghcb;
 	void **ghcb_base;
 	unsigned long flags;
-	struct es_em_ctxt ctxt;
 
 	/* Check size of union hv_ghcb here. */
 	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
@@ -152,8 +225,7 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
 	}
 
 	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
-	if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
-				SVM_EXIT_MSR, 0, 0))
+	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
 		pr_warn("Fail to read msr via ghcb %llx.\n", msr);
 	else
 		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
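
(For context: a minimal usage sketch of the accessors above, not part of this
patch. HV_X64_MSR_GUEST_OS_ID is a real Hyper-V synthetic MSR, but the calling
code here is hypothetical and assumes an SNP Isolation VM where hyperv_init()
has already set up hv_ghcb_pg.)

	u64 guest_os_id;

	/* Reads and writes are routed through the shared GHCB page rather
	 * than plain rdmsrl()/wrmsrl(), which cannot reach the hypervisor
	 * directly from an SEV-SNP guest.
	 */
	hv_ghcb_msr_read(HV_X64_MSR_GUEST_OS_ID, &guest_os_id);
	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_os_id);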

diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -179,9 +179,13 @@ int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 void hv_ghcb_msr_write(u64 msr, u64 value);
 void hv_ghcb_msr_read(u64 msr, u64 *value);
+bool hv_ghcb_negotiate_protocol(void);
+void hv_ghcb_terminate(unsigned int set, unsigned int reason);
 #else
 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
+static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
+static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
 #endif
 
 extern bool hv_isolation_type_snp(void);

diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void)
 	hv_sched_clock_offset = hv_read_reference_counter();
 	hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
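
(Why the unexport is the right fix, sketched with hypothetical names:
hv_init_clocksource() is __init, so its text is discarded once boot completes.
An EXPORT_SYMBOL_GPL on it would let a module loaded later call into freed
memory; recent modpost also warns about this pattern.)

	int __init my_early_setup(void)	/* placed in .init.text, freed after boot */
	{
		return 0;
	}

	/* EXPORT_SYMBOL_GPL(my_early_setup);   <-- bug: a module loaded after
	 * boot would jump into already-freed .init.text.
	 */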

diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
 	if (!input_device->hid_desc)
 		goto cleanup;
 
-	input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+	input_device->report_desc_size = le16_to_cpu(
+					desc->desc[0].wDescriptorLength);
 	if (input_device->report_desc_size == 0) {
 		input_device->dev_info_status = -EINVAL;
 		goto cleanup;
@@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
 
 	memcpy(input_device->report_desc,
 	       ((unsigned char *)desc) + desc->bLength,
-	       desc->desc[0].wDescriptorLength);
+	       le16_to_cpu(desc->desc[0].wDescriptorLength));
 
 	/* Send the ack */
 	memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
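
(The general rule behind this fix, as a standalone sketch with a hypothetical
struct: fields declared __le16 carry little-endian wire data, so portable code
converts them with le16_to_cpu() before use. On a big-endian kernel a raw read
returns byte-swapped values, and sparse flags the unconverted access.)

	struct wire_hdr {
		__le16 len;			/* little-endian on the wire */
	} __packed;

	static u16 wire_hdr_len(const struct wire_hdr *h)
	{
		return le16_to_cpu(h->len);	/* correct on any host endianness */
	}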

diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -21,6 +21,7 @@
 #include <linux/cpu.h>
 #include <linux/hyperv.h>
 #include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
 
 #include "hyperv_vmbus.h"
 
@@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	if (newchannel->offermsg.offer.sub_channel_index == 0) {
 		mutex_unlock(&vmbus_connection.channel_mutex);
+		cpus_read_unlock();
 		/*
 		 * Don't call free_channel(), because newchannel->kobj
 		 * is not initialized yet.
@@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel)
 	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
 	struct cpumask *allocated_mask;
+	const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
 	u32 target_cpu;
 	int numa_node;
 
 	if (!perf_chn ||
-	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
+	    !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+	    cpumask_empty(hk_mask)) {
 		/*
 		 * If the channel is not a performance critical
 		 * channel, bind it to VMBUS_CONNECT_CPU.
 		 * In case alloc_cpumask_var() fails, bind it to
 		 * VMBUS_CONNECT_CPU.
+		 * If all the cpus are isolated, bind it to
+		 * VMBUS_CONNECT_CPU.
 		 */
 		channel->target_cpu = VMBUS_CONNECT_CPU;
 		if (perf_chn)
@@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel)
 	}
 
 	allocated_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) {
+retry:
+	cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+	cpumask_and(available_mask, available_mask, hk_mask);
+
+	if (cpumask_empty(available_mask)) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the allocated map.
 		 */
 		cpumask_clear(allocated_mask);
+		goto retry;
 	}
 
-	cpumask_xor(available_mask, allocated_mask,
-		    cpumask_of_node(numa_node));
-
 	target_cpu = cpumask_first(available_mask);
 	cpumask_set_cpu(target_cpu, allocated_mask);
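
(The housekeeping API used above is generic; a minimal sketch with a
hypothetical caller, showing how a driver confines its interrupt targets to
non-isolated CPUs:)

	#include <linux/cpumask.h>
	#include <linux/sched/isolation.h>

	/* Return an online CPU that may take managed interrupts; CPUs carved
	 * out with isolcpus=managed_irq,... are excluded from the mask.
	 */
	static unsigned int pick_irq_cpu(void)
	{
		const struct cpumask *hk_mask =
			housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		return cpumask_first_and(cpu_online_mask, hk_mask);
	}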

diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy)
 	in_msg = kvp_transaction.kvp_msg;
 
 	/*
-	 * The key/value strings sent from the host are encoded in
+	 * The key/value strings sent from the host are encoded
 	 * in utf16; convert it to utf8 strings.
 	 * The host assures us that the utf16 strings will not exceed
 	 * the max lengths specified. We will however, reserve room

diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -21,6 +21,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/clockchips.h>
 #include <linux/cpu.h>
+#include <linux/sched/isolation.h>
 #include <linux/sched/task_stack.h>
 
 #include <linux/delay.h>
@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
 	if (target_cpu >= nr_cpumask_bits)
 		return -EINVAL;
 
+	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
+		return -EINVAL;
+
 	/* No CPUs should come up or down during this. */
 	cpus_read_lock();
 