hyperv-fixes for 5.15-rc2

-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEIbPD0id6easf0xsudhRwX5BBoF4FAmFB6pwTHHdlaS5saXVA
a2VybmVsLm9yZwAKCRB2FHBfkEGgXoo5CAChbzKMMbqBHArnNCO+pKkUWmc7eYqJ
U368ux75wWEy6ywCUxCHqhwnTrp5KJhyjTPi89V8Vwh+aNG6q86g2dT3I6qsoIby
Dav9yw1NiExxNzAEiJVH/WgE+WGZUvWqzbKixdZWjDk9DWhVv7h96chik9dvh9SW
/nm27o4sNmnFETQ+kh/hmX+8T6V8HeqZuL9WrGw4EW9At/WE16vjk47Wm5gJRl+j
Z1KylALvOiarzzMH3Qx1IxvZ1789JtCIr2b5rHJH8tCPvPF0P2dihm/Wjf6xguyT
tDMvquBdQnfugbZXQDy58Agp34Dw+fHCFaOmoruJePa78qqBYzujHvW9
=gBaz
-----END PGP SIGNATURE-----

Merge tag 'hyperv-fixes-signed-20210915' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

 - Fix kernel crash caused by uio driver (Vitaly Kuznetsov)

 - Remove on-stack cpumask from HV APIC code (Wei Liu)

* tag 'hyperv-fixes-signed-20210915' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  x86/hyperv: remove on-stack cpumask from hv_send_ipi_mask_allbutself
  asm-generic/hyperv: provide cpumask_to_vpset_noself
  Drivers: hv: vmbus: Fix kernel crash upon unbinding a device from uio_hv_generic driver
commit ff1ffd71d5
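The x86 APIC change below removes the struct cpumask that hv_send_ipi_mask_allbutself() used to copy onto the stack just to clear the current CPU, and instead threads an exclude_self flag through __send_ipi_mask() and __send_ipi_mask_ex() so the callee skips the sender while it walks the mask. A minimal user-space sketch of that shift, with simplified stand-in types (cpu_mask_t, send_to_mask(), MAX_CPUS and the hard-coded this_cpu are illustrative names, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct cpumask: one bit per possible CPU. */
#define MAX_CPUS 256
typedef struct { uint64_t bits[MAX_CPUS / 64]; } cpu_mask_t;

static int this_cpu = 3;	/* pretend we are running on CPU 3 */

static void send_to_mask(const cpu_mask_t *mask, bool exclude_self)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++) {
		if (exclude_self && cpu == this_cpu)
			continue;	/* skip ourselves, no mask copy needed */
		if (mask->bits[cpu / 64] & (1ULL << (cpu % 64)))
			printf("IPI -> cpu %d\n", cpu);
	}
}

/* Old shape: copy the whole mask onto the stack just to clear one bit. */
static void send_allbutself_old(const cpu_mask_t *mask)
{
	cpu_mask_t new_mask;	/* large on-stack copy */

	memcpy(&new_mask, mask, sizeof(new_mask));
	new_mask.bits[this_cpu / 64] &= ~(1ULL << (this_cpu % 64));
	send_to_mask(&new_mask, false);
}

/* New shape: no copy, the callee filters out the current CPU. */
static void send_allbutself_new(const cpu_mask_t *mask)
{
	send_to_mask(mask, true);
}

int main(void)
{
	cpu_mask_t mask = { .bits = { 0x1b } };	/* CPUs 0, 1, 3, 4 */

	send_allbutself_old(&mask);
	send_allbutself_new(&mask);
	return 0;
}

The motivation is the same in the sketch and in the patch: struct cpumask scales with the configured CPU count (around 1 KiB with NR_CPUS=8192), which is an unwelcome amount of kernel stack for an IPI fast path, and the copy itself is avoidable work.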
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
 	struct hv_send_ipi_ex **arg;
 	struct hv_send_ipi_ex *ipi_arg;
@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 
 	if (!cpumask_equal(mask, cpu_present_mask)) {
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+		if (exclude_self)
+			nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+		else
+			nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
 	}
 	if (nr_bank < 0)
 		goto ipi_mask_ex_done;
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
 	return hv_result_success(status);
 }
 
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
-	int cur_cpu, vcpu;
+	int cur_cpu, vcpu, this_cpu = smp_processor_id();
 	struct hv_send_ipi ipi_arg;
 	u64 status;
+	unsigned int weight;
 
 	trace_hyperv_send_ipi_mask(mask, vector);
 
-	if (cpumask_empty(mask))
+	weight = cpumask_weight(mask);
+
+	/*
+	 * Do nothing if
+	 *  1. the mask is empty
+	 *  2. the mask only contains self when exclude_self is true
+	 */
+	if (weight == 0 ||
+	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
 		return true;
 
 	if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	ipi_arg.cpu_mask = 0;
 
 	for_each_cpu(cur_cpu, mask) {
+		if (exclude_self && cur_cpu == this_cpu)
+			continue;
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
 		if (vcpu == VP_INVAL)
 			return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	return hv_result_success(status);
 
 do_ex_hypercall:
-	return __send_ipi_mask_ex(mask, vector);
+	return __send_ipi_mask_ex(mask, vector, exclude_self);
 }
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
 		return false;
 
 	if (vp >= 64)
-		return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
 
 	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
 	return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
 
 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-	if (!__send_ipi_mask(mask, vector))
+	if (!__send_ipi_mask(mask, vector, false))
 		orig_apic.send_IPI_mask(mask, vector);
 }
 
 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
-	const struct cpumask *local_mask;
-
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
-	if (!__send_ipi_mask(local_mask, vector))
+	if (!__send_ipi_mask(mask, vector, true))
 		orig_apic.send_IPI_mask_allbutself(mask, vector);
 }
 
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
 
 static void hv_send_ipi_all(int vector)
 {
-	if (!__send_ipi_mask(cpu_online_mask, vector))
+	if (!__send_ipi_mask(cpu_online_mask, vector, false))
 		orig_apic.send_IPI_all(vector);
 }
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 	mutex_unlock(&ring_info->ring_buffer_mutex);
 
 	kfree(ring_info->pkt_buffer);
+	ring_info->pkt_buffer = NULL;
 	ring_info->pkt_buffer_size = 0;
 }
 
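The one-line ring_buffer.c change above is the uio_hv_generic crash fix: hv_ringbuffer_cleanup() freed pkt_buffer but left the stale pointer in place, so a repeat cleanup of the same ring (for example after unbinding and rebinding the device) would hand already-freed memory to kfree() again. Clearing the pointer makes any later cleanup harmless, because kfree(NULL) is defined to do nothing. A small user-space analogue of the idiom (struct ring_info_demo and ring_cleanup() are illustrative names, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the ring-buffer bookkeeping, not the kernel struct. */
struct ring_info_demo {
	char   *pkt_buffer;
	size_t  pkt_buffer_size;
};

/* Mirrors the idiom in the hunk: free, then clear both pointer and size. */
static void ring_cleanup(struct ring_info_demo *ri)
{
	free(ri->pkt_buffer);	/* free(NULL) is defined to do nothing */
	ri->pkt_buffer = NULL;	/* the line the patch adds (kernel uses kfree) */
	ri->pkt_buffer_size = 0;
}

int main(void)
{
	struct ring_info_demo ri = { malloc(4096), 4096 };

	ring_cleanup(&ri);	/* first cleanup frees the buffer */
	ring_cleanup(&ri);	/* second cleanup is now a harmless no-op */
	printf("size after cleanup: %zu\n", ri.pkt_buffer_size);
	return 0;
}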
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
 	return hv_vp_index[cpu_number];
 }
 
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
-				    const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+				    const struct cpumask *cpus,
+				    bool exclude_self)
 {
 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+	int this_cpu = smp_processor_id();
 
 	/* valid_bank_mask can represent up to 64 banks */
 	if (hv_max_vp_index / 64 >= 64)
@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
 	 * Some banks may end up being empty but this is acceptable.
 	 */
 	for_each_cpu(cpu, cpus) {
+		if (exclude_self && cpu == this_cpu)
+			continue;
 		vcpu = hv_cpu_number_to_vp_number(cpu);
 		if (vcpu == VP_INVAL)
 			return -1;
@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
 	return nr_bank;
 }
 
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+				    const struct cpumask *cpus)
+{
+	return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+				    const struct cpumask *cpus)
+{
+	WARN_ON_ONCE(preemptible());
+	return __cpumask_to_vpset(vpset, cpus, true);
+}
+
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
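The asm-generic/mshyperv.h change above keeps the existing cpumask_to_vpset() behaviour by turning it into a thin wrapper around a new __cpumask_to_vpset(vpset, cpus, exclude_self) core, and adds cpumask_to_vpset_noself(), which passes exclude_self=true and warns if called with preemption enabled: "self" comes from smp_processor_id(), which is only stable while the caller cannot migrate to another CPU. A simplified stand-alone sketch of that wrapper layout (struct vp_set_demo, __build_vpset(), current_cpu and the pinned flag are illustrative stand-ins, not the kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real code works on struct hv_vpset and struct cpumask. */
struct vp_set_demo { uint64_t bank0; };

static int current_cpu = 2;	/* pretend smp_processor_id() */
static bool pinned;		/* pretend "preemption disabled" */

/* Core helper: one implementation, behaviour selected by the flag. */
static int __build_vpset(struct vp_set_demo *vpset, uint64_t cpus, bool exclude_self)
{
	vpset->bank0 = cpus;
	if (exclude_self)
		vpset->bank0 &= ~(1ULL << current_cpu);
	return 1;	/* number of banks, always 1 in this toy version */
}

/* Existing behaviour stays available under the old name. */
static int build_vpset(struct vp_set_demo *vpset, uint64_t cpus)
{
	return __build_vpset(vpset, cpus, false);
}

/* New variant: only valid while the caller cannot migrate between CPUs. */
static int build_vpset_noself(struct vp_set_demo *vpset, uint64_t cpus)
{
	assert(pinned);	/* analogue of WARN_ON_ONCE(preemptible()) in the hunk above */
	return __build_vpset(vpset, cpus, true);
}

int main(void)
{
	struct vp_set_demo vs;

	build_vpset(&vs, 0x0f);
	printf("all:    %#llx\n", (unsigned long long)vs.bank0);

	pinned = true;	/* caller pins itself first */
	build_vpset_noself(&vs, 0x0f);
	printf("noself: %#llx\n", (unsigned long long)vs.bank0);
	return 0;
}

In the kernel header the precondition is expressed with WARN_ON_ONCE(preemptible()), exactly as shown in the hunk above; the wrapper split lets existing cpumask_to_vpset() callers stay untouched.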