KVM: mmu: Add slots_arch_lock for memslot arch fields

Add a new lock to protect the arch-specific fields of memslots if they
need to be modified in a kvm->srcu read critical section. A future
commit will use this lock to lazily allocate memslot rmaps for x86.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210518173414.450044-5-bgardon@google.com>
[Add Documentation/ hunk. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Ben Gardon 2021-05-18 10:34:11 -07:00 committed by Paolo Bonzini
parent ddc12f2a12
commit b10a038e84
3 changed files with 62 additions and 6 deletions

View File

@ -16,6 +16,11 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
can be taken inside a kvm->srcu read-side critical section,
while kvm->slots_lock cannot.
On x86:
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock

View File

@ -523,6 +523,15 @@ struct kvm {
#endif /* KVM_HAVE_MMU_RWLOCK */
struct mutex slots_lock;
/*
* Protects the arch-specific fields of struct kvm_memory_slots in
* use by the VM. To be used under the slots_lock (above) or in a
* kvm->srcu critical section where acquiring the slots_lock would
* lead to deadlock with the synchronize_srcu in
* install_new_memslots.
*/
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

View File

@ -909,6 +909,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
mutex_init(&kvm->slots_arch_lock);
INIT_LIST_HEAD(&kvm->devices);
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@ -1281,6 +1282,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
rcu_assign_pointer(kvm->memslots[as_id], slots);
/*
* Acquired in kvm_set_memslot. Must be released before synchronize
* SRCU below in order to avoid deadlock with another thread
* acquiring the slots_arch_lock in an srcu critical section.
*/
mutex_unlock(&kvm->slots_arch_lock);
synchronize_srcu_expedited(&kvm->srcu);
/*
@ -1352,9 +1361,27 @@ static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memslots *slots;
int r;
/*
* Released in install_new_memslots.
*
* Must be held from before the current memslots are copied until
* after the new memslots are installed with rcu_assign_pointer,
* then released before the synchronize srcu in install_new_memslots.
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
* changes to those memslots are complete.
*
* These rules ensure that installing new memslots does not lose
* changes made to the previous memslots.
*/
mutex_lock(&kvm->slots_arch_lock);
slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
if (!slots) {
mutex_unlock(&kvm->slots_arch_lock);
return -ENOMEM;
}
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
/*
@ -1365,10 +1392,9 @@ static int kvm_set_memslot(struct kvm *kvm,
slot->flags |= KVM_MEMSLOT_INVALID;
/*
* We can re-use the memory from the old memslots.
* It will be overwritten with a copy of the new memslots
* after reacquiring the slots_arch_lock below.
*/
slots = install_new_memslots(kvm, as_id, slots);
@ -1380,6 +1406,17 @@ static int kvm_set_memslot(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
/* Released in install_new_memslots. */
mutex_lock(&kvm->slots_arch_lock);
/*
* The arch-specific fields of the memslots could have changed
* between releasing the slots_arch_lock in
* install_new_memslots and here, so get a fresh copy of the
* slots.
*/
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
@ -1395,8 +1432,13 @@ static int kvm_set_memslot(struct kvm *kvm,
return 0;
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
slot = id_to_memslot(slots, old->id);
slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
} else {
mutex_unlock(&kvm->slots_arch_lock);
}
kvfree(slots);
return r;
}