mirror of https://github.com/torvalds/linux.git
kvm: change memslot sorting rule from size to GFN
This allows binary search to be used for GFN -> memslot lookups, reducing the lookup cost when a large number of slots is in use.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d4ae84a02b
commit 0e60b0799f
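For context, the point of the new ordering is that a GFN -> memslot lookup can now use binary search instead of a linear scan. Below is a minimal, self-contained userspace sketch of such a lookup over an array kept sorted by base_gfn in descending order (the order update_memslots() maintains after this change); the struct slot type and the gfn_to_slot_idx() helper are simplified stand-ins for illustration, not KVM's actual kvm_memory_slot or lookup API.

#include <stdio.h>

/* Simplified stand-in for struct kvm_memory_slot: only the fields
 * the lookup needs. */
struct slot {
	unsigned long long base_gfn;   /* first guest frame number covered by the slot */
	unsigned long long npages;     /* number of pages; 0 means the slot is unused */
};

/*
 * Binary search over an array sorted by base_gfn in descending order.
 * Finds the first index whose base_gfn is <= gfn, then checks that the
 * gfn actually falls inside that slot. Returns the index, or -1 if no
 * slot contains the gfn.
 */
static int gfn_to_slot_idx(const struct slot *slots, int used, unsigned long long gfn)
{
	int start = 0, end = used;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;          /* candidate is at mid or at a lower index */
		else
			start = mid + 1;    /* gfn is below this slot; look at smaller base_gfns */
	}

	if (start < used &&
	    gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return start;

	return -1;
}

int main(void)
{
	/* Descending by base_gfn, as the re-sort in update_memslots() keeps them. */
	struct slot slots[] = {
		{ .base_gfn = 0x100000, .npages = 0x800 },
		{ .base_gfn = 0x1000,   .npages = 0x200 },
		{ .base_gfn = 0x0,      .npages = 0x100 },
	};

	printf("%d\n", gfn_to_slot_idx(slots, 3, 0x100400)); /* 0 */
	printf("%d\n", gfn_to_slot_idx(slots, 3, 0x1100));   /* 1 */
	printf("%d\n", gfn_to_slot_idx(slots, 3, 0x900));    /* -1: falls in a gap between slots */
	return 0;
}

Because update_memslots() starts from an already sorted array and knows which slot changed, the re-sort in the diff below only slides the changed entry between its old and new positions instead of re-sorting the whole array.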
@@ -666,10 +666,10 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 }
 
 /*
- * Insert memslot and re-sort memslots based on their size,
- * so the larger slots will get better fit. Sorting algorithm
- * takes advantage of having initially sorted array and
- * known changed memslot position.
+ * Insert memslot and re-sort memslots based on their GFN,
+ * so binary search could be used to lookup GFN.
+ * Sorting algorithm takes advantage of having initially
+ * sorted array and known changed memslot position.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *new)
@@ -679,14 +679,19 @@ static void update_memslots(struct kvm_memslots *slots,
 	struct kvm_memory_slot *mslots = slots->memslots;
 
 	WARN_ON(mslots[i].id != id);
+	if (!new->npages)
+		new->base_gfn = 0;
+
 	while (i < KVM_MEM_SLOTS_NUM - 1 &&
-	       new->npages < mslots[i + 1].npages) {
+	       new->base_gfn <= mslots[i + 1].base_gfn) {
+		if (!mslots[i + 1].npages)
+			break;
 		mslots[i] = mslots[i + 1];
 		slots->id_to_index[mslots[i].id] = i;
 		i++;
 	}
 	while (i > 0 &&
-	       new->npages > mslots[i - 1].npages) {
+	       new->base_gfn > mslots[i - 1].base_gfn) {
 		mslots[i] = mslots[i - 1];
 		slots->id_to_index[mslots[i].id] = i;
 		i--;