kvm: selftests: add vm_phy_pages_alloc

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Andrew Jones <drjones@redhat.com>, 2018-09-18 19:54:29 +02:00
Committed by: Paolo Bonzini
commit d5106539cf (parent eabe7881d2)
2 changed files with 38 additions and 22 deletions

--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h

@@ -110,6 +110,8 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	uint32_t pgd_memslot);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 	uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+	vm_paddr_t paddr_min, uint32_t memslot);
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
 	void *guest_code);

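The new prototype takes the page count as its second parameter, next to the existing single-page allocator. A minimal usage sketch (hypothetical test code, not part of this commit; memslot 0 and the 1 MiB minimum are arbitrary illustrative choices):

#include "kvm_util.h"

/* Hypothetical helper: reserve four contiguous guest-physical pages
 * in memslot 0, at or above 1 MiB, e.g. for a multi-page table or
 * buffer that a test wants to address as one block.
 */
static vm_paddr_t alloc_test_buffer(struct kvm_vm *vm)
{
	vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

	/* base is the page-aligned start of the range; the pages
	 * [base, base + 4 * page_size) are now marked as in use.
	 * On failure the call aborts with a vm dump rather than
	 * returning. */
	return base;
}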
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c

@@ -1378,10 +1378,11 @@ const char *exit_reason_str(unsigned int exit_reason)
 }
 
 /*
- * Physical Page Allocate
+ * Physical Contiguous Page Allocator
  *
  * Input Args:
  *   vm - Virtual Machine
+ *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
  *
@@ -1390,16 +1391,18 @@ const char *exit_reason_str(unsigned int exit_reason)
  * Return:
  *   Starting physical address
  *
- * Within the VM specified by vm, locates an available physical page
- * at or above paddr_min. If found, the page is marked as in use
- * and its address is returned. A TEST_ASSERT failure occurs if no
- * page is available at or above paddr_min.
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-	uint32_t memslot)
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+	vm_paddr_t paddr_min, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
-	sparsebit_idx_t pg;
+	sparsebit_idx_t pg, base;
+
+	TEST_ASSERT(num > 0, "Must allocate at least one page");
 
 	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
 		"not divisible by page size.\n"
@@ -1407,25 +1410,36 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	pg = paddr_min >> vm->page_shift;
+	base = pg = paddr_min >> vm->page_shift;
 
-	/* Locate next available physical page at or above paddr_min. */
-	if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-		pg = sparsebit_next_set(region->unused_phy_pages, pg);
-		if (pg == 0) {
-			fprintf(stderr, "No guest physical page available, "
-				"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-				paddr_min, vm->page_size, memslot);
-			fputs("---- vm dump ----\n", stderr);
-			vm_dump(stderr, vm, 2);
-			abort();
-		}
+	do {
+		for (; pg < base + num; ++pg) {
+			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+				break;
+			}
+		}
+	} while (pg && pg != base + num);
+
+	if (pg == 0) {
+		fprintf(stderr, "No guest physical page available, "
+			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+			paddr_min, vm->page_size, memslot);
+		fputs("---- vm dump ----\n", stderr);
+		vm_dump(stderr, vm, 2);
+		abort();
 	}
 
-	/* Specify page as in use and return its address. */
-	sparsebit_clear(region->unused_phy_pages, pg);
+	for (pg = base; pg < base + num; ++pg)
+		sparsebit_clear(region->unused_phy_pages, pg);
 
-	return pg * vm->page_size;
+	return base * vm->page_size;
 }
 
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+	uint32_t memslot)
+{
+	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+}
+
 /*
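The search loop deserves a note: the inner for walks the candidate window [base, base + num), and on the first in-use page it restarts the window at the next free page; sparsebit_next_set() returns 0 when no free page remains, which ends the do/while and takes the failure path. A standalone sketch of the same window-restart scan (illustrative only; the bool array and helpers below stand in for the sparsebit API, and the pg == 0 failure convention mirrors the code above):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

/* Toy stand-in for the unused_phy_pages sparsebit: true = free. */
static bool free_page[NPAGES];

static bool is_free(size_t pg)
{
	return pg < NPAGES && free_page[pg];
}

/* Next free page at or after pg, or 0 if none remain (mirroring how
 * sparsebit_next_set() reports exhaustion). */
static size_t next_free(size_t pg)
{
	for (; pg < NPAGES; ++pg)
		if (free_page[pg])
			return pg;
	return 0;
}

/* Same window-restart scan as vm_phy_pages_alloc(): return the base of
 * num contiguous free pages at or above min, or 0 on failure. */
static size_t find_contiguous(size_t num, size_t min)
{
	size_t base, pg;

	base = pg = min;
	do {
		for (; pg < base + num; ++pg) {
			if (!is_free(pg)) {
				/* Window broken: restart at next free page. */
				base = pg = next_free(pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	return pg ? base : 0;
}

int main(void)
{
	/* Pages 3..5 and 8..12 free; everything else in use. */
	for (size_t i = 3; i <= 5; ++i)
		free_page[i] = true;
	for (size_t i = 8; i <= 12; ++i)
		free_page[i] = true;

	/* The run 3..5 is too short for 4 pages, so the scan restarts
	 * at page 8 and succeeds there: prints "base = 8". */
	printf("base = %zu\n", find_contiguous(4, 1));
	return 0;
}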