KVM: s390: Add memcg accounting to KVM allocations
Almost all kvm allocations in the s390x KVM code can be attributed to the process that triggers the allocation (in other words, no global allocation for other guests). This will help the memcg controller to make the right decisions.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
commit c419621873 (parent f8394f232b)
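The change is mechanical: every allocation that can be attributed to a guest switches from GFP_KERNEL (alone or combined with other flags) to GFP_KERNEL_ACCOUNT. GFP_KERNEL_ACCOUNT is GFP_KERNEL with __GFP_ACCOUNT set, which charges the allocation to the memory cgroup of the task performing it, i.e. the VMM process issuing the KVM ioctl. The sketch below is not part of the patch; struct example_irq and alloc_accounted_example() are made-up names used only to illustrate the before/after pattern.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical structure, for illustration only. */
struct example_irq {
	u64 type;
	u32 parm;
};

static struct example_irq *alloc_accounted_example(void)
{
	/*
	 * __GFP_ACCOUNT makes the allocation count against the memcg of
	 * "current", the process that triggered it, so a memory cgroup
	 * limit on the VMM also covers these per-guest kernel objects.
	 * Before this patch the call used plain GFP_KERNEL and the memory
	 * was unaccounted.
	 */
	return kzalloc(sizeof(struct example_irq), GFP_KERNEL_ACCOUNT);
}

For page-sized allocations the same substitution applies to get_zeroed_page(), __get_free_pages() and alloc_page(); auxiliary flags such as GFP_DMA and __GFP_ZERO are kept, and only the GFP_KERNEL part gains __GFP_ACCOUNT.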
@@ -184,7 +184,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
 		return -EINVAL;
 
-	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
+	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
 	if (!wp_info->old_data)
 		return -ENOMEM;
 	/* try to backup the original value */
@@ -234,7 +234,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	if (nr_wp > 0) {
 		wp_info = kmalloc_array(nr_wp,
 					sizeof(*wp_info),
-					GFP_KERNEL);
+					GFP_KERNEL_ACCOUNT);
 		if (!wp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -243,7 +243,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 	if (nr_bp > 0) {
 		bp_info = kmalloc_array(nr_bp,
 					sizeof(*bp_info),
-					GFP_KERNEL);
+					GFP_KERNEL_ACCOUNT);
 		if (!bp_info) {
 			ret = -ENOMEM;
 			goto error;
@@ -349,7 +349,7 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
 		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
 			continue;
 
-		temp = kmalloc(wp_info->len, GFP_KERNEL);
+		temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
 		if (!temp)
 			continue;
 
@@ -398,7 +398,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	sctns = (void *)get_zeroed_page(GFP_KERNEL);
+	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!sctns)
 		return -ENOMEM;
 
@@ -1792,7 +1792,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 		goto out;
 	}
 gisa_out:
-	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 	if (tmp_inti) {
 		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
 		tmp_inti->io.io_int_word = isc_to_int_word(isc);
@@ -2015,7 +2015,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	struct kvm_s390_interrupt_info *inti;
 	int rc;
 
-	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+	inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 	if (!inti)
 		return -ENOMEM;
 
@@ -2414,7 +2414,7 @@ static int enqueue_floating_irq(struct kvm_device *dev,
 		return -EINVAL;
 
 	while (len >= sizeof(struct kvm_s390_irq)) {
-		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+		inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
 		if (!inti)
 			return -ENOMEM;
 
@@ -2462,7 +2462,7 @@ static int register_io_adapter(struct kvm_device *dev,
 	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
 		return -EINVAL;
 
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
 	if (!adapter)
 		return -ENOMEM;
 
@@ -3290,7 +3290,7 @@ int kvm_s390_gib_init(u8 nisc)
 		goto out;
 	}
 
-	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
 	if (!gib) {
 		rc = -ENOMEM;
 		goto out;
@@ -1254,7 +1254,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		ret = -EBUSY;
 		goto out;
 	}
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
 	if (!proc) {
 		ret = -ENOMEM;
 		goto out;
@@ -1416,7 +1416,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_cpu_processor *proc;
 	int ret = 0;
 
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
 	if (!proc) {
 		ret = -ENOMEM;
 		goto out;
@@ -1444,7 +1444,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_cpu_machine *mach;
 	int ret = 0;
 
-	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
 	if (!mach) {
 		ret = -ENOMEM;
 		goto out;
@@ -1812,7 +1812,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
 		return -EINVAL;
 
-	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
+	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
 	if (!keys)
 		return -ENOMEM;
 
@@ -1857,7 +1857,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
 		return -EINVAL;
 
-	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
+	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
 	if (!keys)
 		return -ENOMEM;
 
@@ -2625,7 +2625,7 @@ static void sca_dispose(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	gfp_t alloc_flags = GFP_KERNEL;
+	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
 	int i, rc;
 	char debug_name[16];
 	static unsigned long sca_offset;
@@ -2670,7 +2670,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
 	kvm->arch.sie_page2 =
-	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
 	if (!kvm->arch.sie_page2)
 		goto out_err;
 
@@ -2900,7 +2900,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
 	if (kvm->arch.use_esca)
 		return 0;
 
-	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!new_sca)
 		return -ENOMEM;
 
@@ -3133,7 +3133,7 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
 
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
+	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!vcpu->arch.sie_block->cbrlo)
 		return -ENOMEM;
 	return 0;
@@ -3243,7 +3243,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	int rc;
 
 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
-	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
+	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!sie_page)
 		return -ENOMEM;
 
@@ -879,7 +879,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	switch (fc) {
 	case 1: /* same handling for 1 and 2 */
 	case 2:
-		mem = get_zeroed_page(GFP_KERNEL);
+		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 		if (!mem)
 			goto out_no_data;
 		if (stsi((void *) mem, fc, sel1, sel2))
@@ -888,7 +888,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	case 3:
 		if (sel1 != 2 || sel2 != 2)
 			goto out_no_data;
-		mem = get_zeroed_page(GFP_KERNEL);
+		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
 		if (!mem)
 			goto out_no_data;
 		handle_stsi_3_2_2(vcpu, (void *) mem);
@@ -60,7 +60,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 	if (kvm_s390_pv_cpu_get_handle(vcpu))
 		return -EINVAL;
 
-	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
+	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
 						   get_order(uv_info.guest_cpu_stor_len));
 	if (!vcpu->arch.pv.stor_base)
 		return -ENOMEM;
@@ -72,7 +72,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;
 
 	/* Alloc Secure Instruction Data Area Designation */
-	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
+	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!vcpu->arch.sie_block->sidad) {
 		free_pages(vcpu->arch.pv.stor_base,
 			   get_order(uv_info.guest_cpu_stor_len));
@@ -120,7 +120,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
 	struct kvm_memory_slot *memslot;
 
 	kvm->arch.pv.stor_var = NULL;
-	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
+	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
 	if (!kvm->arch.pv.stor_base)
 		return -ENOMEM;
 
@@ -1234,7 +1234,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
 	mutex_lock(&kvm->arch.vsie.mutex);
 	if (kvm->arch.vsie.page_count < nr_vcpus) {
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
+		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
 		if (!page) {
 			mutex_unlock(&kvm->arch.vsie.mutex);
 			return ERR_PTR(-ENOMEM);
@@ -1336,7 +1336,7 @@ out_put:
 void kvm_s390_vsie_init(struct kvm *kvm)
 {
 	mutex_init(&kvm->arch.vsie.mutex);
-	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
+	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
 }
 
 /* Destroy the vsie data structures. To be called when a vm is destroyed. */