KVM: MMU: improve active sp audit
Both audit_rmap() and audit_write_protection() need to walk all active shadow pages, so do both checks in a single shadow-page walk.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 2f4f337248
commit 49edf87806
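The diff is easier to read with the new shape in mind: a single walker, walk_all_active_sps(), iterates the active shadow-page list and hands each page to an sp_handler callback, and audit_sp() bundles the rmap and write-protection checks into one such handler. Below is a minimal, self-contained userspace sketch of that callback-walk pattern; the toy shadow_page struct, check_rmap(), check_write_protection(), and main() are illustrative stand-ins, not the kernel's kvm_mmu_page or the actual audit logic.

/*
 * Minimal userspace sketch of the pattern this patch introduces: one walker
 * traverses the active shadow-page list and applies a handler callback to
 * every entry, so individual audit checks no longer duplicate the loop.
 * The types and checks below are toy stand-ins, not kernel code.
 */
#include <stdio.h>

struct shadow_page {
        unsigned long gfn;              /* guest frame number (illustrative) */
        int level;
        struct shadow_page *next;       /* stands in for the kernel list_head */
};

typedef void (*sp_handler)(struct shadow_page *sp);

/* Single walk over the active list; every audit is just another handler. */
static void walk_all_active_sps(struct shadow_page *head, sp_handler fn)
{
        struct shadow_page *sp;

        for (sp = head; sp != NULL; sp = sp->next)
                fn(sp);
}

/* Stub checks, standing in for check_mappings_rmap()/audit_write_protection(). */
static void check_rmap(struct shadow_page *sp)
{
        printf("rmap check: gfn %lu, level %d\n", sp->gfn, sp->level);
}

static void check_write_protection(struct shadow_page *sp)
{
        printf("write-protection check: gfn %lu\n", sp->gfn);
}

/* Combined handler, analogous to audit_sp() in the patch. */
static void audit_sp(struct shadow_page *sp)
{
        check_rmap(sp);
        check_write_protection(sp);
}

int main(void)
{
        struct shadow_page b = { .gfn = 2, .level = 1, .next = NULL };
        struct shadow_page a = { .gfn = 1, .level = 1, .next = &b };

        walk_all_active_sps(&a, audit_sp);      /* both audits in one pass */
        return 0;
}

The design choice mirrors the patch: adding another audit means adding another handler, without touching the list-walking code.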
@@ -65,6 +65,16 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
         return;
 }
 
+typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);
+
+static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
+{
+        struct kvm_mmu_page *sp;
+
+        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
+                fn(kvm, sp);
+}
+
 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                 gva_t va, int level)
 {
@@ -175,67 +185,59 @@ void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
         mmu_spte_walk(vcpu, inspect_spte_has_rmap);
 }
 
-static void check_mappings_rmap(struct kvm_vcpu *vcpu)
+static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-        struct kvm_mmu_page *sp;
         int i;
 
-        list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-                u64 *pt = sp->spt;
+        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+                return;
 
-                if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-                        continue;
+        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                if (!is_rmap_spte(sp->spt[i]))
+                        continue;
 
-                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                        if (!is_rmap_spte(pt[i]))
-                                continue;
-
-                        inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
-                }
+                inspect_spte_has_rmap(kvm, sp->spt + i);
         }
-        return;
 }
 
-static void audit_rmap(struct kvm_vcpu *vcpu)
-{
-        check_mappings_rmap(vcpu);
-}
-
-static void audit_write_protection(struct kvm_vcpu *vcpu)
+void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-        struct kvm_mmu_page *sp;
         struct kvm_memory_slot *slot;
         unsigned long *rmapp;
         u64 *spte;
 
-        list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-                if (sp->role.direct)
-                        continue;
-                if (sp->unsync)
-                        continue;
-                if (sp->role.invalid)
-                        continue;
+        if (sp->role.direct || sp->unsync || sp->role.invalid)
+                return;
 
-                slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
-                rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
+        slot = gfn_to_memslot(kvm, sp->gfn);
+        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
 
-                spte = rmap_next(vcpu->kvm, rmapp, NULL);
-                while (spte) {
-                        if (is_writable_pte(*spte))
-                                printk(KERN_ERR "%s: (%s) shadow page has "
+        spte = rmap_next(kvm, rmapp, NULL);
+        while (spte) {
+                if (is_writable_pte(*spte))
+                        printk(KERN_ERR "%s: (%s) shadow page has "
                                 "writable mappings: gfn %llx role %x\n",
                                 __func__, audit_msg, sp->gfn,
                                 sp->role.word);
-                        spte = rmap_next(vcpu->kvm, rmapp, spte);
-                }
+                spte = rmap_next(kvm, rmapp, spte);
         }
 }
 
+static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+        check_mappings_rmap(kvm, sp);
+        audit_write_protection(kvm, sp);
+}
+
+static void audit_all_active_sps(struct kvm *kvm)
+{
+        walk_all_active_sps(kvm, audit_sp);
+}
+
 static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
 {
         audit_msg = audit_point_name[audit_point];
-        audit_rmap(vcpu);
-        audit_write_protection(vcpu);
+        audit_all_active_sps(vcpu->kvm);
         if (strcmp("pre pte write", audit_msg) != 0)
                 audit_mappings(vcpu);
         audit_sptes_have_rmaps(vcpu);