KVM: PPC: Book3S PR: Use SLB entry 0
There was no good reason why we didn't make use of SLB entry 0. SLB entry 0 will always be used by the Linux linear SLB entry, so the fact that slbia does not invalidate it doesn't matter: we overwrite SLB 0 on exit anyway. Just enable use of SLB entry 0 for our shadow SLB code.

Signed-off-by: Alexander Graf <agraf@suse.de>
parent 000a25ddb7
commit 207438d4e2
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	int found_inval = -1;
 	int r;
 
-	if (!svcpu->slb_max)
-		svcpu->slb_max = 1;
-
 	/* Are we overwriting? */
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if (!(svcpu->slb[i].esid & SLB_ESID_V))
 			found_inval = i;
 		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	}
 
 	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0) {
+	if (found_inval >= 0) {
 		r = found_inval;
 		goto out;
 	}
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 	ulong seg_mask = -seg_size;
 	int i;
 
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
 		    (svcpu->slb[i].esid & seg_mask) == ea) {
 			/* Invalidate this entry */
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->slb_max = 1;
+	svcpu->slb_max = 0;
 	svcpu->slb[0].esid = 0;
 	svcpu_put(svcpu);
 }
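To make the C hunks above concrete, here is a minimal userspace sketch of the slot-search logic after the patch. It is an illustration only: the struct, constants, and function name are simplified stand-ins for the kernel's kvmppc_book3s_shadow_vcpu and kvmppc_mmu_next_segment, not the real definitions. It also shows why the two changes belong together: once the loop starts at 0, an invalidated entry at index 0 produces found_inval == 0, which the old "found_inval > 0" test would have thrown away.

#include <stdio.h>

#define SLB_ESID_V (1UL << 27)            /* "valid" bit in the ESID word */
#define ESID_MASK  0xfffffffff0000000UL   /* 256MB segment mask */

struct shadow_slbe {
	unsigned long esid;
};

static int next_segment(struct shadow_slbe *slb, int slb_max, unsigned long esid)
{
	int found_inval = -1;
	int i;

	/* Entry 0 now participates in the search (was: i = 1). */
	for (i = 0; i < slb_max; i++) {
		if (!(slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((slb[i].esid & ESID_MASK) == esid)
			return i;               /* same segment: overwrite in place */
	}

	/* Reuse an invalidated slot; ">= 0" so that slot 0 counts too. */
	if (found_inval >= 0)
		return found_inval;

	return slb_max;                         /* nothing reusable: append */
}

int main(void)
{
	/* Slot 0 invalidated, slot 1 maps some other segment. */
	struct shadow_slbe slb[2] = {
		{ .esid = 0 },
		{ .esid = 0x10000000UL | SLB_ESID_V },
	};

	/* With ">= 0", the invalidated slot 0 is handed back: prints 0. */
	printf("%d\n", next_segment(slb, 2, 0x20000000UL));
	return 0;
}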
@@ -138,7 +138,8 @@ slb_do_enter:
 
 	/* Restore bolted entries from the shadow and fix it along the way */
 
-	/* We don't store anything in entry 0, so we don't need to take care of it */
+	li	r0, r0
+	slbmte	r0, r0
 	slbia
 	isync
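A note on why the assembly hunk adds two instructions instead of just dropping the comment: slbia leaves SLB entry 0 alone, so once the shadow SLB uses slot 0 (per the C hunks above), the SLB switching code must clear that slot explicitly. The slbmte with zeroed operands installs an invalid (V=0) entry in slot 0 before the usual slbia/isync pair. The odd-looking li r0, r0 is verbatim from the commit: with the kernel's ppc_asm.h register macros, r0 expands to the literal 0, so it assembles as li 0,0, i.e. load immediate zero into GPR0.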