kvm: optimize out smp_mb after srcu_read_unlock
I noticed that srcu_read_lock/unlock both have a memory barrier, so just by moving srcu_read_unlock earlier we can get rid of one call to smp_mb(), using smp_mb__after_srcu_read_unlock instead. Unsurprisingly, the gain is small but measurable using the unit-test microbenchmark: before: vmcall in the ballpark of 1410 cycles; after: vmcall in the ballpark of 1360 cycles. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
This commit is contained in:
parent
ce332f662d
commit
01b71917b5
@@ -5966,10 +5966,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
vcpu->mode = IN_GUEST_MODE;
|
vcpu->mode = IN_GUEST_MODE;
|
||||||
|
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
|
||||||
|
|
||||||
/* We should set ->mode before check ->requests,
|
/* We should set ->mode before check ->requests,
|
||||||
* see the comment in make_all_cpus_request.
|
* see the comment in make_all_cpus_request.
|
||||||
*/
|
*/
|
||||||
smp_mb();
|
smp_mb__after_srcu_read_unlock();
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
|
|
||||||
@@ -5979,12 +5981,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
|||||||
smp_wmb();
|
smp_wmb();
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
preempt_enable();
|
preempt_enable();
|
||||||
|
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
r = 1;
|
r = 1;
|
||||||
goto cancel_injection;
|
goto cancel_injection;
|
||||||
}
|
}
|
||||||
|
|
||||||
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
|
|
||||||
|
|
||||||
if (req_immediate_exit)
|
if (req_immediate_exit)
|
||||||
smp_send_reschedule(vcpu->cpu);
|
smp_send_reschedule(vcpu->cpu);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user