drm/nv50-nvc0: prevent multiple vm/bar flushes occurring simultaneously
The per-vm mutex doesn't prevent this completely: a flush coming from the BAR VM could potentially happen at the same time as one for the channel VM. Not to mention that if/when we get per-client/channel VMs, this will happen far more frequently.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 6f70a4c3d1
parent ef1b287169
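For context, here is a minimal, self-contained sketch (not nouveau code) of the race the commit message describes: two VMs each hold their own per-vm mutex, yet both drive the same flush register, so the write-then-poll sequence still needs one device-wide lock, modelled here by a pthread spinlock standing in for dev_priv->ramin_lock. All names (fake_dev, fake_vm, vm_flush, flush_reg) are invented for illustration; compile with -pthread.

/*
 * Illustration only: two "VMs" with private mutexes share one flush
 * register, so a single device-wide lock must wrap the register access.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_dev {
	pthread_spinlock_t ramin_lock;	/* stands in for dev_priv->ramin_lock */
	int flush_reg;			/* stands in for 0x00330c / 0x100c80 */
};

struct fake_vm {
	pthread_mutex_t mutex;		/* the per-vm mutex: not enough by itself */
	struct fake_dev *dev;
};

static void vm_flush(struct fake_vm *vm)
{
	struct fake_dev *dev = vm->dev;

	pthread_mutex_lock(&vm->mutex);		/* serialises only this one VM */
	pthread_spin_lock(&dev->ramin_lock);	/* serialises the shared register */
	dev->flush_reg = 1;			/* like nv_wr32(dev, ..., 1) */
	dev->flush_reg = 0;			/* pretend the hardware finished, like nv_wait() */
	pthread_spin_unlock(&dev->ramin_lock);
	pthread_mutex_unlock(&vm->mutex);
}

static void *flush_thread(void *arg)
{
	for (int i = 0; i < 100000; i++)
		vm_flush(arg);
	return NULL;
}

int main(void)
{
	struct fake_dev dev = { .flush_reg = 0 };
	struct fake_vm bar_vm = { .dev = &dev }, chan_vm = { .dev = &dev };
	pthread_t a, b;

	pthread_spin_init(&dev.ramin_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&bar_vm.mutex, NULL);
	pthread_mutex_init(&chan_vm.mutex, NULL);

	/* BAR VM and channel VM flushing at the same time, as described above */
	pthread_create(&a, NULL, flush_thread, &bar_vm);
	pthread_create(&b, NULL, flush_thread, &chan_vm);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("flush register settled at %d\n", dev.flush_reg);
	return 0;
}

A spinlock is presumably a good fit in the real driver because each flush is just a short register write followed by a bounded poll, so holding a sleeping lock across it would buy nothing.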
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock(&dev_priv->ramin_lock);
 }