drm/nouveau/instmem: completely new implementation, as a subdev module
v2 (Ben Skeggs):
- some fixes for 64KiB PAGE_SIZE
- fix porting issues in (currently unused) nv41/nv44 pciegart code

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 8a9b889e66
commit 3863c9bc88
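The mechanical pattern repeated throughout the diff below: gpuobj address fields are renamed (pinst/vinst become addr, cinst becomes node->offset), and direct engine accesses are replaced by flat shims (nvimem_flush(), nvvm_engref(), nvimem_ramro()/nvimem_ramfc()) so the legacy DRM code no longer reaches into dev_priv->engine.instmem. A minimal stand-alone sketch of the calling convention, with stub types that are illustrative assumptions rather than the driver's real definitions:

#include <stdio.h>

struct drm_device;                      /* opaque, as in the driver */
struct nouveau_vm;                      /* opaque channel VM        */

/* Shim with the same shape as the commit's nvimem_flush(): one call that
 * hides where instance memory now lives (the new instmem/bar subdevs). */
static void nvimem_flush(struct drm_device *dev)
{
	(void)dev;
	printf("flush instance memory writes\n");
}

/* Shim with the same shape as nvvm_engref(): a signed refcount adjustment,
 * replacing atomic_inc()/atomic_dec() on chan->vm->engref[engine]. */
static void nvvm_engref(struct nouveau_vm *vm, int engine, int ref)
{
	(void)vm;
	printf("engine %d: engref %+d\n", engine, ref);
}

int main(void)
{
	nvvm_engref(NULL, 1, 1);   /* was: atomic_inc(&chan->vm->engref[engine]) */
	nvimem_flush(NULL);        /* was: dev_priv->engine.instmem.flush(dev)   */
	nvvm_engref(NULL, 1, -1);  /* was: atomic_dec(&chan->vm->engref[engine]) */
	return 0;
}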
@@ -9,6 +9,7 @@ ccflags-y += -I$(src)
 nouveau-y := core/core/client.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
+nouveau-y += core/core/gpuobj.o
 nouveau-y += core/core/handle.o
 nouveau-y += core/core/mm.o
 nouveau-y += core/core/namedb.o
@@ -19,6 +20,9 @@ nouveau-y += core/core/printk.o
 nouveau-y += core/core/ramht.o
 nouveau-y += core/core/subdev.o
 
+nouveau-y += core/subdev/bar/base.o
+nouveau-y += core/subdev/bar/nv50.o
+nouveau-y += core/subdev/bar/nvc0.o
 nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
 nouveau-y += core/subdev/bios/conn.o
@@ -66,10 +70,10 @@ nouveau-y += core/subdev/gpio/nvd0.o
 nouveau-y += core/subdev/i2c/base.o
 nouveau-y += core/subdev/i2c/aux.o
 nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/instmem/base.o
 nouveau-y += core/subdev/instmem/nv04.o
 nouveau-y += core/subdev/instmem/nv40.o
 nouveau-y += core/subdev/instmem/nv50.o
-nouveau-y += core/subdev/instmem/nvc0.o
 nouveau-y += core/subdev/ltcg/nvc0.o
 nouveau-y += core/subdev/mc/base.o
 nouveau-y += core/subdev/mc/nv04.o
@@ -80,6 +84,9 @@ nouveau-y += core/subdev/mc/nvc0.o
 nouveau-y += core/subdev/timer/base.o
 nouveau-y += core/subdev/timer/nv04.o
 nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv04.o
+nouveau-y += core/subdev/vm/nv41.o
+nouveau-y += core/subdev/vm/nv44.o
 nouveau-y += core/subdev/vm/nv50.o
 nouveau-y += core/subdev/vm/nvc0.o
@@ -86,7 +86,6 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
 {
 struct drm_device *dev = chan->dev;
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 struct nouveau_ramht_entry *entry;
 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
 unsigned long flags;
@@ -104,21 +103,21 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
 
 if (dev_priv->card_type < NV_40) {
-ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
+ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
 } else
 if (dev_priv->card_type < NV_50) {
-ctx = (gpuobj->pinst >> 4) |
+ctx = (gpuobj->addr >> 4) |
 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
 } else {
 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-ctx = (gpuobj->cinst << 10) |
+ctx = (gpuobj->node->offset << 10) |
 (chan->id << 28) |
 chan->id; /* HASH_TAG */
 } else {
-ctx = (gpuobj->cinst >> 4) |
+ctx = (gpuobj->node->offset >> 4) |
 ((gpuobj->engine <<
 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
 }
@@ -137,7 +136,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
 nv_wo32(ramht, co + 4, ctx);
 
 spin_unlock_irqrestore(&chan->ramht->lock, flags);
-instmem->flush(dev);
+nvimem_flush(dev);
 return 0;
 }
 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -184,8 +183,6 @@ static void
 nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
 unsigned long flags;
 u32 co, ho;
@@ -201,7 +198,7 @@ nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
 chan->id, co, handle, nv_ro32(ramht, co + 4));
 nv_wo32(ramht, co + 0, 0x00000000);
 nv_wo32(ramht, co + 4, 0x00000000);
-instmem->flush(dev);
+nvimem_flush(dev);
 goto out;
 }
@@ -25,7 +25,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 
 /*XXX: This stub is currently used on NV98+ also, as soon as this becomes
@@ -26,7 +26,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 #include "fuc/nva3.fuc.h"
 
@@ -38,7 +37,6 @@ static int
 nva3_copy_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *ramin = chan->ramin;
 struct nouveau_gpuobj *ctx = NULL;
 int ret;
@@ -51,14 +49,14 @@ nva3_copy_context_new(struct nouveau_channel *chan, int engine)
 return ret;
 
 nv_wo32(ramin, 0xc0, 0x00190000);
-nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
-nv_wo32(ramin, 0xc8, ctx->vinst);
+nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
+nv_wo32(ramin, 0xc8, ctx->addr);
 nv_wo32(ramin, 0xcc, 0x00000000);
 nv_wo32(ramin, 0xd0, 0x00000000);
 nv_wo32(ramin, 0xd4, 0x00000000);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
-atomic_inc(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, 1);
 chan->engctx[engine] = ctx;
 return 0;
 }
@@ -84,7 +82,7 @@ nva3_copy_context_del(struct nouveau_channel *chan, int engine)
 for (i = 0xc0; i <= 0xd4; i += 4)
 nv_wo32(chan->ramin, i, 0x00000000);
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 nouveau_gpuobj_ref(NULL, &ctx);
 chan->engctx[engine] = ctx;
 }
@@ -26,7 +26,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 #include "fuc/nvc0.fuc.h"
 
@@ -49,7 +48,6 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
 struct nvc0_copy_chan *cctx;
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *ramin = chan->ramin;
 int ret;
 
@@ -62,14 +60,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(cctx->mem, NV_MEM_ACCESS_RW, chan->vm,
+ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
 &cctx->vma);
 if (ret)
 return ret;
 
 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 return 0;
 }
 
@@ -88,7 +86,7 @@ nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
 struct drm_device *dev = chan->dev;
 u32 inst;
 
-inst = (chan->ramin->vinst >> 12);
+inst = (chan->ramin->addr >> 12);
 inst |= 0x40000000;
 
 /* disable fifo access */
@@ -25,7 +25,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 
 struct nv84_crypt_engine {
@@ -36,7 +35,6 @@ static int
 nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *ramin = chan->ramin;
 struct nouveau_gpuobj *ctx;
 int ret;
@@ -49,14 +47,14 @@ nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
 return ret;
 
 nv_wo32(ramin, 0xa0, 0x00190000);
-nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
-nv_wo32(ramin, 0xa8, ctx->vinst);
+nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
+nv_wo32(ramin, 0xa8, ctx->addr);
 nv_wo32(ramin, 0xac, 0);
 nv_wo32(ramin, 0xb0, 0);
 nv_wo32(ramin, 0xb4, 0);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
-atomic_inc(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, 1);
 chan->engctx[engine] = ctx;
 return 0;
 }
@@ -68,7 +66,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
 struct drm_device *dev = chan->dev;
 u32 inst;
 
-inst = (chan->ramin->vinst >> 12);
+inst = (chan->ramin->addr >> 12);
 inst |= 0x80000000;
 
 /* mark context as invalid if still on the hardware, not
@@ -84,7 +82,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
 
 nouveau_gpuobj_ref(NULL, &ctx);
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 chan->engctx[engine] = NULL;
 }
 
@@ -93,7 +91,6 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
 u32 handle, u16 class)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *obj = NULL;
 int ret;
 
@@ -104,7 +101,7 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
 obj->class = class;
 
 nv_wo32(obj, 0x00, class);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 ret = nouveau_ramht_insert(chan, handle, obj);
 nouveau_gpuobj_ref(NULL, &obj);
@@ -26,7 +26,6 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 
 #include "fuc/nv98.fuc.h"
@@ -43,7 +42,6 @@ static int
 nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
 struct nv98_crypt_chan *cctx;
 int ret;
@@ -52,7 +50,7 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
 if (!cctx)
 return -ENOMEM;
 
-atomic_inc(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, 1);
 
 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
@@ -60,12 +58,12 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
 goto error;
 
 nv_wo32(chan->ramin, 0xa0, 0x00190000);
-nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
-nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
+nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
 nv_wo32(chan->ramin, 0xac, 0x00000000);
 nv_wo32(chan->ramin, 0xb0, 0x00000000);
 nv_wo32(chan->ramin, 0xb4, 0x00000000);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 error:
 if (ret)
@@ -84,7 +82,7 @@ nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
 
 nouveau_gpuobj_ref(NULL, &cctx->mem);
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 chan->engctx[engine] = NULL;
 kfree(cctx);
 }
@@ -32,8 +32,6 @@
 #include <core/ramht.h>
 #include "nouveau_software.h"
 
-#include <core/subdev/instmem/nv04.h>
-
 static struct ramfc_desc {
 unsigned bits:6;
 unsigned ctxs:5;
@@ -120,7 +118,7 @@ nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
 /* initialise default fifo context */
 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
-nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->pinst >> 4);
+nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -203,9 +201,9 @@ nv04_fifo_init(struct drm_device *dev, int engine)
 
 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
 ((dev_priv->ramht->bits - 9) << 16) |
-(dev_priv->ramht->gpuobj->pinst >> 8));
-nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
-nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->pinst >> 8);
+(dev_priv->ramht->gpuobj->addr >> 8));
+nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
 
 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
 
@@ -486,15 +484,14 @@ int
 nv04_fifo_create(struct drm_device *dev)
 {
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
 struct nv04_fifo_priv *priv;
 
 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 if (!priv)
 return -ENOMEM;
 
-nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
-nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
+nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
 
 priv->base.base.destroy = nv04_fifo_destroy;
 priv->base.base.init = nv04_fifo_init;
@@ -31,8 +31,6 @@
 #include "nouveau_util.h"
 #include <core/ramht.h>
 
-#include <core/subdev/instmem/nv04.h>
-
 static struct ramfc_desc {
 unsigned bits:6;
 unsigned ctxs:5;
@@ -91,7 +89,7 @@ nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
 /* initialise default fifo context */
 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
-nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
+nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -115,15 +113,14 @@ int
 nv10_fifo_create(struct drm_device *dev)
 {
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
 struct nv10_fifo_priv *priv;
 
 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 if (!priv)
 return -ENOMEM;
 
-nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
-nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
+nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
 
 priv->base.base.destroy = nv04_fifo_destroy;
 priv->base.base.init = nv04_fifo_init;
@@ -31,8 +31,6 @@
 #include "nouveau_util.h"
 #include <core/ramht.h>
 
-#include <core/subdev/instmem/nv04.h>
-
 static struct ramfc_desc {
 unsigned bits:6;
 unsigned ctxs:5;
@@ -96,7 +94,7 @@ nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
 /* initialise default fifo context */
 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
-nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
+nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -131,10 +129,10 @@ nv17_fifo_init(struct drm_device *dev, int engine)
 
 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
 ((dev_priv->ramht->bits - 9) << 16) |
-(dev_priv->ramht->gpuobj->pinst >> 8));
-nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
+(dev_priv->ramht->gpuobj->addr >> 8));
+nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
-priv->ramfc->pinst >> 8);
+priv->ramfc->addr >> 8);
 
 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
 
@@ -157,15 +155,14 @@ int
 nv17_fifo_create(struct drm_device *dev)
 {
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
 struct nv17_fifo_priv *priv;
 
 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 if (!priv)
 return -ENOMEM;
 
-nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
-nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
+nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
 
 priv->base.base.destroy = nv04_fifo_destroy;
 priv->base.base.init = nv17_fifo_init;
@@ -31,8 +31,6 @@
 #include "nouveau_util.h"
 #include <core/ramht.h>
 
-#include <core/subdev/instmem/nv04.h>
-
 static struct ramfc_desc {
 unsigned bits:6;
 unsigned ctxs:5;
@@ -104,7 +102,7 @@ nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
 /* initialise default fifo context */
 nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
-nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
+nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
 nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 |
 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
@@ -144,8 +142,8 @@ nv40_fifo_init(struct drm_device *dev, int engine)
 
 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
 ((dev_priv->ramht->bits - 9) << 16) |
-(dev_priv->ramht->gpuobj->pinst >> 8));
-nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
+(dev_priv->ramht->gpuobj->addr >> 8));
+nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
 
 switch (dev_priv->chipset) {
 case 0x47:
@@ -163,7 +161,7 @@ nv40_fifo_init(struct drm_device *dev, int engine)
 default:
 nv_wr32(dev, 0x002230, 0x00000000);
 nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 +
-priv->ramfc->pinst) >> 16) |
+priv->ramfc->addr) >> 16) |
 0x00030000);
 break;
 }
@@ -189,15 +187,14 @@ int
 nv40_fifo_create(struct drm_device *dev)
 {
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
 struct nv40_fifo_priv *priv;
 
 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 if (!priv)
 return -ENOMEM;
 
-nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
-nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
+nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
 
 priv->base.base.destroy = nv04_fifo_destroy;
 priv->base.base.init = nv40_fifo_init;
@@ -29,7 +29,6 @@
 #include "nouveau_drv.h"
 #include <engine/fifo.h>
 #include <core/ramht.h>
-#include <subdev/vm.h>
 
 struct nv50_fifo_priv {
 struct nouveau_fifo_priv base;
@@ -45,7 +44,6 @@ void
 nv50_fifo_playlist_update(struct drm_device *dev)
 {
 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *cur;
 int i, p;
 
@@ -57,9 +55,9 @@ nv50_fifo_playlist_update(struct drm_device *dev)
 nv_wo32(cur, p++ * 4, i);
 }
 
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
-nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+nv_wr32(dev, 0x0032f4, cur->addr >> 12);
 nv_wr32(dev, 0x0032ec, p);
 nv_wr32(dev, 0x002500, 0x00000101);
 }
@@ -72,14 +70,14 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
 struct drm_device *dev = chan->dev;
 struct drm_nouveau_private *dev_priv = dev->dev_private;
 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
-u64 instance = chan->ramin->vinst >> 12;
+u64 instance = chan->ramin->addr >> 12;
 unsigned long flags;
 int ret = 0, i;
 
 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
 if (!fctx)
 return -ENOMEM;
-atomic_inc(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, 1);
 
 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
 NV50_USER(chan->id), PAGE_SIZE);
@@ -93,7 +91,7 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(chan->ramin, 0x3c, 0x403f6078);
 nv_wo32(chan->ramin, 0x40, 0x00000000);
 nv_wo32(chan->ramin, 0x44, 0x01003fff);
-nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4);
 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
 drm_order(chan->dma.ib_max + 1) << 16);
@@ -102,9 +100,9 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(chan->ramin, 0x7c, 0x30000001);
 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
 (4 << 24) /* SEARCH_FULL */ |
-(chan->ramht->gpuobj->cinst >> 4));
+(chan->ramht->gpuobj->node->offset >> 4));
 
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
@@ -141,7 +139,7 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
 
 /* do the kickoff... */
-nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
 done = false;
@@ -177,7 +175,7 @@ nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
 chan->user = NULL;
 }
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 chan->engctx[engine] = NULL;
 kfree(fctx);
 }
@@ -200,7 +198,7 @@ nv50_fifo_init(struct drm_device *dev, int engine)
 for (i = 0; i < 128; i++) {
 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 if (chan && chan->engctx[engine])
-instance = 0x80000000 | chan->ramin->vinst >> 12;
+instance = 0x80000000 | chan->ramin->addr >> 12;
 else
 instance = 0x00000000;
 nv_wr32(dev, 0x002600 + (i * 4), instance);
@@ -29,7 +29,6 @@
 #include "nouveau_drv.h"
 #include <engine/fifo.h>
 #include <core/ramht.h>
-#include <subdev/vm.h>
 
 struct nv84_fifo_priv {
 struct nouveau_fifo_priv base;
@@ -58,7 +57,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
 if (!fctx)
 return -ENOMEM;
-atomic_inc(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, 1);
 
 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
 NV50_USER(chan->id), PAGE_SIZE);
@@ -72,7 +71,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
 if (ret)
 goto error;
 
-instance = fctx->ramfc->vinst >> 8;
+instance = fctx->ramfc->addr >> 8;
 
 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
 if (ret)
@@ -81,7 +80,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
-nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
+nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4);
 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
 drm_order(chan->dma.ib_max + 1) << 16);
@@ -90,14 +89,14 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
 (4 << 24) /* SEARCH_FULL */ |
-(chan->ramht->gpuobj->cinst >> 4));
-nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
-nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
+(chan->ramht->gpuobj->node->offset >> 4));
+nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
+nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
 
 nv_wo32(chan->ramin, 0x00, chan->id);
-nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
+nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
 
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
@@ -127,7 +126,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
 
 /* tell any engines on this channel to unload their contexts */
-nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
 
@@ -145,7 +144,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
 nouveau_gpuobj_ref(NULL, &fctx->cache);
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 chan->engctx[engine] = NULL;
 kfree(fctx);
 }
@@ -169,7 +168,7 @@ nv84_fifo_init(struct drm_device *dev, int engine)
 for (i = 0; i < 128; i++) {
 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 if (chan && (fctx = chan->engctx[engine]))
-instance = 0x80000000 | fctx->ramfc->vinst >> 8;
+instance = 0x80000000 | fctx->ramfc->addr >> 8;
 else
 instance = 0x00000000;
 nv_wr32(dev, 0x002600 + (i * 4), instance);
@@ -200,7 +199,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
 for (i = 0; i < priv->base.channels; i++) {
 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 if (chan)
-nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
 return -EBUSY;
@@ -48,8 +48,6 @@ struct nvc0_fifo_chan {
 static void
 nvc0_fifo_playlist_update(struct drm_device *dev)
 {
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 struct nouveau_gpuobj *cur;
 int i, p;
@@ -64,9 +62,9 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
 nv_wo32(cur, p + 4, 0x00000004);
 p += 8;
 }
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
-nv_wr32(dev, 0x002270, cur->vinst >> 12);
+nv_wr32(dev, 0x002270, cur->addr >> 12);
 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
 NV_ERROR(dev, "PFIFO - playlist update failed\n");
@@ -76,11 +74,9 @@ static int
 nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
 struct nvc0_fifo_chan *fctx;
-u64 usermem = priv->user.mem->vinst + chan->id * 0x1000;
+u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
 int ret, i;
 
@@ -115,10 +111,10 @@ nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(chan->ramin, 0xb8, 0xf8000000);
 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
-(chan->ramin->vinst >> 12));
+(chan->ramin->addr >> 12));
 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
 nvc0_fifo_playlist_update(dev);
 
@@ -198,7 +194,7 @@ nvc0_fifo_init(struct drm_device *dev, int engine)
 continue;
 
 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
-(chan->ramin->vinst >> 12));
+(chan->ramin->addr >> 12));
 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
 }
 nvc0_fifo_playlist_update(dev);
@@ -55,8 +55,6 @@ struct nve0_fifo_chan {
 static void
 nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
 {
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 struct nve0_fifo_engine *peng = &priv->engine[engine];
 struct nouveau_gpuobj *cur;
@@ -84,9 +82,9 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
 nv_wo32(cur, p + 4, 0x00000000);
 p += 8;
 }
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
-nv_wr32(dev, 0x002270, cur->vinst >> 12);
+nv_wr32(dev, 0x002270, cur->addr >> 12);
 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
@@ -96,11 +94,9 @@ static int
 nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
 struct nve0_fifo_chan *fctx;
-u64 usermem = priv->user.mem->vinst + chan->id * 512;
+u64 usermem = priv->user.mem->addr + chan->id * 512;
 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
 int ret = 0, i;
 
@@ -135,10 +131,10 @@ nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(chan->ramin, 0xe8, chan->id);
 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
-(chan->ramin->vinst >> 12));
+(chan->ramin->addr >> 12));
 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
 nve0_fifo_playlist_update(dev, fctx->engine);
 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
@@ -207,7 +203,7 @@ nve0_fifo_init(struct drm_device *dev, int engine)
 continue;
 
 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
-(chan->ramin->vinst >> 12));
+(chan->ramin->addr >> 12));
 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
 nve0_fifo_playlist_update(dev, fctx->engine);
 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
@@ -52,7 +52,7 @@ nv20_graph_unload_context(struct drm_device *dev)
 return 0;
 grctx = chan->engctx[NVOBJ_ENGINE_GR];
 
-nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
+nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->addr >> 4);
 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
 
@@ -437,7 +437,7 @@ nv20_graph_context_new(struct nouveau_channel *chan, int engine)
 /* CTX_USER */
 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
 
-nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
+nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->addr >> 4);
 chan->engctx[engine] = grctx;
 return 0;
 }
@@ -505,7 +505,7 @@ nv20_graph_init(struct drm_device *dev, int engine)
 nv_wr32(dev, NV03_PMC_ENABLE,
 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
 
-nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
+nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
 
 nv20_graph_rdi(dev);
 
@@ -592,7 +592,7 @@ nv30_graph_init(struct drm_device *dev, int engine)
 nv_wr32(dev, NV03_PMC_ENABLE,
 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
 
-nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
+nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
 
 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -52,16 +52,16 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
 
 /* Initialise default context values */
 nv40_grctx_fill(dev, grctx);
-nv_wo32(grctx, 0, grctx->vinst);
+nv_wo32(grctx, 0, grctx->addr);
 
 /* init grctx pointer in ramfc, and on PFIFO if channel is
 * already active there
 */
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
@@ -75,7 +75,7 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 struct nouveau_gpuobj *grctx = chan->engctx[engine];
 struct drm_device *dev = chan->dev;
 struct drm_nouveau_private *dev_priv = dev->dev_private;
-u32 inst = 0x01000000 | (grctx->pinst >> 4);
+u32 inst = 0x01000000 | (grctx->addr >> 4);
 unsigned long flags;
 
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -357,7 +357,7 @@ nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 continue;
 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
 
-if (grctx && grctx->pinst == inst)
+if (grctx && grctx->addr == inst)
 break;
 }
 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -30,7 +30,6 @@
 #include <engine/fifo.h>
 #include <core/ramht.h>
 #include "nouveau_dma.h"
-#include <subdev/vm.h>
 #include "nv50_evo.h"
 
 struct nv50_graph_engine {
@@ -155,18 +154,18 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
 
 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
 nv_wo32(ramin, hdr + 0x00, 0x00190002);
-nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
-nv_wo32(ramin, hdr + 0x08, grctx->vinst);
+nv_wo32(ramin, hdr + 0x04, grctx->addr + grctx->size - 1);
+nv_wo32(ramin, hdr + 0x08, grctx->addr);
 nv_wo32(ramin, hdr + 0x0c, 0);
 nv_wo32(ramin, hdr + 0x10, 0);
 nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
 nv50_grctx_fill(dev, grctx);
-nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
+nv_wo32(grctx, 0x00000, chan->ramin->addr >> 12);
 
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
-atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
+nvvm_engref(chan->vm, engine, 1);
 chan->engctx[NVOBJ_ENGINE_GR] = grctx;
 return 0;
 }
@@ -181,9 +180,9 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
 
 for (i = hdr; i < hdr + 24; i += 4)
 nv_wo32(chan->ramin, i, 0);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
-atomic_dec(&chan->vm->engref[engine]);
+nvvm_engref(chan->vm, engine, -1);
 nouveau_gpuobj_ref(NULL, &grctx);
 chan->engctx[engine] = NULL;
 }
@@ -193,7 +192,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
 u32 handle, u16 class)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *obj = NULL;
 int ret;
 
@@ -207,7 +205,7 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
 nv_wo32(obj, 0x04, 0x00000000);
 nv_wo32(obj, 0x08, 0x00000000);
 nv_wo32(obj, 0x0c, 0x00000000);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 ret = nouveau_ramht_insert(chan, handle, obj);
 nouveau_gpuobj_ref(NULL, &obj);
@@ -723,7 +721,7 @@ nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
 if (!chan || !chan->ramin)
 continue;
 
-if (inst == chan->ramin->vinst)
+if (inst == chan->ramin->addr)
 break;
 }
 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -65,7 +65,7 @@ nvc0_graph_load_context(struct nouveau_channel *chan)
 struct drm_device *dev = chan->dev;
 
 nv_wr32(dev, 0x409840, 0x00000030);
-nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
 nv_wr32(dev, 0x409504, 0x00000003);
 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
@@ -90,7 +90,6 @@ nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
 static int
 nvc0_graph_construct_context(struct nouveau_channel *chan)
 {
-struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
 struct drm_device *dev = chan->dev;
@@ -103,7 +102,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
 
 if (!nouveau_ctxfw) {
 nv_wr32(dev, 0x409840, 0x80000000);
-nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
 nv_wr32(dev, 0x409504, 0x00000001);
 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
@@ -118,7 +117,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
 nv_wo32(grch->grctx, 0x20, 0);
 nv_wo32(grch->grctx, 0x28, 0);
 nv_wo32(grch->grctx, 0x2c, 0);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 }
 
 ret = nvc0_grctx_generate(chan);
@@ -127,7 +126,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
 
 if (!nouveau_ctxfw) {
 nv_wr32(dev, 0x409840, 0x80000000);
-nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
 nv_wr32(dev, 0x409504, 0x00000002);
 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
@@ -136,7 +135,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
 goto err;
 }
 } else {
-ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ret = nvc0_graph_unload_context_to(dev, chan->ramin->addr);
 if (ret)
 goto err;
 }
@@ -165,8 +164,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
+ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
+NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
 &grch->unk408004_vma);
 if (ret)
 return ret;
@@ -175,8 +174,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
+ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
+NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
 &grch->unk40800c_vma);
 if (ret)
 return ret;
@@ -186,8 +185,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
-chan->vm, &grch->unk418810_vma);
+ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
+NV_MEM_ACCESS_RW, &grch->unk418810_vma);
 if (ret)
 return ret;
 
@@ -195,9 +194,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
-&grch->mmio_vma);
+ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm, NV_MEM_ACCESS_RW |
+NV_MEM_ACCESS_SYS, &grch->mmio_vma);
 if (ret)
 return ret;
 
@@ -268,8 +266,6 @@ static int
 nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
 struct nvc0_graph_chan *grch;
 struct nouveau_gpuobj *grctx;
@@ -285,9 +281,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
 if (ret)
 goto error;
 
-ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
-&grch->grctx_vma);
+ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
+NV_MEM_ACCESS_SYS, &grch->grctx_vma);
 if (ret)
 return ret;
 
@@ -299,7 +294,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
 
 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
 if (!priv->grctx_vals) {
 ret = nvc0_graph_construct_context(chan);
@@ -324,7 +319,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(grctx, 0x28, 0);
 nv_wo32(grctx, 0x2c, 0);
 }
-pinstmem->flush(dev);
+nvimem_flush(dev);
 return 0;
 
 error:
@@ -373,8 +368,8 @@ nvc0_graph_init_obj418880(struct drm_device *dev)
 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
 for (i = 0; i < 4; i++)
 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
-nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
-nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
 }
 
 static void
@@ -662,7 +657,7 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
 if (!chan || !chan->ramin)
 continue;
 
-if (inst == chan->ramin->vinst)
+if (inst == chan->ramin->addr)
 break;
 }
 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -63,7 +63,7 @@ nve0_graph_load_context(struct nouveau_channel *chan)
 struct drm_device *dev = chan->dev;
 
 nv_wr32(dev, 0x409840, 0x00000030);
-nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
 nv_wr32(dev, 0x409504, 0x00000003);
 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
@@ -88,7 +88,6 @@ nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
 static int
 nve0_graph_construct_context(struct nouveau_channel *chan)
 {
-struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
 struct drm_device *dev = chan->dev;
@@ -105,13 +104,13 @@ nve0_graph_construct_context(struct nouveau_channel *chan)
 nv_wo32(grch->grctx, 0x20, 0);
 nv_wo32(grch->grctx, 0x28, 0);
 nv_wo32(grch->grctx, 0x2c, 0);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 ret = nve0_grctx_generate(chan);
 if (ret)
 goto err;
 
-ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
+ret = nve0_graph_unload_context_to(dev, chan->ramin->addr);
 if (ret)
 goto err;
 
@@ -141,8 +140,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
+ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
+NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
 &grch->unk408004_vma);
 if (ret)
 return ret;
@@ -151,8 +150,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
+ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
+NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
 &grch->unk40800c_vma);
 if (ret)
 return ret;
@@ -162,8 +161,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
-chan->vm, &grch->unk418810_vma);
+ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
+NV_MEM_ACCESS_RW, &grch->unk418810_vma);
 if (ret)
 return ret;
 
@@ -171,8 +170,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 if (ret)
 return ret;
 
-ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
+ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm,
+NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
 &grch->mmio_vma);
 if (ret)
 return ret;
@@ -221,8 +220,6 @@ static int
 nve0_graph_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
-struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 struct nve0_graph_priv *priv = nv_engine(dev, engine);
 struct nve0_graph_chan *grch;
 struct nouveau_gpuobj *grctx;
@@ -238,9 +235,8 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
 if (ret)
 goto error;
 
-ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
-NV_MEM_ACCESS_SYS, chan->vm,
-&grch->grctx_vma);
+ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
+NV_MEM_ACCESS_SYS, &grch->grctx_vma);
 if (ret)
 return ret;
 
@@ -252,7 +248,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
 
 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
-pinstmem->flush(dev);
+nvimem_flush(dev);
 
 if (!priv->grctx_vals) {
 ret = nve0_graph_construct_context(chan);
@@ -272,7 +268,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
 nv_wo32(grctx, 0x28, 0);
 nv_wo32(grctx, 0x2c, 0);
 
-pinstmem->flush(dev);
+nvimem_flush(dev);
 return 0;
 
 error:
@@ -321,8 +317,8 @@ nve0_graph_init_obj418880(struct drm_device *dev)
 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
 for (i = 0; i < 4; i++)
 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
-nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
-nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
 }
 
 static void
@@ -591,7 +587,7 @@ nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
 if (!chan || !chan->ramin)
 continue;
 
-if (inst == chan->ramin->vinst)
+if (inst == chan->ramin->addr)
 break;
 }
 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -74,8 +74,8 @@ nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
-nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
-nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
+nv_wr32(dev, 0x00330c, ctx->addr >> 4);
+nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
@@ -90,7 +90,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
 struct nouveau_gpuobj *ctx = chan->engctx[engine];
 struct drm_device *dev = chan->dev;
 unsigned long flags;
-u32 inst = 0x80000000 | (ctx->pinst >> 4);
+u32 inst = 0x80000000 | (ctx->addr >> 4);
 
 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
@@ -224,7 +224,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
 continue;
 
 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
-if (ctx && ctx->pinst == inst)
+if (ctx && ctx->addr == inst)
 break;
 }
 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -47,7 +47,6 @@ static int
 nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *ramin = chan->ramin;
 struct nouveau_gpuobj *ctx = NULL;
 int ret;
@@ -60,15 +59,15 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
 return ret;
 
 nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
-nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
-nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
+nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->addr + ctx->size - 1);
+nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->addr);
 nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
 nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
 nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
 
 nv_wo32(ctx, 0x70, 0x00801ec1);
 nv_wo32(ctx, 0x7c, 0x0000037c);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 chan->engctx[engine] = ctx;
 return 0;
@@ -93,7 +92,6 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
 u32 handle, u16 class)
 {
 struct drm_device *dev = chan->dev;
-struct drm_nouveau_private *dev_priv = dev->dev_private;
 struct nouveau_gpuobj *obj = NULL;
 int ret;
 
@@ -107,7 +105,7 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
 nv_wo32(obj, 0x04, 0x00000000);
 nv_wo32(obj, 0x08, 0x00000000);
 nv_wo32(obj, 0x0c, 0x00000000);
-dev_priv->engine.instmem.flush(dev);
+nvimem_flush(dev);
 
 ret = nouveau_ramht_insert(chan, handle, obj);
 nouveau_gpuobj_ref(NULL, &obj);
@@ -25,7 +25,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 
 struct nv98_ppp_engine {
@@ -25,7 +25,6 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
-#include <subdev/vm.h>
 #include <core/ramht.h>
 
 /*XXX: This stub is currently used on NV98+ also, as soon as this becomes
drivers/gpu/drm/nouveau/core/include/subdev/bar.h (new file, 55 lines)
@@ -0,0 +1,55 @@
+#ifndef __NOUVEAU_BAR_H__
+#define __NOUVEAU_BAR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+struct nouveau_vma;
+
+struct nouveau_bar {
+struct nouveau_subdev base;
+
+int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
+struct nouveau_mem *, struct nouveau_object **);
+void __iomem *iomem;
+
+int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
+u32 flags, struct nouveau_vma *);
+int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
+u32 flags, struct nouveau_vma *);
+void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
+void (*flush)(struct nouveau_bar *);
+};
+
+static inline struct nouveau_bar *
+nouveau_bar(void *obj)
+{
+return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
+}
+
+#define nouveau_bar_create(p,e,o,d) \
+nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p) \
+nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s) \
+nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv50_bar_oclass;
+extern struct nouveau_oclass nvc0_bar_oclass;
+
+int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
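The struct above is pure interface: per-generation implementations (nv50_bar_oclass, nvc0_bar_oclass) fill in the function pointers. A hypothetical caller, sketched only from the declarations in this header; the NULL checks and the example_ name are assumptions, not driver code:

/* sketch: flush CPU writes to BAR-mapped memory through the subdev */
void example_bar_flush(void *engine_or_device_object)
{
	struct nouveau_bar *bar = nouveau_bar(engine_or_device_object);

	if (bar && bar->flush)
		bar->flush(bar);	/* e.g. nv84_bar_flush() on NV84+ */
}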
@@ -6,6 +6,7 @@
 #include <core/device.h>
 #endif
 #include <core/mm.h>
 
+#include <subdev/vm.h>
 
 /* memory type/access flags, do not match hardware values */
74	drivers/gpu/drm/nouveau/core/include/subdev/instmem.h	Normal file
@ -0,0 +1,74 @@
#ifndef __NOUVEAU_INSTMEM_H__
#define __NOUVEAU_INSTMEM_H__

#include <core/subdev.h>
#include <core/device.h>
#include <core/mm.h>

struct nouveau_instobj {
	struct nouveau_object base;
	struct list_head head;
	struct nouveau_mm heap;
	u32 *suspend;
	u64 addr;
	u32 size;
};

static inline struct nouveau_instobj *
nv_memobj(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
#endif
	return obj;
}

#define nouveau_instobj_create(p,e,o,d) \
	nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_instobj_init(p) \
	nouveau_object_init(&(p)->base)
#define nouveau_instobj_fini(p,s) \
	nouveau_object_fini(&(p)->base, (s))

int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
			    struct nouveau_oclass *, int, void **);
void nouveau_instobj_destroy(struct nouveau_instobj *);

void _nouveau_instobj_dtor(struct nouveau_object *);
#define _nouveau_instobj_init nouveau_object_init
#define _nouveau_instobj_fini nouveau_object_fini

struct nouveau_instmem {
	struct nouveau_subdev base;
	struct list_head list;

	u32 reserved;
	int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
		     u32 size, u32 align, struct nouveau_object **);
};

static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}

#define nouveau_instmem_create(p,e,o,d) \
	nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_instmem_destroy(p) \
	nouveau_subdev_destroy(&(p)->base)
int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
			    struct nouveau_oclass *, int, void **);
int nouveau_instmem_init(struct nouveau_instmem *);
int nouveau_instmem_fini(struct nouveau_instmem *, bool);

#define _nouveau_instmem_dtor _nouveau_subdev_dtor
int _nouveau_instmem_init(struct nouveau_object *);
int _nouveau_instmem_fini(struct nouveau_object *, bool);

extern struct nouveau_oclass nv04_instmem_oclass;
extern struct nouveau_oclass nv40_instmem_oclass;
extern struct nouveau_oclass nv50_instmem_oclass;

#endif
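Annotation (not part of the commit): a minimal sketch of allocating instance
memory through the new alloc() hook, assuming a parent object on a device
that carries this subdev; the helper name example_instmem_alloc is
hypothetical.

/* hypothetical caller: a 4KiB, 256-byte-aligned instance object */
static int
example_instmem_alloc(struct nouveau_object *parent,
		      struct nouveau_object **pobject)
{
	struct nouveau_instmem *imem = nouveau_instmem(parent);

	return imem->alloc(imem, parent, 0x1000, 0x100, pobject);
}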
@ -25,10 +25,14 @@
#ifndef __NOUVEAU_VM_H__
#define __NOUVEAU_VM_H__

#ifndef XXX_THIS_IS_A_HACK
#include <core/object.h>
#include <core/subdev.h>
#include <core/device.h>
#endif
#include <core/mm.h>

struct nouveau_mem;

#ifndef XXX_THIS_IS_A_HACK
struct nouveau_vm_pgt {
	struct nouveau_gpuobj *obj[2];
	u32 refcount[2];
@ -38,6 +42,10 @@ struct nouveau_vm_pgd {
	struct list_head head;
	struct nouveau_gpuobj *obj;
};
#endif

struct nouveau_gpuobj;
struct nouveau_mem;

struct nouveau_vma {
	struct list_head head;
@ -49,21 +57,29 @@ struct nouveau_vma {
};

struct nouveau_vm {
	struct drm_device *dev;
	struct nouveau_vmmgr *vmm;
	struct nouveau_mm mm;
	int refcount;

	struct list_head pgd_list;
	atomic_t engref[16];
	atomic_t engref[64]; //NVDEV_SUBDEV_NR];

	struct nouveau_vm_pgt *pgt;
	u32 fpde;
	u32 lpde;
};

#ifndef XXX_THIS_IS_A_HACK
struct nouveau_vmmgr {
	struct nouveau_subdev base;

	u32 pgt_bits;
	u8 spg_shift;
	u8 lpg_shift;

	int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
		      u64 mm_offset, struct nouveau_vm **);

	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
			struct nouveau_gpuobj *pgt[2]);
	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
@ -71,16 +87,48 @@ struct nouveau_vm {
		    u64 phys, u64 delta);
	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);

	void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
			     struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
	void (*flush)(struct nouveau_vm *);
};

/* nouveau_vm.c */
int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
static inline struct nouveau_vmmgr *
nouveau_vmmgr(void *obj)
{
	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
}

#define nouveau_vmmgr_create(p,e,o,i,f,d) \
	nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
#define nouveau_vmmgr_destroy(p) \
	nouveau_subdev_destroy(&(p)->base)
#define nouveau_vmmgr_init(p) \
	nouveau_subdev_init(&(p)->base)
#define nouveau_vmmgr_fini(p,s) \
	nouveau_subdev_fini(&(p)->base, (s))

#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
#define _nouveau_vmmgr_init _nouveau_subdev_init
#define _nouveau_vmmgr_fini _nouveau_subdev_fini

extern struct nouveau_oclass nv04_vmmgr_oclass;
extern struct nouveau_oclass nv41_vmmgr_oclass;
extern struct nouveau_oclass nv44_vmmgr_oclass;
extern struct nouveau_oclass nv50_vmmgr_oclass;
extern struct nouveau_oclass nvc0_vmmgr_oclass;

int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
		   struct nouveau_vm **);
void nv04_vmmgr_dtor(struct nouveau_object *);

void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);

/* nouveau_vm.c */
int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
		      u64 mm_offset, u32 block, struct nouveau_vm **);
int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
		   u64 mm_offset, struct nouveau_vm **);
#endif
int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
		   struct nouveau_gpuobj *pgd);
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
@ -93,26 +141,6 @@ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
		       struct nouveau_mem *);
void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
			     struct nouveau_mem *mem);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);

/* nvc0_vm.c */
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		     struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
	struct nouveau_mem *mem);

#endif
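Annotation (not part of the commit): nouveau_vm_new() now takes the core
nouveau_device rather than a drm_device.  A minimal sketch of the reworked
entry point, using the same offset scheme the nv50 BAR3 code below uses;
the 1GiB-at-4GiB address space here is purely hypothetical.

static int
example_vm_new(struct nouveau_device *device, struct nouveau_vm **pvm)
{
	/* offset, length and mm_offset as taken by the new signature */
	return nouveau_vm_new(device, 0x100000000ULL, 1ULL << 30,
			      0x100000000ULL, pvm);
}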
135	drivers/gpu/drm/nouveau/core/subdev/bar/base.c	Normal file
@ -0,0 +1,135 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <subdev/bar.h>

struct nouveau_barobj {
	struct nouveau_object base;
	struct nouveau_vma vma;
	void __iomem *iomem;
};

static int
nouveau_barobj_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *mem, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = (void *)engine;
	struct nouveau_barobj *barobj;
	int ret;

	ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
	*pobject = nv_object(barobj);
	if (ret)
		return ret;

	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
	if (ret)
		return ret;

	barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
	return 0;
}

static void
nouveau_barobj_dtor(struct nouveau_object *object)
{
	struct nouveau_bar *bar = (void *)object->engine;
	struct nouveau_barobj *barobj = (void *)object;
	if (barobj->vma.node)
		bar->unmap(bar, &barobj->vma);
	nouveau_object_destroy(&barobj->base);
}

static u32
nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
{
	struct nouveau_barobj *barobj = (void *)object;
	return ioread32_native(barobj->iomem + addr);
}

static void
nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	struct nouveau_barobj *barobj = (void *)object;
	iowrite32_native(data, barobj->iomem + addr);
}

static struct nouveau_oclass
nouveau_barobj_oclass = {
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nouveau_barobj_ctor,
		.dtor = nouveau_barobj_dtor,
		.init = nouveau_object_init,
		.fini = nouveau_object_fini,
		.rd32 = nouveau_barobj_rd32,
		.wr32 = nouveau_barobj_wr32,
	},
};

int
nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
		  struct nouveau_mem *mem, struct nouveau_object **pobject)
{
	struct nouveau_object *engine = nv_object(bar);
	return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
				   mem, 0, pobject);
}

int
nouveau_bar_create_(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, int length, void **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_bar *bar;
	int ret;

	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
				     "bar", length, pobject);
	bar = *pobject;
	if (ret)
		return ret;

	bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
			     pci_resource_len(device->pdev, 3));
	return 0;
}

void
nouveau_bar_destroy(struct nouveau_bar *bar)
{
	if (bar->iomem)
		iounmap(bar->iomem);
	nouveau_subdev_destroy(&bar->base);
}

void
_nouveau_bar_dtor(struct nouveau_object *object)
{
	struct nouveau_bar *bar = (void *)object;
	nouveau_bar_destroy(bar);
}
263	drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c	Normal file
@ -0,0 +1,263 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nv50_bar_priv {
	struct nouveau_bar base;
	spinlock_t lock;
	struct nouveau_gpuobj *mem;
	struct nouveau_gpuobj *pad;
	struct nouveau_gpuobj *pgd;
	struct nouveau_vm *bar1_vm;
	struct nouveau_gpuobj *bar1;
	struct nouveau_vm *bar3_vm;
	struct nouveau_gpuobj *bar3;
};

static int
nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	nv50_vm_flush_engine(nv_subdev(bar), 6);
	return 0;
}

static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	nv50_vm_flush_engine(nv_subdev(bar), 6);
	return 0;
}

static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nv50_vm_flush_engine(nv_subdev(bar), 6);
	nouveau_vm_put(vma);
}

static void
nv50_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x00330c, 0x00000001);
	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

void
nv84_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(bar, 0x070000, 0x00000001);
	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
				 &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
				 0x1400 : 0x0200, 0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + pci_resource_len(device->pdev, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
				 &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + pci_resource_len(device->pdev, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}

static void
nv50_bar_dtor(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->bar1);
	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3);
	if (priv->bar3_vm) {
		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
	}
	nouveau_gpuobj_ref(NULL, &priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->pad);
	nouveau_gpuobj_ref(NULL, &priv->mem);
	nouveau_bar_destroy(&priv->base);
}

static int
nv50_bar_init(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	int ret;

	ret = nouveau_bar_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv50_vm_flush_engine(nv_subdev(priv), 6);

	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
	return 0;
}

static int
nv50_bar_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_bar_priv *priv = (void *)object;
	return nouveau_bar_fini(&priv->base, suspend);
}

struct nouveau_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};
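Annotation (not part of the commit): a worked example of the page-table
sizing in nv50_bar_ctor() above.  Each 4KiB page of the aperture needs one
8-byte PTE, which is where ((limit-- - start) >> 12) * 8 comes from; the
post-decrement only turns the exclusive limit into the inclusive value
later written into the DMA object at 0x04/0x0c.  For a hypothetical 256MiB
BAR3:

static u32
example_bar3_pgt_bytes(u64 bar_len)
{
	/* (0x10000000 >> 12) pages * 8 bytes/PTE = 0x80000 (512KiB) */
	return (bar_len >> 12) * 8;
}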
215	drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c	Normal file
@ -0,0 +1,215 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nvc0_bar_priv {
	struct nouveau_bar base;
	spinlock_t lock;
	struct {
		struct nouveau_gpuobj *mem;
		struct nouveau_gpuobj *pgd;
		struct nouveau_vm *vm;
	} bar[2];
};

static int
nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
	return 0;
}

static int
nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
			     mem->page_shift, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
	return 0;
}

static void
nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	int i = !(vma->vm == priv->bar[0].vm);

	nouveau_vm_unmap(vma);
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
	nouveau_vm_put(vma);
}

static int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct pci_dev *pdev = device->pdev;
	struct nvc0_bar_priv *priv;
	struct nouveau_gpuobj *mem;
	struct nouveau_vm *vm;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* BAR3 */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
	mem = priv->bar[0].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL,
				 (pci_resource_len(pdev, 3) >> 12) * 8,
				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
				 &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));

	/* BAR1 */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
	mem = priv->bar[1].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nvc0_bar_kmap;
	priv->base.umap = nvc0_bar_umap;
	priv->base.unmap = nvc0_bar_unmap;
	priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}

static void
nvc0_bar_dtor(struct nouveau_object *object)
{
	struct nvc0_bar_priv *priv = (void *)object;

	nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);

	if (priv->bar[0].vm) {
		nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
	}
	nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);

	nouveau_bar_destroy(&priv->base);
}

static int
nvc0_bar_init(struct nouveau_object *object)
{
	struct nvc0_bar_priv *priv = (void *)object;
	int ret;

	ret = nouveau_bar_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);

	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
	nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
	return 0;
}

struct nouveau_oclass
nvc0_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_bar_ctor,
		.dtor = nvc0_bar_dtor,
		.init = nvc0_bar_init,
		.fini = _nouveau_bar_fini,
	},
};
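Annotation (not part of the commit): the layout written by nvc0_bar_ctor()
above -- on Fermi the BAR's instance block carries the page-directory
address at +0x200/+0x204 and the inclusive aperture limit at +0x208/+0x20c.
A hypothetical helper with the same layout:

static void
example_nvc0_bar_inst(struct nouveau_gpuobj *mem, u64 pgd_addr, u64 bar_len)
{
	nv_wo32(mem, 0x0200, lower_32_bits(pgd_addr));
	nv_wo32(mem, 0x0204, upper_32_bits(pgd_addr));
	nv_wo32(mem, 0x0208, lower_32_bits(bar_len - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(bar_len - 1));
}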
@ -30,6 +30,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

int
nv04_identify(struct nouveau_device *device)
@ -43,6 +45,8 @@ nv04_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x05:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -52,6 +56,8 @@ nv04_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	default:
		nv_fatal(device, "unknown RIVA chipset\n");
@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

int
nv10_identify(struct nouveau_device *device)
@ -45,6 +47,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x15:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -55,6 +59,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x16:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -65,6 +71,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x1a:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -75,6 +83,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x11:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -85,6 +95,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x17:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -95,6 +107,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x1f:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -105,6 +119,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x18:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -115,6 +131,8 @@ nv10_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	default:
		nv_fatal(device, "unknown Celsius chipset\n");
@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

int
nv20_identify(struct nouveau_device *device)
@ -45,6 +47,8 @@ nv20_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x25:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -55,6 +59,8 @@ nv20_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x28:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -65,6 +71,8 @@ nv20_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x2a:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -75,6 +83,8 @@ nv20_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	default:
		nv_fatal(device, "unknown Kelvin chipset\n");
@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

int
nv30_identify(struct nouveau_device *device)
@ -45,6 +47,8 @@ nv30_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x35:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -55,6 +59,8 @@ nv30_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x31:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -65,6 +71,8 @@ nv30_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x36:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -75,6 +83,8 @@ nv30_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x34:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -85,6 +95,8 @@ nv30_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	default:
		nv_fatal(device, "unknown Rankine chipset\n");
@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>

int
nv40_identify(struct nouveau_device *device)
@ -45,6 +47,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x41:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -55,6 +59,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x42:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -65,6 +71,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x43:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -75,6 +83,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x45:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -85,6 +95,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x47:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -95,6 +107,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x49:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -105,6 +119,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x4b:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -115,6 +131,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x44:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -125,6 +143,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x46:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -135,6 +155,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x4a:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -145,6 +167,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x4c:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -155,6 +179,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x4e:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -165,6 +191,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x63:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -175,6 +203,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x67:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -185,6 +215,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	case 0x68:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -195,6 +227,8 @@ nv40_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
		break;
	default:
		nv_fatal(device, "unknown Curie chipset\n");
@ -31,6 +31,9 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

int
nv50_identify(struct nouveau_device *device)
@ -45,6 +48,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x84:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -55,6 +61,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x86:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -65,6 +74,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x92:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -75,6 +87,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x94:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -85,6 +100,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x96:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -95,6 +113,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0x98:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -105,6 +126,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xa0:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -115,6 +139,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xaa:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -125,6 +152,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xac:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -135,6 +165,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xa3:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -145,6 +178,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xa5:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -155,6 +191,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xa8:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -165,6 +204,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	case 0xaf:
		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@ -175,6 +217,9 @@ nv50_identify(struct nouveau_device *device)
		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
		break;
	default:
		nv_fatal(device, "unknown Tesla chipset\n");
@ -32,6 +32,9 @@
|
||||
#include <subdev/timer.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <subdev/ltcg.h>
|
||||
#include <subdev/instmem.h>
|
||||
#include <subdev/vm.h>
|
||||
#include <subdev/bar.h>
|
||||
|
||||
int
|
||||
nvc0_identify(struct nouveau_device *device)
|
||||
@ -47,6 +50,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xc4:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -58,6 +64,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xc3:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -69,6 +78,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xce:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -80,6 +92,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xcf:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -91,6 +106,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xc1:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -102,6 +120,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xc8:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -113,6 +134,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
case 0xd9:
|
||||
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
|
||||
@ -124,6 +148,9 @@ nvc0_identify(struct nouveau_device *device)
|
||||
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
|
||||
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
|
||||
break;
|
||||
default:
|
||||
nv_fatal(device, "unknown Fermi chipset\n");
|
||||

@@ -32,6 +32,9 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/ltcg.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

int
nve0_identify(struct nouveau_device *device)
@@ -47,6 +50,9 @@ nve0_identify(struct nouveau_device *device)
        device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
        device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
        device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
        device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
        device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
        device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
        break;
    case 0xe7:
        device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
@@ -58,6 +64,9 @@ nve0_identify(struct nouveau_device *device)
        device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
        device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
        device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
        device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
        device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
        device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
        break;
    default:
        nv_fatal(device, "unknown Kepler chipset\n");

@@ -50,54 +50,14 @@ nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
static void
nv40_fb_init_gart(struct nv40_fb_priv *priv)
{
#if 0
    struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma;

    if (ndev->gart_info.type != NOUVEAU_GART_HW) {
#endif
        nv_wr32(priv, 0x100800, 0x00000001);
#if 0
        return;
    }

    nv_wr32(ndev, 0x100800, gart->pinst | 0x00000002);
    nv_mask(ndev, 0x10008c, 0x00000100, 0x00000100);
    nv_wr32(ndev, 0x100820, 0x00000000);
#endif
    nv_wr32(priv, 0x100800, 0x00000001);
}

static void
nv44_fb_init_gart(struct nv40_fb_priv *priv)
{
#if 0
    struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma;
    u32 vinst;

    if (ndev->gart_info.type != NOUVEAU_GART_HW) {
#endif
        nv_wr32(priv, 0x100850, 0x80000000);
        nv_wr32(priv, 0x100800, 0x00000001);
#if 0
        return;
    }

    /* calculate vram address of this PRAMIN block, object
     * must be allocated on 512KiB alignment, and not exceed
     * a total size of 512KiB for this to work correctly
     */
    vinst = nv_rd32(ndev, 0x10020c);
    vinst -= ((gart->pinst >> 19) + 1) << 19;

    nv_wr32(ndev, 0x100850, 0x80000000);
    nv_wr32(ndev, 0x100818, ndev->gart_info.dummy.addr);

    nv_wr32(ndev, 0x100804, ndev->gart_info.aper_size);
    nv_wr32(ndev, 0x100850, 0x00008000);
    nv_mask(ndev, 0x10008c, 0x00000200, 0x00000200);
    nv_wr32(ndev, 0x100820, 0x00000000);
    nv_wr32(ndev, 0x10082c, 0x00000001);
    nv_wr32(ndev, 0x100800, vinst | 0x00000010);
#endif
    nv_wr32(priv, 0x100850, 0x80000000);
    nv_wr32(priv, 0x100800, 0x00000001);
}

static int
135 drivers/gpu/drm/nouveau/core/subdev/instmem/base.c Normal file
@@ -0,0 +1,135 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/instmem.h>

int
nouveau_instobj_create_(struct nouveau_object *parent,
                        struct nouveau_object *engine,
                        struct nouveau_oclass *oclass,
                        int length, void **pobject)
{
    struct nouveau_instmem *imem = (void *)engine;
    struct nouveau_instobj *iobj;
    int ret;

    ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
                                 length, pobject);
    iobj = *pobject;
    if (ret)
        return ret;

    list_add(&iobj->head, &imem->list);
    return 0;
}

void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
    if (iobj->head.prev)
        list_del(&iobj->head);
    return nouveau_object_destroy(&iobj->base);
}

void
_nouveau_instobj_dtor(struct nouveau_object *object)
{
    struct nouveau_instobj *iobj = (void *)object;
    return nouveau_instobj_destroy(iobj);
}

int
nouveau_instmem_create_(struct nouveau_object *parent,
                        struct nouveau_object *engine,
                        struct nouveau_oclass *oclass,
                        int length, void **pobject)
{
    struct nouveau_instmem *imem;
    int ret;

    ret = nouveau_subdev_create_(parent, engine, oclass, 0,
                                 "INSTMEM", "instmem", length, pobject);
    imem = *pobject;
    if (ret)
        return ret;

    INIT_LIST_HEAD(&imem->list);
    return 0;
}

int
nouveau_instmem_init(struct nouveau_instmem *imem)
{
    struct nouveau_instobj *iobj;
    int ret, i;

    ret = nouveau_subdev_init(&imem->base);
    if (ret)
        return ret;

    list_for_each_entry(iobj, &imem->list, head) {
        if (iobj->suspend) {
            for (i = 0; i < iobj->size; i += 4)
                nv_wo32(iobj, i, iobj->suspend[i / 4]);
            vfree(iobj->suspend);
            iobj->suspend = NULL;
        }
    }

    return 0;
}

int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
    struct nouveau_instobj *iobj;
    int i;

    if (suspend) {
        list_for_each_entry(iobj, &imem->list, head) {
            iobj->suspend = vmalloc(iobj->size);
            if (iobj->suspend) {
                for (i = 0; i < iobj->size; i += 4)
                    iobj->suspend[i / 4] = nv_ro32(iobj, i);
            } else
                return -ENOMEM;
        }
    }

    return nouveau_subdev_fini(&imem->base, suspend);
}
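
The fini/init pair above implements suspend by snapshotting every tracked instance object, word by word, into an allocated shadow buffer, then replaying the words on resume. Below is a compile-only C sketch of the same save/restore pattern; obj_suspend/obj_resume and the rd/wr callbacks are hypothetical stand-ins for the nv_ro32()/nv_wo32() accessors, not nouveau API.

#include <stdint.h>
#include <stdlib.h>

struct shadowed_obj {
    uint32_t *suspend;   /* NULL while the object lives in PRAMIN */
    uint32_t  size;      /* object size in bytes, multiple of 4 */
};

static int obj_suspend(struct shadowed_obj *obj,
                       uint32_t (*rd)(uint32_t addr))
{
    uint32_t i;
    obj->suspend = malloc(obj->size);       /* vmalloc() in the kernel */
    if (!obj->suspend)
        return -1;                          /* -ENOMEM */
    for (i = 0; i < obj->size; i += 4)
        obj->suspend[i / 4] = rd(i);        /* nv_ro32(iobj, i) */
    return 0;
}

static void obj_resume(struct shadowed_obj *obj,
                       void (*wr)(uint32_t addr, uint32_t data))
{
    uint32_t i;
    if (!obj->suspend)
        return;
    for (i = 0; i < obj->size; i += 4)
        wr(i, obj->suspend[i / 4]);         /* nv_wo32(iobj, i, ...) */
    free(obj->suspend);
    obj->suspend = NULL;
}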

int
_nouveau_instmem_init(struct nouveau_object *object)
{
    struct nouveau_instmem *imem = (void *)object;
    return nouveau_instmem_init(imem);
}

int
_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
{
    struct nouveau_instmem *imem = (void *)object;
    return nouveau_instmem_fini(imem, suspend);
}
@@ -1,141 +1,199 @@
#include "drmP.h"
#include "drm.h"
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include <subdev/fb.h>

#include "nv04.h"

int
nv04_instmem_init(struct drm_device *dev)
static int
nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
{
    struct nv04_instmem_priv *priv = (void *)engine;
    struct nv04_instobj_priv *node;
    int ret, align;

    align = (unsigned long)data;
    if (!align)
        align = 1;

    ret = nouveau_instobj_create(parent, engine, oclass, &node);
    *pobject = nv_object(node);
    if (ret)
        return ret;

    ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
    if (ret)
        return ret;

    node->base.addr = node->mem->offset;
    node->base.size = node->mem->length;
    return 0;
}

static void
nv04_instobj_dtor(struct nouveau_object *object)
{
    struct nv04_instmem_priv *priv = (void *)object->engine;
    struct nv04_instobj_priv *node = (void *)object;
    nouveau_mm_free(&priv->heap, &node->mem);
    nouveau_instobj_destroy(&node->base);
}

static u32
nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
{
    struct nv04_instobj_priv *node = (void *)object;
    return nv_ro32(object->engine, node->mem->offset + addr);
}

static void
nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
    struct nv04_instobj_priv *node = (void *)object;
    nv_wo32(object->engine, node->mem->offset + addr, data);
}

static struct nouveau_oclass
nv04_instobj_oclass = {
    .ofuncs = &(struct nouveau_ofuncs) {
        .ctor = nv04_instobj_ctor,
        .dtor = nv04_instobj_dtor,
        .init = _nouveau_instobj_init,
        .fini = _nouveau_instobj_fini,
        .rd32 = nv04_instobj_rd32,
        .wr32 = nv04_instobj_wr32,
    },
};

int
nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
                   u32 size, u32 align, struct nouveau_object **pobject)
{
    struct nouveau_object *engine = nv_object(imem);
    struct nv04_instmem_priv *priv = (void *)(imem);
    int ret;

    ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
                              (void *)(unsigned long)align, size, pobject);
    if (ret)
        return ret;

    /* INSTMEM itself creates objects to reserve (and preserve across
     * suspend/resume) various fixed data locations, each one of these
     * takes a reference on INSTMEM itself, causing it to never be
     * freed.  We drop all the self-references here to avoid this.
     */
    if (unlikely(!priv->created))
        atomic_dec(&engine->refcount);

    return 0;
}
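
The comment above describes a classic reference cycle: internally created children each hold a reference back to their owner, so the owner's refcount can never drop to zero. A hedged, self-contained C11 illustration of the same fix (all names hypothetical, not nouveau API):

#include <stdatomic.h>

struct subsystem {
    atomic_int refcount;   /* starts at 1, held by the external owner */
};

static void child_created(struct subsystem *s, int internal)
{
    atomic_fetch_add(&s->refcount, 1);   /* every child takes a reference */
    if (internal)                        /* self-created child: undo it, so
                                          * only external users pin the
                                          * subsystem alive */
        atomic_fetch_sub(&s->refcount, 1);
}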

static int
nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv04_instmem_priv *priv;
    int ret;

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    dev_priv->engine.instmem.priv = priv;
    ret = nouveau_instmem_create(parent, engine, oclass, &priv);
    *pobject = nv_object(priv);
    if (ret)
        return ret;

    /* PRAMIN aperture maps over the end of vram, reserve the space */
    dev_priv->ramin_available = true;
    dev_priv->ramin_rsvd_vram = 512 * 1024;
    /* PRAMIN aperture maps over the end of VRAM, reserve it */
    priv->base.reserved = 512 * 1024;
    priv->base.alloc = nv04_instmem_alloc;

    ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram);
    ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
    if (ret)
        return ret;

    /* 0x00000-0x10000: reserve for probable vbios image */
    ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios);
    ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
    if (ret)
        return ret;

    /* 0x10000-0x18000: reserve for RAMHT */
    ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC,
                             &priv->ramht);
    ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
    if (ret)
        return ret;

    /* 0x18000-0x18200: reserve for RAMRO */
    ret = nouveau_gpuobj_new(dev, NULL, 0x00200, 0, 0, &priv->ramro);
    /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
    ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
    if (ret)
        return ret;

    /* 0x18200-0x18a00: reserve for RAMFC (enough for 32 nv30 channels) */
    ret = nouveau_gpuobj_new(dev, NULL, 0x00800, 0, NVOBJ_FLAG_ZERO_ALLOC,
                             &priv->ramfc);
    if (ret)
        return ret;

    ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht);
    /* 0x18800-0x18a00: reserve for RAMRO */
    ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
    if (ret)
        return ret;

    priv->created = true;
    return 0;
}
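
Summarizing the fixed PRAMIN map the new constructor lays out (derived from the comments above; the enum names are a hypothetical editorial summary, not kernel identifiers):

enum nv04_pramin_layout {
    NV04_PRAMIN_VBIOS = 0x00000,  /* 0x00000-0x10000: probable vbios image */
    NV04_PRAMIN_RAMHT = 0x10000,  /* 0x10000-0x18000: RAMHT */
    NV04_PRAMIN_RAMFC = 0x18000,  /* 0x18000-0x18800: RAMFC, 32 nv30 channels */
    NV04_PRAMIN_RAMRO = 0x18800,  /* 0x18800-0x18a00: RAMRO */
};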

void
nv04_instmem_takedown(struct drm_device *dev)
nv04_instmem_dtor(struct nouveau_object *object)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv;

    nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
    struct nv04_instmem_priv *priv = (void *)object;
    nouveau_gpuobj_ref(NULL, &priv->ramfc);
    nouveau_gpuobj_ref(NULL, &priv->ramro);
    nouveau_gpuobj_ref(NULL, &priv->ramht);

    if (drm_mm_initialized(&dev_priv->ramin_heap))
        drm_mm_takedown(&dev_priv->ramin_heap);

    kfree(priv);
    dev_priv->engine.instmem.priv = NULL;
    nouveau_gpuobj_ref(NULL, &priv->vbios);
    nouveau_mm_fini(&priv->heap);
    if (priv->iomem)
        iounmap(priv->iomem);
    nouveau_instmem_destroy(&priv->base);
}

int
nv04_instmem_suspend(struct drm_device *dev)
static u32
nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
{
    return 0;
    return nv_rd32(object, 0x700000 + addr);
}

void
nv04_instmem_resume(struct drm_device *dev)
static void
nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
    return nv_wr32(object, 0x700000 + addr, data);
}

int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
                 u32 size, u32 align)
{
    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
    struct drm_mm_node *ramin = NULL;

    do {
        if (drm_mm_pre_get(&dev_priv->ramin_heap))
            return -ENOMEM;

        spin_lock(&dev_priv->ramin_lock);
        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
        if (ramin == NULL) {
            spin_unlock(&dev_priv->ramin_lock);
            return -ENOMEM;
        }

        ramin = drm_mm_get_block_atomic(ramin, size, align);
        spin_unlock(&dev_priv->ramin_lock);
    } while (ramin == NULL);

    gpuobj->node = ramin;
    gpuobj->vinst = ramin->start;
    return 0;
}

void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

    spin_lock(&dev_priv->ramin_lock);
    drm_mm_put_block(gpuobj->node);
    gpuobj->node = NULL;
    spin_unlock(&dev_priv->ramin_lock);
}

int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
    gpuobj->pinst = gpuobj->vinst;
    return 0;
}

void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

void
nv04_instmem_flush(struct drm_device *dev)
{
}
struct nouveau_oclass
nv04_instmem_oclass = {
    .handle = NV_SUBDEV(INSTMEM, 0x04),
    .ofuncs = &(struct nouveau_ofuncs) {
        .ctor = nv04_instmem_ctor,
        .dtor = nv04_instmem_dtor,
        .init = _nouveau_instmem_init,
        .fini = _nouveau_instmem_fini,
        .rd32 = nv04_instmem_rd32,
        .wr32 = nv04_instmem_wr32,
    },
};
@@ -1,11 +1,32 @@
#ifndef __NV04_INSTMEM_H__
#define __NV04_INSTMEM_H__

#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/instmem.h>

struct nv04_instmem_priv {
    struct nouveau_instmem base;
    bool created;

    void __iomem *iomem;
    struct nouveau_mm heap;

    struct nouveau_gpuobj *vbios;
    struct nouveau_gpuobj *ramht;
    struct nouveau_gpuobj *ramro;
    struct nouveau_gpuobj *ramfc;
};

struct nv04_instobj_priv {
    struct nouveau_instobj base;
    struct nouveau_mm_node *mem;
};

void nv04_instmem_dtor(struct nouveau_object *);

int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
                       u32 size, u32 align, struct nouveau_object **pobject);

#endif

@@ -1,165 +1,139 @@
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nv04.h"

int nv40_instmem_init(struct drm_device *dev)
static inline int
nv44_graph_class(struct nv04_instmem_priv *priv)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv04_instmem_priv *priv;
    u32 vs, rsvd;
    int ret;
    if ((nv_device(priv)->chipset & 0xf0) == 0x60)
        return 1;
    return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
}
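
A worked example of the bitmask above: 0x0baf is 0000 1011 1010 1111 in binary, so low-nibble values 0, 1, 2, 3, 5, 7, 8, 9 and 11 have their bit set (the function returns 0, nv40-style graphics class), while 4, 6, 10 and 12-15 do not (returns 1, nv44-style); chipsets 0x60-0x6f are nv44-style unconditionally. The small program below just tabulates the expression for the 0x4x family:

#include <stdio.h>

int main(void)
{
    unsigned chipset;
    for (chipset = 0x40; chipset <= 0x4f; chipset++) {
        int nv44 = !(0x0baf & (1 << (chipset & 0x0f)));
        printf("chipset 0x%02x -> %s class\n",
               chipset, nv44 ? "nv44" : "nv40");
    }
    return 0;
}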

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    dev_priv->engine.instmem.priv = priv;
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
{
    struct nouveau_device *device = nv_device(parent);
    struct pci_dev *pdev = device->pdev;
    struct nv04_instmem_priv *priv;
    int ret, bar, vs;

    ret = nouveau_instmem_create(parent, engine, oclass, &priv);
    *pobject = nv_object(priv);
    if (ret)
        return ret;

    /* map bar */
    if (pci_resource_len(pdev, 2))
        bar = 2;
    else
        bar = 3;

    priv->iomem = ioremap(pci_resource_start(pdev, bar),
                          pci_resource_len(pdev, bar));
    if (!priv->iomem) {
        nv_error(priv, "unable to map PRAMIN BAR\n");
        return -EFAULT;
    }

    /* PRAMIN aperture maps over the end of vram, reserve enough space
     * to fit graphics contexts for every channel, the magics come
     * from engine/graph/nv40.c
     */
    vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
    if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
    else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
    else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
    else rsvd = 0x4a40 * vs;
    rsvd += 16 * 1024;
    rsvd *= 32; /* per-channel */
    rsvd += 512 * 1024; /* pci(e)gart table */
    rsvd += 512 * 1024; /* object storage */
    dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
    dev_priv->ramin_available = true;
    vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
    if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
    else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
    else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs;
    else priv->base.reserved = 0x4a40 * vs;
    priv->base.reserved += 16 * 1024;
    priv->base.reserved *= 32; /* per-channel */
    priv->base.reserved += 512 * 1024; /* pci(e)gart table */
    priv->base.reserved += 512 * 1024; /* object storage */

    ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram);
    priv->base.reserved = round_up(priv->base.reserved, 4096);
    priv->base.alloc = nv04_instmem_alloc;
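
To make the reservation arithmetic concrete, here is a standalone C program that evaluates it under the assumption of chipset 0x40 with vs = 2 active units (on real hardware, vs is the popcount of bits 8-15 of register 0x001540); the numbers are illustrative only:

#include <stdio.h>

static unsigned round_up_4k(unsigned v)
{
    return (v + 4095) & ~4095u;
}

int main(void)
{
    unsigned vs = 2;
    unsigned reserved = 0x6aa0 * vs;   /* per-channel graph context, chipset 0x40 */
    reserved += 16 * 1024;             /* fixed per-channel overhead */
    reserved *= 32;                    /* 32 channels */
    reserved += 512 * 1024;            /* pci(e)gart table */
    reserved += 512 * 1024;            /* object storage */
    printf("PRAMIN reservation: %u bytes (%u KiB)\n",
           round_up_4k(reserved), round_up_4k(reserved) / 1024);
    return 0;
}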

    ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
    if (ret)
        return ret;

    /* 0x00000-0x10000: reserve for probable vbios image */
    ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios);
    ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
    if (ret)
        return ret;

    /* 0x10000-0x18000: reserve for RAMHT */
    ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC,
                             &priv->ramht);
    ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
    if (ret)
        return ret;

    /* 0x18000-0x18200: reserve for RAMRO
     * 0x18200-0x20000: padding
     */
    ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, 0, &priv->ramro);
    ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
    if (ret)
        return ret;

    /* 0x20000-0x21000: reserve for RAMFC
     * 0x21000-0x40000: padding + some unknown stuff (see below)
     *
     * It appears something is controlled by 0x2220/0x2230 on certain
     * NV4x chipsets as well as RAMFC.  When 0x2230 == 0 ("new style"
     * control) the upper 16-bits of 0x2220 points at this other
     * mysterious table that's clobbering important things.
     *
     * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
     * smashed to pieces on us, so reserve 0x30000-0x40000 too..
     * 0x21000-0x40000: padding and some unknown crap
     */
    ret = nouveau_gpuobj_new(dev, NULL, 0x20000, 0, NVOBJ_FLAG_ZERO_ALLOC,
                             &priv->ramfc);
    if (ret)
        return ret;

    ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht);
    ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
    if (ret)
        return ret;

    priv->created = true;
    return 0;
}

void
nv40_instmem_takedown(struct drm_device *dev)
static u32
nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv;

    nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
    nouveau_gpuobj_ref(NULL, &priv->ramfc);
    nouveau_gpuobj_ref(NULL, &priv->ramro);
    nouveau_gpuobj_ref(NULL, &priv->ramht);

    if (drm_mm_initialized(&dev_priv->ramin_heap))
        drm_mm_takedown(&dev_priv->ramin_heap);

    kfree(priv);
    dev_priv->engine.instmem.priv = NULL;
    struct nv04_instmem_priv *priv = (void *)object;
    return ioread32_native(priv->iomem + addr);
}

int
nv40_instmem_suspend(struct drm_device *dev)
static void
nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
    return 0;
    struct nv04_instmem_priv *priv = (void *)object;
    iowrite32_native(data, priv->iomem + addr);
}
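
The two generations differ only in how PRAMIN is reached: nv04 goes through the GPU's register aperture at offset 0x700000, while nv40 ioremaps the dedicated PRAMIN PCI BAR and dereferences it directly. A hedged sketch of the contrast, with hypothetical pointers standing in for the mapped apertures:

#include <stdint.h>

volatile uint32_t *mmio_base;   /* mapped BAR0 register aperture (assumed) */
volatile uint32_t *pramin_bar;  /* mapped PRAMIN BAR, nv40+ only (assumed) */

uint32_t pramin_rd32_nv04(uint32_t addr)
{
    return mmio_base[(0x700000 + addr) / 4];  /* nv_rd32(0x700000 + addr) */
}

uint32_t pramin_rd32_nv40(uint32_t addr)
{
    return pramin_bar[addr / 4];              /* ioread32_native() */
}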

void
nv40_instmem_resume(struct drm_device *dev)
{
}

int
nv40_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
                 u32 size, u32 align)
{
    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
    struct drm_mm_node *ramin = NULL;

    do {
        if (drm_mm_pre_get(&dev_priv->ramin_heap))
            return -ENOMEM;

        spin_lock(&dev_priv->ramin_lock);
        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
        if (ramin == NULL) {
            spin_unlock(&dev_priv->ramin_lock);
            return -ENOMEM;
        }

        ramin = drm_mm_get_block_atomic(ramin, size, align);
        spin_unlock(&dev_priv->ramin_lock);
    } while (ramin == NULL);

    gpuobj->node = ramin;
    gpuobj->vinst = ramin->start;
    return 0;
}

void
nv40_instmem_put(struct nouveau_gpuobj *gpuobj)
{
    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

    spin_lock(&dev_priv->ramin_lock);
    drm_mm_put_block(gpuobj->node);
    gpuobj->node = NULL;
    spin_unlock(&dev_priv->ramin_lock);
}

int
nv40_instmem_map(struct nouveau_gpuobj *gpuobj)
{
    gpuobj->pinst = gpuobj->vinst;
    return 0;
}

void
nv40_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

void
nv40_instmem_flush(struct drm_device *dev)
{
}
struct nouveau_oclass
nv40_instmem_oclass = {
    .handle = NV_SUBDEV(INSTMEM, 0x40),
    .ofuncs = &(struct nouveau_ofuncs) {
        .ctor = nv40_instmem_ctor,
        .dtor = nv04_instmem_dtor,
        .init = _nouveau_instmem_init,
        .fini = _nouveau_instmem_fini,
        .rd32 = nv40_instmem_rd32,
        .wr32 = nv40_instmem_wr32,
    },
};
@@ -1,390 +1,172 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * Copyright 2012 Red Hat Inc.
 *
 * All Rights Reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "drm.h"
#include <subdev/instmem.h>
#include <subdev/fb.h>

#include "nouveau_drv.h"
#include <subdev/vm.h>

#define BAR1_VM_BASE 0x0020000000ULL
#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
#define BAR3_VM_BASE 0x0000000000ULL
#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
#include <core/mm.h>

struct nv50_instmem_priv {
    uint32_t save1700[5]; /* 0x1700->0x1710 */

    struct nouveau_gpuobj *bar1_dmaobj;
    struct nouveau_gpuobj *bar3_dmaobj;
    struct nouveau_instmem base;
    spinlock_t lock;
    u64 addr;
};

static void
nv50_channel_del(struct nouveau_channel **pchan)
struct nv50_instobj_priv {
    struct nouveau_instobj base;
    struct nouveau_mem *mem;
};

static int
nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
{
    struct nouveau_channel *chan;
    struct nouveau_fb *pfb = nouveau_fb(parent);
    struct nv50_instobj_priv *node;
    u32 align = (unsigned long)data;
    int ret;

    chan = *pchan;
    *pchan = NULL;
    if (!chan)
        return;
    size = max((size + 4095) & ~4095, (u32)4096);
    align = max((align + 4095) & ~4095, (u32)4096);

    nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
    nouveau_gpuobj_ref(NULL, &chan->vm_pd);
    if (drm_mm_initialized(&chan->ramin_heap))
        drm_mm_takedown(&chan->ramin_heap);
    nouveau_gpuobj_ref(NULL, &chan->ramin);
    kfree(chan);
    ret = nouveau_instobj_create(parent, engine, oclass, &node);
    *pobject = nv_object(node);
    if (ret)
        return ret;

    ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
    if (ret)
        return ret;

    node->base.addr = node->mem->offset;
    node->base.size = node->mem->size << 12;
    node->mem->page_shift = 12;
    return 0;
}

static void
nv50_instobj_dtor(struct nouveau_object *object)
{
    struct nv50_instobj_priv *node = (void *)object;
    struct nouveau_fb *pfb = nouveau_fb(object);
    pfb->ram.put(pfb, &node->mem);
    nouveau_instobj_destroy(&node->base);
}

static u32
nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
{
    struct nv50_instmem_priv *priv = (void *)object->engine;
    struct nv50_instobj_priv *node = (void *)object;
    unsigned long flags;
    u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
    u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
    u32 data;

    spin_lock_irqsave(&priv->lock, flags);
    if (unlikely(priv->addr != base)) {
        nv_wr32(priv, 0x001700, base >> 16);
        priv->addr = base;
    }
    data = nv_rd32(priv, 0x700000 + addr);
    spin_unlock_irqrestore(&priv->lock, flags);
    return data;
}

static void
nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
{
    struct nv50_instmem_priv *priv = (void *)object->engine;
    struct nv50_instobj_priv *node = (void *)object;
    unsigned long flags;
    u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
    u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;

    spin_lock_irqsave(&priv->lock, flags);
    if (unlikely(priv->addr != base)) {
        nv_wr32(priv, 0x001700, base >> 16);
        priv->addr = base;
    }
    nv_wr32(priv, 0x700000 + addr, data);
    spin_unlock_irqrestore(&priv->lock, flags);
}
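
The rd32/wr32 pair above implements a sliding 1 MiB window: register 0x1700 selects which 1 MiB-aligned chunk of VRAM appears in the 0x700000 aperture, the absolute VRAM offset is split into a window base (high bits) and an in-window offset (low 20 bits), and the window is only reprogrammed when an access falls outside the cached base. A self-contained C simulation of the same scheme (the vram array and set_window() model the hardware; all names are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_MASK 0xfffffULL             /* 1 MiB window */

static uint32_t vram[1 << 20];             /* simulated VRAM, 4 MiB */
static uint64_t window_base = ~0ULL;       /* cached 0x1700 value, as in priv->addr */

static void set_window(uint64_t base)      /* models nv_wr32(0x1700, base >> 16) */
{
    window_base = base;
}

static uint32_t pramin_rd32(uint64_t vram_addr)
{
    uint64_t base = vram_addr & ~WINDOW_MASK;  /* window-aligned base */
    uint32_t off  = vram_addr &  WINDOW_MASK;  /* offset inside window */

    if (base != window_base)                   /* slide only when needed */
        set_window(base);
    return vram[(window_base + off) / 4];      /* models nv_rd32(0x700000 + off) */
}

int main(void)
{
    vram[0x123454 / 4] = 0xcafe;
    printf("0x%x\n", (unsigned)pramin_rd32(0x123454));  /* prints 0xcafe */
    return 0;
}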

static struct nouveau_oclass
nv50_instobj_oclass = {
    .ofuncs = &(struct nouveau_ofuncs) {
        .ctor = nv50_instobj_ctor,
        .dtor = nv50_instobj_dtor,
        .init = _nouveau_instobj_init,
        .fini = _nouveau_instobj_fini,
        .rd32 = nv50_instobj_rd32,
        .wr32 = nv50_instobj_wr32,
    },
};

static int
nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
                   u32 size, u32 align, struct nouveau_object **pobject)
{
    struct nouveau_object *engine = nv_object(imem);
    return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
                               (void *)(unsigned long)align, size, pobject);
}

static int
nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
                 struct nouveau_channel **pchan)
nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                  struct nouveau_oclass *oclass, void *data, u32 size,
                  struct nouveau_object **pobject)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
    struct nouveau_channel *chan;
    int ret, i;

    chan = kzalloc(sizeof(*chan), GFP_KERNEL);
    if (!chan)
        return -ENOMEM;
    chan->dev = dev;

    ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
    if (ret) {
        nv50_channel_del(&chan);
        return ret;
    }

    ret = drm_mm_init(&chan->ramin_heap, pgd, chan->ramin->size - pgd);
    if (ret) {
        nv50_channel_del(&chan);
        return ret;
    }

    ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
    if (ret) {
        nv50_channel_del(&chan);
        return ret;
    }

    for (i = 0; i < 0x4000; i += 8) {
        nv_wo32(chan->vm_pd, i + 0, 0x00000000);
        nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
    }

    ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
    if (ret) {
        nv50_channel_del(&chan);
        return ret;
    }

    *pchan = chan;
    return 0;
}

int
nv50_instmem_init(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv50_instmem_priv *priv;
    struct nouveau_channel *chan;
    struct nouveau_vm *vm;
    int ret, i;
    u32 tmp;
    int ret;

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    dev_priv->engine.instmem.priv = priv;

    /* Save state, will restore at takedown. */
    for (i = 0x1700; i <= 0x1710; i += 4)
        priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);

    /* Global PRAMIN heap */
    ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
    if (ret) {
        NV_ERROR(dev, "Failed to init RAMIN heap\n");
        goto error;
    }

    /* BAR3 */
    ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
                         &dev_priv->bar3_vm);
    if (ret)
        goto error;

    ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
                             0x1000, NVOBJ_FLAG_DONT_MAP |
                             NVOBJ_FLAG_ZERO_ALLOC,
                             &dev_priv->bar3_vm->pgt[0].obj[0]);
    if (ret)
        goto error;
    dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

    nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

    ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
    if (ret)
        goto error;
    dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;

    ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
                              NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
                              NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
                              &priv->bar3_dmaobj);
    if (ret)
        goto error;

    nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
    nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
    nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));

    dev_priv->engine.instmem.flush(dev);
    dev_priv->ramin_available = true;

    tmp = nv_ro32(chan->ramin, 0);
    nv_wo32(chan->ramin, 0, ~tmp);
    if (nv_ro32(chan->ramin, 0) != ~tmp) {
        NV_ERROR(dev, "PRAMIN readback failed\n");
        ret = -EIO;
        goto error;
    }
    nv_wo32(chan->ramin, 0, tmp);

    /* BAR1 */
    ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
    if (ret)
        goto error;

    ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
    if (ret)
        goto error;
    nouveau_vm_ref(NULL, &vm, NULL);

    ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
                              NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
                              NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
                              &priv->bar1_dmaobj);
    if (ret)
        goto error;

    nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
    for (i = 0; i < 8; i++)
        nv_wr32(dev, 0x1900 + (i*4), 0);

    /* Create shared channel VM, space is reserved at the beginning
     * to catch "NULL pointer" references
     */
    ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
                         &dev_priv->chan_vm);
    ret = nouveau_instmem_create(parent, engine, oclass, &priv);
    *pobject = nv_object(priv);
    if (ret)
        return ret;

    return 0;

error:
    nv50_instmem_takedown(dev);
    return ret;
}

void
nv50_instmem_takedown(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
    struct nouveau_channel *chan = dev_priv->channels.ptr[0];
    int i;

    NV_DEBUG(dev, "\n");

    if (!priv)
        return;

    dev_priv->ramin_available = false;

    nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

    for (i = 0x1700; i <= 0x1710; i += 4)
        nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

    nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
    nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

    nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
    dev_priv->channels.ptr[127] = 0;
    nv50_channel_del(&dev_priv->channels.ptr[0]);

    nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
    nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

    if (drm_mm_initialized(&dev_priv->ramin_heap))
        drm_mm_takedown(&dev_priv->ramin_heap);

    dev_priv->engine.instmem.priv = NULL;
    kfree(priv);
}

int
nv50_instmem_suspend(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    dev_priv->ramin_available = false;
    spin_lock_init(&priv->lock);
    priv->base.alloc = nv50_instmem_alloc;
    return 0;
}

void
nv50_instmem_resume(struct drm_device *dev)
static int
nv50_instmem_fini(struct nouveau_object *object, bool suspend)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
    struct nouveau_channel *chan = dev_priv->channels.ptr[0];
    int i;

    /* Poke the relevant regs, and pray it works :) */
    nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
    nv_wr32(dev, NV50_PUNK_UNK1710, 0);
    nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
            NV50_PUNK_BAR_CFG_BASE_VALID);
    nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
            NV50_PUNK_BAR1_CTXDMA_VALID);
    nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
            NV50_PUNK_BAR3_CTXDMA_VALID);

    for (i = 0; i < 8; i++)
        nv_wr32(dev, 0x1900 + (i*4), 0);

    dev_priv->ramin_available = true;
    struct nv50_instmem_priv *priv = (void *)object;
    priv->addr = ~0ULL;
    return nouveau_instmem_fini(&priv->base, suspend);
}

struct nv50_gpuobj_node {
    struct nouveau_mem *vram;
    struct nouveau_vma chan_vma;
    u32 align;
struct nouveau_oclass
nv50_instmem_oclass = {
    .handle = NV_SUBDEV(INSTMEM, 0x50),
    .ofuncs = &(struct nouveau_ofuncs) {
        .ctor = nv50_instmem_ctor,
        .dtor = _nouveau_instmem_dtor,
        .init = _nouveau_instmem_init,
        .fini = nv50_instmem_fini,
    },
};

int
nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
                 u32 size, u32 align)
{
    struct drm_device *dev = gpuobj->dev;
    struct nv50_gpuobj_node *node = NULL;
    int ret;

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (!node)
        return -ENOMEM;
    node->align = align;

    size = (size + 4095) & ~4095;
    align = max(align, (u32)4096);

    ret = nvfb_vram_get(dev, size, align, 0, 0x800, &node->vram);
    if (ret) {
        kfree(node);
        return ret;
    }

    gpuobj->vinst = node->vram->offset;
    gpuobj->size = size;
    gpuobj->node = node;
    return 0;
}

void
nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
    struct drm_device *dev = gpuobj->dev;
    struct nv50_gpuobj_node *node;

    node = gpuobj->node;
    gpuobj->node = NULL;

    if (node->chan_vma.node) {
        nouveau_vm_unmap(&node->chan_vma);
        nouveau_vm_put(&node->chan_vma);
    }
    nvfb_vram_put(dev, &node->vram);
    kfree(node);
}

int
nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
    struct nv50_gpuobj_node *node = gpuobj->node;
    int ret;

    ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
                         NV_MEM_ACCESS_RW, &node->vram->bar_vma);
    if (ret)
        return ret;

    nouveau_vm_map(&node->vram->bar_vma, node->vram);
    gpuobj->pinst = node->vram->bar_vma.offset;
    return 0;
}

void
nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
    struct nv50_gpuobj_node *node = gpuobj->node;

    if (node->vram->bar_vma.node) {
        nouveau_vm_unmap(&node->vram->bar_vma);
        nouveau_vm_put(&node->vram->bar_vma);
    }
}

void
nv50_instmem_flush(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    unsigned long flags;

    spin_lock_irqsave(&dev_priv->vm_lock, flags);
    nv_wr32(dev, 0x00330c, 0x00000001);
    if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
        NV_ERROR(dev, "PRAMIN flush timeout\n");
    spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}

void
nv84_instmem_flush(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    unsigned long flags;

    spin_lock_irqsave(&dev_priv->vm_lock, flags);
    nv_wr32(dev, 0x070000, 0x00000001);
    if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
        NV_ERROR(dev, "PRAMIN flush timeout\n");
    spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
@@ -1,222 +0,0 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include <subdev/vm.h>

struct nvc0_instmem_priv {
    struct nouveau_gpuobj *bar1_pgd;
    struct nouveau_channel *bar1;
    struct nouveau_gpuobj *bar3_pgd;
    struct nouveau_channel *bar3;
};

int
nvc0_instmem_suspend(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    dev_priv->ramin_available = false;
    return 0;
}

void
nvc0_instmem_resume(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;

    nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
    nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
    nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
    dev_priv->ramin_available = true;
}

static void
nvc0_channel_del(struct nouveau_channel **pchan)
{
    struct nouveau_channel *chan;

    chan = *pchan;
    *pchan = NULL;
    if (!chan)
        return;

    nouveau_vm_ref(NULL, &chan->vm, NULL);
    if (drm_mm_initialized(&chan->ramin_heap))
        drm_mm_takedown(&chan->ramin_heap);
    nouveau_gpuobj_ref(NULL, &chan->ramin);
    kfree(chan);
}

static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
                 struct nouveau_channel **pchan,
                 struct nouveau_gpuobj *pgd, u64 vm_size)
{
    struct nouveau_channel *chan;
    int ret;

    chan = kzalloc(sizeof(*chan), GFP_KERNEL);
    if (!chan)
        return -ENOMEM;
    chan->dev = dev;

    ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
    if (ret) {
        nvc0_channel_del(&chan);
        return ret;
    }

    ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
    if (ret) {
        nvc0_channel_del(&chan);
        return ret;
    }

    ret = nouveau_vm_ref(vm, &chan->vm, NULL);
    if (ret) {
        nvc0_channel_del(&chan);
        return ret;
    }

    nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
    nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
    nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
    nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));

    *pchan = chan;
    return 0;
}
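
The four nv_wo32() calls at the end of the (removed) nvc0_channel_new() write the Fermi channel instance-block words that bind a page directory and VM limit: 0x200/0x204 hold the PGD address, 0x208/0x20c the limit. A self-contained C sketch of that layout, with inst_wr32() as a hypothetical stand-in for nv_wo32(chan->ramin, ...):

#include <stdint.h>

static uint32_t inst[0x1000 / 4];   /* simulated channel instance block */

static void inst_wr32(uint32_t offset, uint32_t data)
{
    inst[offset / 4] = data;        /* nv_wo32(chan->ramin, offset, data) */
}

static void fermi_chan_bind_vm(uint64_t pgd_addr, uint64_t vm_size)
{
    inst_wr32(0x0200, (uint32_t)pgd_addr);               /* PGD address, low  */
    inst_wr32(0x0204, (uint32_t)(pgd_addr >> 32));       /* PGD address, high */
    inst_wr32(0x0208, (uint32_t)(vm_size - 1));          /* VM limit, low     */
    inst_wr32(0x020c, (uint32_t)((vm_size - 1) >> 32));  /* VM limit, high    */
}

int main(void)
{
    fermi_chan_bind_vm(0x40000000ULL, 1ULL << 40);       /* example values */
    return 0;
}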

int
nvc0_instmem_init(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
    struct pci_dev *pdev = dev->pdev;
    struct nvc0_instmem_priv *priv;
    struct nouveau_vm *vm = NULL;
    int ret;

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    pinstmem->priv = priv;

    /* BAR3 VM */
    ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
                         &dev_priv->bar3_vm);
    if (ret)
        goto error;

    ret = nouveau_gpuobj_new(dev, NULL,
                             (pci_resource_len(pdev, 3) >> 12) * 8, 0,
                             NVOBJ_FLAG_DONT_MAP |
                             NVOBJ_FLAG_ZERO_ALLOC,
                             &dev_priv->bar3_vm->pgt[0].obj[0]);
    if (ret)
        goto error;
    dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

    nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

    ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
    if (ret)
        goto error;

    ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
    if (ret)
        goto error;
    nouveau_vm_ref(NULL, &vm, NULL);

    ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
                           priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
    if (ret)
        goto error;

    /* BAR1 VM */
    ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
    if (ret)
        goto error;

    ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
                             NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
    if (ret)
        goto error;

    ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
    if (ret)
        goto error;
    nouveau_vm_ref(NULL, &vm, NULL);

    ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
                           priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
    if (ret)
        goto error;

    /* channel vm */
    ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
                         &dev_priv->chan_vm);
    if (ret)
        goto error;

    nvc0_instmem_resume(dev);
    return 0;
error:
    nvc0_instmem_takedown(dev);
    return ret;
}

void
nvc0_instmem_takedown(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
    struct nouveau_vm *vm = NULL;

    nvc0_instmem_suspend(dev);

    nv_wr32(dev, 0x1704, 0x00000000);
    nv_wr32(dev, 0x1714, 0x00000000);

    nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

    nvc0_channel_del(&priv->bar1);
    nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
    nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);

    nvc0_channel_del(&priv->bar3);
    nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
    nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
    nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
    nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
    nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

    dev_priv->engine.instmem.priv = NULL;
    kfree(priv);
}
@@ -22,22 +22,24 @@
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>

void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
    struct nouveau_vm *vm = vma->vm;
    struct nouveau_vmmgr *vmm = vm->vmm;
    struct nouveau_mm_node *r;
    int big = vma->node->type != vm->spg_shift;
    int big = vma->node->type != vmm->spg_shift;
    u32 offset = vma->node->offset + (delta >> 12);
    u32 bits = vma->node->type - 12;
    u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
    u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
    u32 max = 1 << (vm->pgt_bits - bits);
    u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
    u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
    u32 max = 1 << (vmm->pgt_bits - bits);
    u32 end, len;

    delta = 0;
@@ -53,7 +55,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
    end = max;
    len = end - pte;

    vm->map(vma, pgt, node, pte, len, phys, delta);
    vmm->map(vma, pgt, node, pte, len, phys, delta);

    num -= len;
    pte += len;
@@ -67,7 +69,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
    }
}

    vm->flush(vm);
    vmm->flush(vm);
}
|
||||
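For reference, the pde/pte arithmetic above can be exercised on its own. A minimal standalone sketch (not part of the commit), assuming the nv50 values configured later in this series (pgt_bits = 29 - 12, 4KiB small pages); the address is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
uint32_t pgt_bits = 29 - 12; /* assumed nv50 value: one PDE spans 2^29 bytes */
uint32_t type = 12; /* small-page shift */
uint32_t bits = type - 12; /* 0 for small pages */
uint32_t offset = 0x20004000 >> 12; /* node offsets count 4KiB units */

uint32_t pde = offset >> pgt_bits; /* which page table */
uint32_t pte = (offset & ((1 << pgt_bits) - 1)) >> bits; /* slot within it */
uint32_t max = 1 << (pgt_bits - bits); /* slots per table */

printf("pde=%u pte=%u (%u ptes/table)\n", pde, pte, max); /* pde=1 pte=4 */
return 0;
}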

void
@ -81,13 +83,14 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
struct nouveau_vmmgr *vmm = vm->vmm;
int big = vma->node->type != vmm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vm->pgt_bits - bits);
u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vmm->pgt_bits - bits);
unsigned m, sglen;
u32 end, len;
int i;
@ -105,7 +108,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
for (m = 0; m < len; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

vm->map_sg(vma, pgt, mem, pte, 1, &addr);
vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
num--;
pte++;

@ -120,7 +123,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
for (; m < sglen; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

vm->map_sg(vma, pgt, mem, pte, 1, &addr);
vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
num--;
pte++;
if (num == 0)
@ -130,7 +133,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,

}
finish:
vm->flush(vm);
vmm->flush(vm);
}

void
@ -138,14 +141,15 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_vmmgr *vmm = vm->vmm;
dma_addr_t *list = mem->pages;
int big = vma->node->type != vm->spg_shift;
int big = vma->node->type != vmm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vm->pgt_bits - bits);
u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vmm->pgt_bits - bits);
u32 end, len;

while (num) {
@ -156,7 +160,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;

vm->map_sg(vma, pgt, mem, pte, len, list);
vmm->map_sg(vma, pgt, mem, pte, len, list);

num -= len;
pte += len;
@ -167,20 +171,21 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
}
}

vm->flush(vm);
vmm->flush(vm);
}

void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
struct nouveau_vmmgr *vmm = vm->vmm;
int big = vma->node->type != vmm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
u32 num = length >> vma->node->type;
u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vm->pgt_bits - bits);
u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
u32 max = 1 << (vmm->pgt_bits - bits);
u32 end, len;

while (num) {
@ -191,7 +196,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
end = max;
len = end - pte;

vm->unmap(pgt, pte, len);
vmm->unmap(pgt, pte, len);

num -= len;
pte += len;
@ -201,7 +206,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
}
}

vm->flush(vm);
vmm->flush(vm);
}

void
@ -213,6 +218,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
struct nouveau_vmmgr *vmm = vm->vmm;
struct nouveau_vm_pgd *vpgd;
struct nouveau_vm_pgt *vpgt;
struct nouveau_gpuobj *pgt;
@ -227,7 +233,7 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
vpgt->obj[big] = NULL;

list_for_each_entry(vpgd, &vm->pgd_list, head) {
vm->map_pgt(vpgd->obj, pde, vpgt->obj);
vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
}

mutex_unlock(&vm->mm.mutex);
@ -239,18 +245,19 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
struct nouveau_vmmgr *vmm = vm->vmm;
struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
struct nouveau_vm_pgd *vpgd;
struct nouveau_gpuobj *pgt;
int big = (type != vm->spg_shift);
int big = (type != vmm->spg_shift);
u32 pgt_size;
int ret;

pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
pgt_size = (1 << (vmm->pgt_bits + 12)) >> type;
pgt_size *= 8;

mutex_unlock(&vm->mm.mutex);
ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &pgt);
mutex_lock(&vm->mm.mutex);
if (unlikely(ret))
@ -266,7 +273,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)

vpgt->obj[big] = pgt;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
vm->map_pgt(vpgd->obj, pde, vpgt->obj);
vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
}

return 0;
@ -276,6 +283,7 @@ int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *vma)
{
struct nouveau_vmmgr *vmm = vm->vmm;
u32 align = (1 << page_shift) >> 12;
u32 msize = size >> 12;
u32 fpde, lpde, pde;
@ -289,11 +297,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
return ret;
}

fpde = (vma->node->offset >> vm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
fpde = (vma->node->offset >> vmm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
for (pde = fpde; pde <= lpde; pde++) {
struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
int big = (vma->node->type != vm->spg_shift);
int big = (vma->node->type != vmm->spg_shift);

if (likely(vpgt->refcount[big])) {
vpgt->refcount[big]++;
@ -321,90 +329,67 @@ void
nouveau_vm_put(struct nouveau_vma *vma)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_vmmgr *vmm = vm->vmm;
u32 fpde, lpde;

if (unlikely(vma->node == NULL))
return;
fpde = (vma->node->offset >> vm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
fpde = (vma->node->offset >> vmm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

mutex_lock(&vm->mm.mutex);
nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
nouveau_mm_free(&vm->mm, &vma->node);
mutex_unlock(&vm->mm.mutex);
}

int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
struct nouveau_vm **pvm)
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_vm *vm;
u64 mm_length = (offset + length) - mm_offset;
u32 block, pgt_bits;
int ret;

vm = kzalloc(sizeof(*vm), GFP_KERNEL);
vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;

if (dev_priv->card_type == NV_50) {
vm->map_pgt = nv50_vm_map_pgt;
vm->map = nv50_vm_map;
vm->map_sg = nv50_vm_map_sg;
vm->unmap = nv50_vm_unmap;
vm->flush = nv50_vm_flush;
vm->spg_shift = 12;
vm->lpg_shift = 16;
INIT_LIST_HEAD(&vm->pgd_list);
vm->vmm = vmm;
vm->refcount = 1;
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

pgt_bits = 29;
block = (1 << pgt_bits);
if (length < block)
block = length;

} else
if (dev_priv->card_type >= NV_C0) {
vm->map_pgt = nvc0_vm_map_pgt;
vm->map = nvc0_vm_map;
vm->map_sg = nvc0_vm_map_sg;
vm->unmap = nvc0_vm_unmap;
vm->flush = nvc0_vm_flush;
vm->spg_shift = 12;
vm->lpg_shift = 17;
pgt_bits = 27;
block = 4096;
} else {
kfree(vm);
return -ENOSYS;
}

vm->fpde = offset >> pgt_bits;
vm->lpde = (offset + length - 1) >> pgt_bits;
vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
if (!vm->pgt) {
kfree(vm);
return -ENOMEM;
}

INIT_LIST_HEAD(&vm->pgd_list);
vm->dev = dev;
vm->refcount = 1;
vm->pgt_bits = pgt_bits - 12;

ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
block >> 12);
if (ret) {
kfree(vm->pgt);
kfree(vm);
return ret;
}

*pvm = vm;
return 0;
}

int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **pvm)
{
struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
return vmm->create(vmm, offset, length, mm_offset, pvm);
}

static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
struct nouveau_vmmgr *vmm = vm->vmm;
struct nouveau_vm_pgd *vpgd;
int i;

@ -419,7 +404,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)

mutex_lock(&vm->mm.mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
list_add(&vpgd->head, &vm->pgd_list);
mutex_unlock(&vm->mm.mutex);
return 0;

150 drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c Normal file
@ -0,0 +1,150 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/

#include <core/gpuobj.h>

#include "nv04.h"

#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE ( 4 * 1024)

/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/

static void
nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = 0x00008 + (pte * 4);
while (cnt) {
u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
u32 phys = (u32)*list++;
while (cnt && page--) {
nv_wo32(pgt, pte, phys | 3);
phys += NV04_PDMA_PAGE;
pte += 4;
cnt -= 1;
}
}
}
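As a worked example of the entry format above: the table is a DMA context object whose first two words (written in nv04_vmmgr_ctor() below) form a header, which the 0x00008 byte offset skips; each entry is then the 32-bit bus address with low flag bits OR'd in. A standalone sketch, with the address made up and the flag meaning (present/writable) being a reading of the code rather than something the commit states:

#include <stdint.h>
#include <stdio.h>

#define NV04_PDMA_PAGE (4 * 1024)

int
main(void)
{
uint32_t pte = 5; /* sixth 4KiB GART page */
uint32_t phys = 0x12345000; /* example bus address, page aligned */

uint32_t byte_off = 0x00008 + (pte * 4); /* skip 8-byte ctxdma header */
uint32_t value = phys | 3; /* address plus flag bits, as in the code */

printf("pgt[0x%05x] = 0x%08x\n", byte_off, value);
return 0;
}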

static void
nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = 0x00008 + (pte * 4);
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}
}

static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}

/*******************************************************************************
* VM object
******************************************************************************/

int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
struct nouveau_vm **pvm)
{
return -EINVAL;
}

/*******************************************************************************
* VMMGR subdev
******************************************************************************/

static int
nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_vmmgr_priv *priv;
struct nouveau_gpuobj *dma;
int ret;

ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
"pcigart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

priv->base.create = nv04_vm_create;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv04_vm_map_sg;
priv->base.unmap = nv04_vm_unmap;
priv->base.flush = nv04_vm_flush;

ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;

ret = nouveau_gpuobj_new(parent, NULL,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
8, 16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
dma = priv->vm->pgt[0].obj[0];
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;

nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
return 0;
}

void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
if (priv->vm) {
nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &priv->vm, NULL);
}
if (priv->page) {
pci_unmap_page(nv_device(priv)->pdev, priv->null,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(priv->page);
}
nouveau_vmmgr_destroy(&priv->base);
}

struct nouveau_oclass
nv04_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = _nouveau_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
13 drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h Normal file
@ -0,0 +1,13 @@
#ifndef __NV04_VMMGR_PRIV__
#define __NV04_VMMGR_PRIV__

#include <subdev/vm.h>

struct nv04_vmmgr_priv {
struct nouveau_vmmgr base;
struct nouveau_vm *vm;
struct page *page;
dma_addr_t null;
};

#endif
149 drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c Normal file
@ -0,0 +1,149 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/vm.h>

#include "nv04.h"

#define NV41_GART_SIZE (512 * 1024 * 1024)
#define NV41_GART_PAGE ( 4 * 1024)

/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/

static void
nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
while (cnt) {
u32 page = PAGE_SIZE / NV41_GART_PAGE;
u64 phys = (u64)*list++;
while (cnt && page--) {
nv_wo32(pgt, pte, (phys >> 7) | 1);
phys += NV41_GART_PAGE;
pte += 4;
cnt -= 1;
}
}
}
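The nv41 entries differ from nv04: the bus address is stored shifted right by 7, with bit 0 used as a valid bit, so a 32-bit PTE can reach beyond a 32-bit address space. A standalone sketch of one entry (the address is an example; the valid-bit interpretation is an assumption drawn from the code above):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
uint64_t phys = 0x3fff0000ULL; /* example 4KiB-aligned bus address */
uint32_t pte = (uint32_t)(phys >> 7) | 1; /* address >> 7, bit 0 = valid */

printf("pte = 0x%08x\n", pte);
return 0;
}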

static void
nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}
}

static void
nv41_vm_flush(struct nouveau_vm *vm)
{
struct nv04_vmmgr_priv *priv = (void *)vm->vmm;

mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
if (!nv_wait(priv, 0x100810, 0x00000100, 0x00000100)) {
nv_warn(priv, "flush timeout, 0x%08x\n",
nv_rd32(priv, 0x100810));
}
nv_wr32(priv, 0x100810, 0x00000000);
mutex_unlock(&nv_subdev(priv)->mutex);
}

/*******************************************************************************
* VMMGR subdev
******************************************************************************/

static int
nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_vmmgr_priv *priv;
int ret;

ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
"pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

priv->base.create = nv04_vm_create;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv41_vm_map_sg;
priv->base.unmap = nv41_vm_unmap;
priv->base.flush = nv41_vm_flush;

ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;

ret = nouveau_gpuobj_new(parent, NULL,
(NV41_GART_SIZE / NV41_GART_PAGE) * 4,
16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;

return 0;
}

static int
nv41_vmmgr_init(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
int ret;

ret = nouveau_vmmgr_init(&priv->base);
if (ret)
return ret;

nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
nv_wr32(priv, 0x100820, 0x00000000);
return 0;
}

struct nouveau_oclass
nv41_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x41),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv41_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = nv41_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
257 drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c Normal file
@ -0,0 +1,257 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/

#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/vm.h>

#include "nv04.h"

#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE ( 4 * 1024)

/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/

static void
nv44_vm_flush_priv(struct nv04_vmmgr_priv *priv, u32 base, u32 size)
{
nv_wr32(priv, 0x100814, (size - 1) << 12);
nv_wr32(priv, 0x100808, base | 0x20);
if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
nv_wr32(priv, 0x100808, 0x00000000);
}

static void
nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
dma_addr_t *list, u32 pte, u32 cnt)
{
u32 base = (pte << 2) & ~0x0000000f;
u32 tmp[4];

tmp[0] = nv_ro32(pgt, base + 0x0);
tmp[1] = nv_ro32(pgt, base + 0x4);
tmp[2] = nv_ro32(pgt, base + 0x8);
tmp[3] = nv_ro32(pgt, base + 0xc);
while (cnt--) {
u32 addr = list ? (*list++ >> 12) : (null >> 12);
switch (pte++ & 0x3) {
case 0:
tmp[0] &= ~0x07ffffff;
tmp[0] |= addr;
break;
case 1:
tmp[0] &= ~0xf8000000;
tmp[0] |= addr << 27;
tmp[1] &= ~0x003fffff;
tmp[1] |= addr >> 5;
break;
case 2:
tmp[1] &= ~0xffc00000;
tmp[1] |= addr << 22;
tmp[2] &= ~0x0001ffff;
tmp[2] |= addr >> 10;
break;
case 3:
tmp[2] &= ~0xfffe0000;
tmp[2] |= addr << 17;
tmp[3] &= ~0x00000fff;
tmp[3] |= addr >> 15;
break;
}
}

nv_wo32(pgt, base + 0x0, tmp[0]);
nv_wo32(pgt, base + 0x4, tmp[1]);
nv_wo32(pgt, base + 0x8, tmp[2]);
nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
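nv44_vm_fill() exists because nv44 PTEs are not word aligned: four 27-bit page-frame numbers are packed back to back into a 16-byte group, with bit 30 of the last word (the 0x40000000 above) marking the group valid, so partial updates must read, modify, and write back the whole group. A standalone sketch of the packing, matching the aligned fast path in nv44_vm_map_sg() below, with example addresses:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
uint64_t pages[4] = { 0x10000000, 0x10001000, 0x10002000, 0x10003000 };
uint32_t tmp[4], word[4];
int i;

for (i = 0; i < 4; i++)
tmp[i] = (uint32_t)(pages[i] >> 12); /* 27-bit PFNs */

word[0] = tmp[0] >> 0 | tmp[1] << 27;
word[1] = tmp[1] >> 5 | tmp[2] << 22;
word[2] = tmp[2] >> 10 | tmp[3] << 17;
word[3] = tmp[3] >> 15 | 0x40000000; /* group valid bit */

for (i = 0; i < 4; i++)
printf("word[%d] = 0x%08x\n", i, word[i]);
return 0;
}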

static void
nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
u32 base = pte << 12;
u32 size = cnt;
u32 tmp[4];
int i;

if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
nv44_vm_fill(pgt, priv->null, list, pte, part);
pte += part;
list += part;
cnt -= part;
}

while (cnt >= 4) {
for (i = 0; i < 4; i++)
tmp[i] = *list++ >> 12;
nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
cnt -= 4;
}

if (cnt)
nv44_vm_fill(pgt, priv->null, list, pte, cnt);
nv44_vm_flush_priv(priv, base, size);
}

static void
nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
u32 base = pte << 12;
u32 size = cnt;

if (pte & 3) {
u32 max = 4 - (pte & 3);
u32 part = (cnt > max) ? max : cnt;
nv44_vm_fill(pgt, priv->null, NULL, pte, part);
pte += part;
cnt -= part;
}

while (cnt >= 4) {
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
nv_wo32(pgt, pte++ * 4, 0x00000000);
cnt -= 4;
}

if (cnt)
nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
nv44_vm_flush_priv(priv, base, size);
}

static void
nv44_vm_flush(struct nouveau_vm *vm)
{
}

/*******************************************************************************
* VMMGR subdev
******************************************************************************/

static int
nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nv04_vmmgr_priv *priv;
int ret;

ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
"pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

priv->base.create = nv04_vm_create;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv44_vm_map_sg;
priv->base.unmap = nv44_vm_unmap;
priv->base.flush = nv44_vm_flush;

priv->page = alloc_page(GFP_DMA32 | GFP_KERNEL);
if (priv->page) {
priv->null = pci_map_page(device->pdev, priv->page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(device->pdev, priv->null)) {
__free_page(priv->page);
priv->page = NULL;
priv->null = 0;
}
}

if (!priv->page)
nv_warn(priv, "unable to allocate dummy page\n");

ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;

ret = nouveau_gpuobj_new(parent, NULL,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;

return 0;
}

static int
nv44_vmmgr_init(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
u32 addr;
int ret;

ret = nouveau_vmmgr_init(&priv->base);
if (ret)
return ret;

/* calculate vram address of this PRAMIN block, object must be
* allocated on 512KiB alignment, and not exceed a total size
* of 512KiB for this to work correctly
*/
addr = nv_rd32(priv, 0x10020c);
addr -= ((gart->addr >> 19) + 1) << 19;

nv_wr32(priv, 0x100850, 0x80000000);
nv_wr32(priv, 0x100818, priv->null);
nv_wr32(priv, 0x100804, NV44_GART_SIZE);
nv_wr32(priv, 0x100850, 0x00008000);
nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
nv_wr32(priv, 0x100820, 0x00000000);
nv_wr32(priv, 0x10082c, 0x00000001);
nv_wr32(priv, 0x100800, addr | 0x00000010);
return 0;
}
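A worked example of the PRAMIN address calculation in the init routine above, with made-up register and object values (the arithmetic itself is the commit's: round the table's instmem offset up to the next 512KiB boundary and subtract it from the top-of-VRAM value read from 0x10020c):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
uint32_t vram_top = 0x10000000; /* assumed nv_rd32(0x10020c) result: 256MiB */
uint32_t gart_inst = 0x00020000; /* assumed gart->addr within instmem */

uint32_t addr = vram_top - (((gart_inst >> 19) + 1) << 19);

printf("GART table VRAM address: 0x%08x\n", addr); /* 0x0ff80000 */
return 0;
}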

struct nouveau_oclass
nv44_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x44),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv44_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = nv44_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
@ -22,11 +22,18 @@
* Authors: Ben Skeggs
*/

#include "drmP.h"
#include <core/device.h>
#include <core/gpuobj.h>

#include "nouveau_drv.h"
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nv50_vmmgr_priv {
struct nouveau_vmmgr base;
spinlock_t lock;
};

void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
u32 coverage = 0;

if (pgt[0]) {
phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
coverage = (pgt[0]->size >> 3) << 12;
} else
if (pgt[1]) {
phys = 0x00000001 | pgt[1]->vinst; /* present */
phys = 0x00000001 | pgt[1]->addr; /* present */
coverage = (pgt[1]->size >> 3) << 16;
}

@ -73,15 +80,14 @@ void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
u32 comp = (mem->memtype & 0x180) >> 7;
u32 block, target;
int i;

/* IGPs don't have real VRAM, re-target to stolen system memory */
target = 0;
if (nvfb_vram_sys_base(dev_priv->dev)) {
phys += nvfb_vram_sys_base(dev_priv->dev);
if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
target = 3;
}

@ -145,33 +151,81 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
void
nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nouveau_engine *engine;
int i;

pinstmem->flush(vm->dev);

/* BAR */
if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
}

for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (atomic_read(&vm->engref[i]))
dev_priv->eng[i]->tlb_flush(vm->dev, i);
#if 0
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if (atomic_read(&vm->engref[i])) {
engine = nouveau_engine(vm->vmm, i);
if (engine && engine->tlb_flush)
engine->tlb_flush(engine);
}
}
#else
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
#endif
}

void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
unsigned long flags;

spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
spin_lock_irqsave(&priv->lock, flags);
nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
nv_error(subdev, "vm flush timeout: engine %d\n", engine);
spin_unlock_irqrestore(&priv->lock, flags);
}

static int
nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **pvm)
{
u32 block = (1 << (vmm->pgt_bits + 12));
if (block > length)
block = length;

return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
}

static int
nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv50_vmmgr_priv *priv;
int ret;

ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

priv->base.pgt_bits = 29 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 16;
priv->base.create = nv50_vm_create;
priv->base.map_pgt = nv50_vm_map_pgt;
priv->base.map = nv50_vm_map;
priv->base.map_sg = nv50_vm_map_sg;
priv->base.unmap = nv50_vm_unmap;
priv->base.flush = nv50_vm_flush;
spin_lock_init(&priv->lock);
return 0;
}

struct nouveau_oclass
nv50_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_vmmgr_ctor,
.dtor = _nouveau_vmmgr_dtor,
.init = _nouveau_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
@ -22,11 +22,18 @@
* Authors: Ben Skeggs
*/

#include "drmP.h"
#include <core/device.h>
#include <core/gpuobj.h>

#include "nouveau_drv.h"
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nvc0_vmmgr_priv {
struct nouveau_vmmgr base;
spinlock_t lock;
};

void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
struct nouveau_gpuobj *pgt[2])
@ -34,9 +41,9 @@ nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
u32 pde[2] = { 0, 0 };

if (pgt[0])
pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
if (pgt[1])
pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

nv_wo32(pgd, (index * 8) + 0, pde[0]);
nv_wo32(pgd, (index * 8) + 4, pde[1]);
@ -99,38 +106,82 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
}
}

void
nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
{
struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
unsigned long flags;

/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases
*/
spin_lock_irqsave(&priv->lock, flags);
if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
nv_rd32(subdev, 0x100c80), type);
}

nv_wr32(subdev, 0x100cb8, addr >> 8);
nv_wr32(subdev, 0x100cbc, 0x80000000 | type);

/* wait for flush to be queued? */
if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
nv_rd32(subdev, 0x100c80), type);
}
spin_unlock_irqrestore(&priv->lock, flags);
}

void
nvc0_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
unsigned long flags;
u32 engine;

engine = 1;
if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
engine |= 4;

pinstmem->flush(vm->dev);

spin_lock_irqsave(&dev_priv->vm_lock, flags);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases
*/
if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
nv_rd32(dev, 0x100c80), engine);
}
nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
/* wait for flush to be queued? */
if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
nv_rd32(dev, 0x100c80), engine);
}
nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
}
spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}

static int
nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **pvm)
{
return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
}

static int
nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_vmmgr_priv *priv;
int ret;

ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;

priv->base.pgt_bits = 27 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 17;
priv->base.create = nvc0_vm_create;
priv->base.map_pgt = nvc0_vm_map_pgt;
priv->base.map = nvc0_vm_map;
priv->base.map_sg = nvc0_vm_map_sg;
priv->base.unmap = nvc0_vm_unmap;
priv->base.flush = nvc0_vm_flush;
spin_lock_init(&priv->lock);
return 0;
}

struct nouveau_oclass
nvc0_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_vmmgr_ctor,
.dtor = _nouveau_vmmgr_dtor,
.init = _nouveau_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
@ -34,7 +34,6 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include <core/mm.h>
#include <subdev/vm.h>
#include "nouveau_fence.h"
#include <core/ramht.h>

@ -114,9 +113,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.bdev = &dev_priv->ttm.bdev;

nvbo->page_shift = 12;
if (dev_priv->bar1_vm) {
if (dev_priv->chan_vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
}

nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@ -419,6 +418,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_TT:
if (dev_priv->card_type >= NV_50)
man->func = &nouveau_gart_manager;
else
if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
man->func = &nv04_gart_manager;
else
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
@ -1044,7 +1046,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
nouveau_vm_map(vma, new_mem->mm_node);
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
if (((struct nouveau_mem *)new_mem->mm_node)->sg)
nouveau_vm_map_sg_table(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
@ -1184,40 +1186,19 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
#endif
break;
case TTM_PL_VRAM:
{
struct nouveau_mem *node = mem->mm_node;
u8 page_shift;

if (!dev_priv->bar1_vm) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
}

if (dev_priv->card_type >= NV_C0)
page_shift = node->page_shift;
else
page_shift = 12;

ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
&node->bar_vma);
if (ret)
return ret;

nouveau_vm_map(&node->bar_vma, node);
if (ret) {
nouveau_vm_put(&node->bar_vma);
return ret;
}

mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
}
if (dev_priv->card_type >= NV_50) {
struct nouveau_mem *node = mem->mm_node;

ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
if (ret)
return ret;

mem->bus.offset = node->bar_vma.offset;
}
break;
default:
return -EINVAL;
@ -1231,14 +1212,13 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct nouveau_mem *node = mem->mm_node;

if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
if (mem->mem_type != TTM_PL_VRAM)
return;

if (!node->bar_vma.node)
return;

nouveau_vm_unmap(&node->bar_vma);
nouveau_vm_put(&node->bar_vma);
nvbar_unmap(dev_priv->dev, &node->bar_vma);
}

static int
@ -145,6 +145,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
/* allocate hw channel id */
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
if (dev_priv->card_type == NV_50 && chan->id == 0)
continue;

if (!dev_priv->channels.ptr[chan->id]) {
nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
@ -11,6 +11,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

void *nouveau_newpriv(struct drm_device *);

@ -438,3 +440,146 @@ nv50_fb_vm_trap(struct drm_device *dev, int disp)
struct nouveau_drm *drm = nouveau_newpriv(dev);
nv50_fb_trap(nouveau_fb(drm->device), disp);
}

#include <core/subdev/instmem/nv04.h>

struct nouveau_gpuobj *
nvimem_ramro(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nv04_instmem_priv *imem = (void *)nouveau_instmem(drm->device);
return imem->ramro;
}

struct nouveau_gpuobj *
nvimem_ramfc(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nv04_instmem_priv *imem = (void *)nouveau_instmem(drm->device);
return imem->ramfc;
}

int _nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_gpuobj *par,
int size, int align, u32 flags,
struct nouveau_gpuobj **pobj)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
int ret;

if (!par)
flags |= NVOBJ_FLAG_HEAP;

ret = nouveau_gpuobj_new(drm->device, nv_object(par), size, align,
flags, pobj);
if (ret)
return ret;

(*pobj)->dev = dev;
return 0;
}

u32 nv_ri32(struct drm_device *dev, u32 addr)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_instmem *imem = nouveau_instmem(drm->device);
return nv_ro32(imem, addr);
}

void nv_wi32(struct drm_device *dev, u32 addr, u32 data)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_instmem *imem = nouveau_instmem(drm->device);
nv_wo32(imem, addr, data);
}

u32 nvimem_reserved(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_instmem *imem = nouveau_instmem(drm->device);
return imem->reserved;
}

int
nvbar_map(struct drm_device *dev, struct nouveau_mem *mem, u32 flags,
struct nouveau_vma *vma)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_bar *bar = nouveau_bar(drm->device);
return bar->umap(bar, mem, flags, vma);
}

void
nvbar_unmap(struct drm_device *dev, struct nouveau_vma *vma)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_bar *bar = nouveau_bar(drm->device);
bar->unmap(bar, vma);
}

int
nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
struct nouveau_vma *vma)
{
struct nouveau_drm *drm = nouveau_newpriv(gpuobj->dev);
struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_instobj *iobj = (void *)
nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
struct nouveau_mem **mem = (void *)(iobj + 1);
struct nouveau_mem *node = *mem;

return bar->umap(bar, node, flags, vma);
}

void
nvimem_flush(struct drm_device *dev)
{
}

void _nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
nv50_vm_flush_engine(nv_subdev(drm->device), engine);
}

int _nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **pvm)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
return nouveau_vm_new(nv_device(drm->device), offset, length, mm_offset, pvm);
}

#include <core/subdev/vm/nv04.h>
struct nouveau_vm *
nv04vm_ref(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
struct nv04_vmmgr_priv *priv = (void *)vmm;
return priv->vm;
}

struct nouveau_gpuobj *
nv04vm_refdma(struct drm_device *dev)
{
struct nouveau_gpuobj *gpuobj = NULL;
nouveau_gpuobj_ref(nv04vm_ref(dev)->pgt[0].obj[0], &gpuobj);
return gpuobj;
}

void
nvvm_engref(struct nouveau_vm *vm, int eng, int ref)
{
atomic_add(ref, &vm->engref[eng]);
}

int
nvvm_spg_shift(struct nouveau_vm *vm)
{
return vm->vmm->spg_shift;
}

int
nvvm_lpg_shift(struct nouveau_vm *vm)
{
return vm->vmm->lpg_shift;
}
@ -82,4 +82,46 @@ int nvfb_vram_rank_B(struct drm_device *);

void nv50_fb_vm_trap(struct drm_device *, int);

struct nouveau_gpuobj *nvimem_ramro(struct drm_device *);
struct nouveau_gpuobj *nvimem_ramfc(struct drm_device *);

int _nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_gpuobj *par,
int size, int align, u32 flags,
struct nouveau_gpuobj **pobj);

u32 nv_ri32(struct drm_device *, u32);
void nv_wi32(struct drm_device *, u32, u32);
u32 nvimem_reserved(struct drm_device *);

void nvimem_flush(struct drm_device *);

void _nv50_vm_flush_engine(struct drm_device *dev, int engine);

int _nouveau_vm_new(struct drm_device *, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **);

struct nouveau_vma;
int nouveau_gpuobj_map_bar(struct nouveau_gpuobj *, u32, struct nouveau_vma *);

int
nvbar_map(struct drm_device *dev, struct nouveau_mem *mem, u32 flags,
struct nouveau_vma *vma);
void
nvbar_unmap(struct drm_device *dev, struct nouveau_vma *vma);

struct nouveau_vm *
nv04vm_ref(struct drm_device *dev);

struct nouveau_gpuobj *
nv04vm_refdma(struct drm_device *dev);

void
nvvm_engref(struct nouveau_vm *, int, int);

int
nvvm_spg_shift(struct nouveau_vm *);

int
nvvm_lpg_shift(struct nouveau_vm *);

#endif
@ -152,7 +152,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
struct drm_crtc *crtc;
@ -204,20 +203,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
}

ret = pinstmem->suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
goto out_abort;
}

NV_INFO(dev, "Suspending GPU objects...\n");
ret = nouveau_gpuobj_suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
pinstmem->resume(dev);
goto out_abort;
}

return 0;

out_abort:
@ -247,11 +232,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;

NV_INFO(dev, "Restoring GPU objects...\n");
nouveau_gpuobj_resume(dev);

NV_INFO(dev, "Reinitialising engines...\n");
engine->instmem.resume(dev);
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (dev_priv->eng[i])
dev_priv->eng[i]->init(dev, i);
@ -46,7 +46,9 @@
|
||||
#include "ttm/ttm_module.h"
|
||||
|
||||
#define XXX_THIS_IS_A_HACK
|
||||
#include <subdev/vm.h>
|
||||
#include <subdev/fb.h>
|
||||
#include <core/gpuobj.h>
|
||||
|
||||
enum blah {
|
||||
NV_MEM_TYPE_UNKNOWN = 0,
|
||||
@ -83,11 +85,20 @@ nouveau_fpriv(struct drm_file *file_priv)
|
||||
|
||||
struct nouveau_grctx;
|
||||
struct nouveau_mem;
|
||||
#include <subdev/vm.h>
|
||||
|
||||
#include <subdev/bios/pll.h>
|
||||
#include "nouveau_compat.h"
|
||||
|
||||
#define nouveau_gpuobj_new(d,c,s,a,f,o) \
|
||||
_nouveau_gpuobj_new((d), (c) ? ((struct nouveau_channel *)(c))->ramin : NULL, \
|
||||
(s), (a), (f), (o))
|
||||
|
||||
#define nouveau_vm_new(d,o,l,m,v) \
|
||||
_nouveau_vm_new((d), (o), (l), (m), (v))
|
||||
|
||||
#define nv50_vm_flush_engine(d,e) \
|
||||
_nv50_vm_flush_engine((d), (e))
|
||||
|
#define MAX_NUM_DCB_ENTRIES 16

#define NOUVEAU_MAX_CHANNEL_NR 4096

@ -172,34 +183,6 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_NR 16
#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/

#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)

#define NVOBJ_CINST_GLOBAL 0xdeadbeef

struct nouveau_gpuobj {
struct drm_device *dev;
struct kref refcount;
struct list_head list;

void *node;
u32 *suspend;

uint32_t flags;

u32 size;
u32 pinst; /* PRAMIN BAR offset */
u32 cinst; /* Channel offset */
u64 vinst; /* VRAM address */

uint32_t engine;
uint32_t class;

void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
void *priv;
};

struct nouveau_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;

@ -259,7 +242,6 @@ struct nouveau_channel {

/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */
struct drm_mm ramin_heap; /* Private PRAMIN heap */
struct nouveau_ramht *ramht; /* Hash table */

/* GPU object info for stuff used in-kernel (mm_enabled) */

@ -301,23 +283,6 @@ struct nouveau_exec_engine {
void (*tlb_flush)(struct drm_device *, int engine);
};

struct nouveau_instmem_engine {
void *priv;

int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);

int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
u32 size, u32 align);
void (*put)(struct nouveau_gpuobj *);
int (*map)(struct nouveau_gpuobj *);
void (*unmap)(struct nouveau_gpuobj *);

void (*flush)(struct drm_device *);
};

struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);

@ -499,7 +464,6 @@ struct nouveau_pm_engine {
};

struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_display_engine display;
struct nouveau_pm_engine pm;
};

@ -599,14 +563,7 @@ struct drm_nouveau_private {
int flags;
u32 crystal;

spinlock_t ramin_lock;
void __iomem *ramin;
u32 ramin_size;
u32 ramin_base;
bool ramin_available;
struct drm_mm ramin_heap;
struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
struct list_head gpuobj_list;
struct list_head classes;

struct nouveau_bo *vga_ram;

@ -648,8 +605,6 @@ struct drm_nouveau_private {
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;

uint32_t ramin_rsvd_vram;

struct {
enum {
NOUVEAU_GART_NONE = 0,

@ -663,11 +618,6 @@ struct drm_nouveau_private {

struct ttm_backend_func *func;

struct {
struct page *page;
dma_addr_t addr;
} dummy;

struct nouveau_gpuobj *sg_ctxdma;
} gart_info;

@ -682,10 +632,6 @@ struct drm_nouveau_private {
uint64_t fb_aper_free;
int fb_mtrr;

/* BAR control (NV50-) */
struct nouveau_vm *bar1_vm;
struct nouveau_vm *bar3_vm;

/* G8x/G9x virtual address space */
struct nouveau_vm *chan_vm;

@ -797,6 +743,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
extern const struct ttm_mem_type_manager_func nv04_gart_manager;

/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);

@ -844,11 +791,6 @@ extern int nouveau_channel_idle(struct nouveau_channel *chan);
return ret; \
} while (0)

extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
int (*exec)(struct nouveau_channel *,

@ -858,11 +800,6 @@ extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
uint32_t size, int align, uint32_t flags,
struct nouveau_gpuobj **);
extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);

@ -874,11 +811,8 @@ extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
int class, u64 base, u64 size, int target,
int access, u32 type, u32 comp);

int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, u32 flags,
struct nouveau_vm *vm, struct nouveau_vma *vma);
int nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
struct nouveau_vma *vma);
int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
u32 flags, struct nouveau_vma *vma);
void nouveau_gpuobj_unmap(struct nouveau_vma *vma);

/* nouveau_irq.c */

@ -1060,49 +994,6 @@ extern int nv84_vp_create(struct drm_device *dev);
/* nv98_ppp.c */
extern int nv98_ppp_create(struct drm_device *dev);

/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
u32 size, u32 align);
extern void nv04_instmem_put(struct nouveau_gpuobj *);
extern int nv04_instmem_map(struct nouveau_gpuobj *);
extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
extern void nv04_instmem_flush(struct drm_device *);

/* nv40_instmem.c */
extern int nv40_instmem_init(struct drm_device *);
extern void nv40_instmem_takedown(struct drm_device *);
extern int nv40_instmem_suspend(struct drm_device *);
extern void nv40_instmem_resume(struct drm_device *);
extern int nv40_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
u32 size, u32 align);
extern void nv40_instmem_put(struct nouveau_gpuobj *);
extern int nv40_instmem_map(struct nouveau_gpuobj *);
extern void nv40_instmem_unmap(struct nouveau_gpuobj *);
extern void nv40_instmem_flush(struct drm_device *);

/* nv50_instmem.c */
extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
u32 size, u32 align);
extern void nv50_instmem_put(struct nouveau_gpuobj *);
extern int nv50_instmem_map(struct nouveau_gpuobj *);
extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
extern void nv50_instmem_flush(struct drm_device *);
extern void nv84_instmem_flush(struct drm_device *);

/* nvc0_instmem.c */
extern int nvc0_instmem_init(struct drm_device *);
extern void nvc0_instmem_takedown(struct drm_device *);
extern int nvc0_instmem_suspend(struct drm_device *);
extern void nvc0_instmem_resume(struct drm_device *);

extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);

@ -1260,23 +1151,6 @@ static inline void nvchan_wr32(struct nouveau_channel *chan,
#define nv_wait_cb(dev, func, data) \
nouveau_wait_cb(dev, 2000000000ULL, (func), (data))

/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
return ioread32_native(dev_priv->ramin + offset);
}

static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
iowrite32_native(val, dev_priv->ramin + offset);
}

/* object access */
extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);

/*
* Logging
* Argument d is (struct drm_device *).

@ -37,7 +37,6 @@
#include <engine/fifo.h>
#include <core/ramht.h>
#include "nouveau_software.h"
#include <subdev/vm.h>

struct nouveau_gpuobj_method {
struct list_head head;

@ -135,173 +134,12 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
return ret;
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t size, int align, uint32_t flags,
struct nouveau_gpuobj **gpuobj_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *gpuobj;
struct drm_mm_node *ramin = NULL;
int ret, i;

NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
chan ? chan->id : -1, size, align, flags);

gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
gpuobj->dev = dev;
gpuobj->flags = flags;
kref_init(&gpuobj->refcount);
gpuobj->size = size;

spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
spin_unlock(&dev_priv->ramin_lock);

if (chan) {
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
if (!ramin) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}

gpuobj->pinst = chan->ramin->pinst;
if (gpuobj->pinst != ~0)
gpuobj->pinst += ramin->start;

gpuobj->cinst = ramin->start;
gpuobj->vinst = ramin->start + chan->ramin->vinst;
gpuobj->node = ramin;
} else {
ret = instmem->get(gpuobj, chan, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}

ret = -ENOSYS;
if (!(flags & NVOBJ_FLAG_DONT_MAP))
ret = instmem->map(gpuobj);
if (ret)
gpuobj->pinst = ~0;

gpuobj->cinst = NVOBJ_CINST_GLOBAL;
}

if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
instmem->flush(dev);
}

*gpuobj_ret = gpuobj;
return 0;
}

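The function removed above had two allocation paths: given a channel, it suballocated from that channel's private PRAMIN heap and derived pinst/cinst/vinst from the parent ramin object; given no channel, it asked the instmem engine for backing and, unless NVOBJ_FLAG_DONT_MAP was set, a CPU mapping. A typical call, as seen elsewhere in this patch (with the compat macro from nouveau_drv.h it now lands in the new subdev implementation):

ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &chan->vm_pd);
if (ret)
	return ret;
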
int
nouveau_gpuobj_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

NV_DEBUG(dev, "\n");

INIT_LIST_HEAD(&dev_priv->gpuobj_list);
INIT_LIST_HEAD(&dev_priv->classes);
spin_lock_init(&dev_priv->ramin_lock);
dev_priv->ramin_base = ~0;

return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj_method *om, *tm;
struct nouveau_gpuobj_class *oc, *tc;

NV_DEBUG(dev, "\n");

list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
list_for_each_entry_safe(om, tm, &oc->methods, head) {
list_del(&om->head);
kfree(om);
}
list_del(&oc->head);
kfree(oc);
}

WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
struct nouveau_gpuobj *gpuobj =
container_of(ref, struct nouveau_gpuobj, refcount);
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int i;

NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
instmem->flush(dev);
}

if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);

if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
if (gpuobj->node) {
instmem->unmap(gpuobj);
instmem->put(gpuobj);
}
} else {
if (gpuobj->node) {
spin_lock(&dev_priv->ramin_lock);
drm_mm_put_block(gpuobj->node);
spin_unlock(&dev_priv->ramin_lock);
}
}

spin_lock(&dev_priv->ramin_lock);
list_del(&gpuobj->list);
spin_unlock(&dev_priv->ramin_lock);

kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
if (ref)
kref_get(&ref->refcount);

if (*ptr)
kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

*ptr = ref;
}

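Note the refcounting idiom above: nouveau_gpuobj_ref() is ref, unref and assign in one call. It takes a reference on the new object (if non-NULL), drops the reference held in *ptr (if any, possibly freeing via nouveau_gpuobj_del), then stores the new pointer. Releasing an object is therefore spelled as follows, as seen throughout this patch:

struct nouveau_gpuobj *obj = NULL;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &obj); /* obj holds a ref */
/* ... use obj ... */
nouveau_gpuobj_ref(NULL, &obj); /* drop the ref; freed on last release */
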
void
|
||||
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
|
||||
u64 base, u64 size, int target, int access,
|
||||
u32 type, u32 comp)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
|
||||
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
|
||||
u32 flags0;
|
||||
|
||||
flags0 = (comp << 29) | (type << 22) | class;
|
||||
@ -343,7 +181,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
|
||||
nv_wo32(obj, offset + 0x10, 0x00000000);
|
||||
nv_wo32(obj, offset + 0x14, 0x00000000);
|
||||
|
||||
pinstmem->flush(obj->dev);
|
||||
nvimem_flush(obj->dev);
|
||||
}
|
||||
|
||||
int
|
||||
@ -485,10 +323,6 @@ nv04_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -503,10 +337,6 @@ nv50_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -533,10 +363,6 @@ nv84_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -552,30 +378,20 @@ static int
|
||||
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct nouveau_gpuobj *pgd = NULL;
|
||||
struct nouveau_vm_pgd *vpgd;
|
||||
int ret;
|
||||
|
||||
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* create page directory for this vm if none currently exists,
|
||||
* will be destroyed automagically when last reference to the
|
||||
* vm is removed
|
||||
*/
|
||||
if (list_empty(&vm->pgd_list)) {
|
||||
ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
nouveau_vm_ref(vm, &chan->vm, pgd);
|
||||
nouveau_gpuobj_ref(NULL, &pgd);
|
||||
ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &chan->vm_pd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* point channel at vm's page directory */
|
||||
vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
|
||||
nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
|
||||
nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
|
||||
nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
|
||||
|
||||
nv_wo32(chan->ramin, 0x0200, lower_32_bits(chan->vm_pd->addr));
|
||||
nv_wo32(chan->ramin, 0x0204, upper_32_bits(chan->vm_pd->addr));
|
||||
nv_wo32(chan->ramin, 0x0208, 0xffffffff);
|
||||
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
|
||||
|
||||
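A behavioural note on the replacement code above: rather than lazily sharing one page directory per VM (the deleted pgd_list path), each channel now allocates its own 64KiB vm_pd object and points its instance block at it. A sketch of the layout being written, as far as it can be read off the code (the limit interpretation is an assumption, not stated in the patch):

/* chan->ramin is the channel's instance block:
 *   +0x200: lower_32_bits(chan->vm_pd->addr)   page directory, low
 *   +0x204: upper_32_bits(chan->vm_pd->addr)   page directory, high
 *   +0x208: 0xffffffff                         likely VM limit, low
 *   +0x20c: 0x000000ff                         likely VM limit, high
 */
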
@ -698,132 +514,5 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
|
||||
nouveau_gpuobj_ref(NULL, &chan->ramfc);
|
||||
nouveau_gpuobj_ref(NULL, &chan->engptr);
|
||||
|
||||
if (drm_mm_initialized(&chan->ramin_heap))
|
||||
drm_mm_takedown(&chan->ramin_heap);
|
||||
nouveau_gpuobj_ref(NULL, &chan->ramin);
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_gpuobj_suspend(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *gpuobj;
|
||||
int i;
|
||||
|
||||
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
|
||||
if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
|
||||
continue;
|
||||
|
||||
gpuobj->suspend = vmalloc(gpuobj->size);
|
||||
if (!gpuobj->suspend) {
|
||||
nouveau_gpuobj_resume(dev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < gpuobj->size; i += 4)
|
||||
gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_gpuobj_resume(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *gpuobj;
|
||||
int i;
|
||||
|
||||
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
|
||||
if (!gpuobj->suspend)
|
||||
continue;
|
||||
|
||||
for (i = 0; i < gpuobj->size; i += 4)
|
||||
nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
|
||||
|
||||
vfree(gpuobj->suspend);
|
||||
gpuobj->suspend = NULL;
|
||||
}
|
||||
|
||||
dev_priv->engine.instmem.flush(dev);
|
||||
}
|
||||
|
||||
u32
|
||||
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
|
||||
struct drm_device *dev = gpuobj->dev;
|
||||
unsigned long flags;
|
||||
|
||||
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
|
||||
u64 ptr = gpuobj->vinst + offset;
|
||||
u32 base = ptr >> 16;
|
||||
u32 val;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->vm_lock, flags);
|
||||
if (dev_priv->ramin_base != base) {
|
||||
dev_priv->ramin_base = base;
|
||||
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
|
||||
}
|
||||
val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
|
||||
spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
|
||||
return val;
|
||||
}
|
||||
|
||||
return nv_ri32(dev, gpuobj->pinst + offset);
|
||||
}
|
||||
|
||||
void
|
||||
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
|
||||
struct drm_device *dev = gpuobj->dev;
|
||||
unsigned long flags;
|
||||
|
||||
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
|
||||
u64 ptr = gpuobj->vinst + offset;
|
||||
u32 base = ptr >> 16;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->vm_lock, flags);
|
||||
if (dev_priv->ramin_base != base) {
|
||||
dev_priv->ramin_base = base;
|
||||
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
|
||||
}
|
||||
nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
|
||||
spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
nv_wi32(dev, gpuobj->pinst + offset, val);
|
||||
}
|
||||
|
||||
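The fallback paths in nv_ro32()/nv_wo32() above go through a sliding 64KiB window: writing ptr >> 16 to register 0x001700 selects which 64KiB of instance memory appears at BAR0 offset 0x700000, and dev_priv->ramin_base caches the current selection to avoid redundant writes. A minimal sketch of the access pattern (illustrative only; the real code also serialises with dev_priv->vm_lock, as shown):

/* illustrative: read one word of a VRAM-backed object through the
 * 64KiB window at BAR0+0x700000, selected via register 0x001700 */
static u32 window_read(struct drm_device *dev, u64 vram_addr)
{
	nv_wr32(dev, 0x001700, vram_addr >> 16);  /* select 64KiB page */
	return nv_rd32(dev, 0x700000 + (vram_addr & 0xffff));
}
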
int
|
||||
nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, u32 flags,
|
||||
struct nouveau_vm *vm, struct nouveau_vma *vma)
|
||||
{
|
||||
struct nouveau_mem **mem = gpuobj->node;
|
||||
struct nouveau_mem *node = *mem;
|
||||
int ret;
|
||||
|
||||
ret = nouveau_vm_get(vm, node->size << 12, 12, flags, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nouveau_vm_map(vma, node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
|
||||
struct nouveau_vma *vma)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
|
||||
return nouveau_gpuobj_map_vm(gpuobj, flags, dev_priv->bar1_vm, vma);
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_gpuobj_unmap(struct nouveau_vma *vma)
|
||||
{
|
||||
nouveau_vm_unmap(vma);
|
||||
nouveau_vm_put(vma);
|
||||
}
|
||||
|
@ -38,7 +38,6 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include <core/mm.h>
#include <subdev/vm.h>
#include <engine/fifo.h>
#include "nouveau_fence.h"

@ -220,7 +219,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_available_size -= nvimem_reserved(dev);
dev_priv->fb_aper_free = dev_priv->fb_available_size;

/* mappable vram */

@ -1058,3 +1057,71 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_del,
nouveau_gart_manager_debug
};

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct drm_device *dev = dev_priv->dev;
man->priv = nv04vm_ref(dev);
return (man->priv != NULL) ? 0 : -ENODEV;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
struct nouveau_vm *vm = man->priv;
nouveau_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
struct nouveau_mem *node = mem->mm_node;
if (node->vma[0].node)
nouveau_vm_put(&node->vma[0]);
kfree(mem->mm_node);
mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
int ret;

node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;

node->page_shift = 12;

ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
NV_MEM_ACCESS_RW, &node->vma[0]);
if (ret) {
kfree(node);
return ret;
}

mem->mm_node = node;
mem->start = node->vma[0].offset >> PAGE_SHIFT;
return 0;
}

void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
nv04_gart_manager_init,
nv04_gart_manager_fini,
nv04_gart_manager_new,
nv04_gart_manager_del,
nv04_gart_manager_debug
};

@ -96,16 +96,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
drm_mm_takedown(&chan->notifier_heap);
}

static void
nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
struct nouveau_gpuobj *gpuobj)
{
NV_DEBUG(dev, "\n");

if (gpuobj->priv)
drm_mm_put_block(gpuobj->priv);
}

int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int size, uint32_t start, uint32_t end,

@ -147,8 +137,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
return ret;
}
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;

ret = nouveau_ramht_insert(chan, handle, nobj);
nouveau_gpuobj_ref(NULL, &nobj);

@ -13,7 +13,7 @@ struct nouveau_sgdma_be {
*/
struct ttm_dma_tt ttm;
struct drm_device *dev;
u64 offset;
struct nouveau_mem *node;
};

static void

@ -32,25 +32,18 @@ static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
struct nouveau_mem *node = mem->mm_node;
u64 size = mem->num_pages << 12;

NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < ttm->num_pages; i++) {
dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);

for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
offset_l += NV_CTXDMA_PAGE_SIZE;
}
if (ttm->sg) {
node->sg = ttm->sg;
nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
} else {
node->pages = nvbe->ttm.dma_address;
nouveau_vm_map_sg(&node->vma[0], 0, size, node);
}

nvbe->node = node;
return 0;
}

@ -58,22 +51,7 @@ static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;

NV_DEBUG(dev, "\n");

if (ttm->state != tt_bound)
return 0;

pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}

nouveau_vm_unmap(&nvbe->node->vma[0]);
return 0;
}

@ -83,206 +61,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
.destroy = nouveau_sgdma_destroy
};

static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
struct drm_device *dev = nvbe->dev;

nv_wr32(dev, 0x100810, 0x00000022);
if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
nv_rd32(dev, 0x100810));
nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
u32 cnt = ttm->num_pages;

nvbe->offset = mem->start << PAGE_SHIFT;

while (cnt--) {
nv_wo32(pgt, pte, (*list++ >> 7) | 1);
pte += 4;
}

nv41_sgdma_flush(nvbe);
return 0;
}

static int
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = ttm->num_pages;

while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}

nv41_sgdma_flush(nvbe);
return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};

static void
nv44_sgdma_flush(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;

nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
nv_rd32(dev, 0x100808));
nv_wr32(dev, 0x100808, 0x00000000);
}

static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
u32 pte, tmp[4];

pte = base >> 2;
base &= ~0x0000000f;

tmp[0] = nv_ro32(pgt, base + 0x0);
tmp[1] = nv_ro32(pgt, base + 0x4);
tmp[2] = nv_ro32(pgt, base + 0x8);
tmp[3] = nv_ro32(pgt, base + 0xc);
while (cnt--) {
u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
switch (pte++ & 0x3) {
case 0:
tmp[0] &= ~0x07ffffff;
tmp[0] |= addr;
break;
case 1:
tmp[0] &= ~0xf8000000;
tmp[0] |= addr << 27;
tmp[1] &= ~0x003fffff;
tmp[1] |= addr >> 5;
break;
case 2:
tmp[1] &= ~0xffc00000;
tmp[1] |= addr << 22;
tmp[2] &= ~0x0001ffff;
tmp[2] |= addr >> 10;
break;
case 3:
tmp[2] &= ~0xfffe0000;
tmp[2] |= addr << 17;
tmp[3] &= ~0x00000fff;
tmp[3] |= addr >> 15;
break;
}
}

tmp[3] |= 0x40000000;

nv_wo32(pgt, base + 0x0, tmp[0]);
nv_wo32(pgt, base + 0x4, tmp[1]);
nv_wo32(pgt, base + 0x8, tmp[2]);
nv_wo32(pgt, base + 0xc, tmp[3]);
}

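nv44_sgdma_fill() above copes with the NV44 PTE format: each entry is a 27-bit page address (dma_addr >> 12), and four entries are packed into one 16-byte group, so touching an unaligned subrange means a read-modify-write of the whole group (unused slots point at the DMA dummy page rather than being left invalid). The packing follows directly from the shifts in the code, and the aligned fast path in nv44_sgdma_bind() below writes the same layout:

/* four 27-bit addresses a0..a3 packed into four 32-bit words:
 *   word0 = a0       | a1 << 27
 *   word1 = a1 >> 5  | a2 << 22
 *   word2 = a2 >> 10 | a3 << 17
 *   word3 = a3 >> 15 | 0x40000000   (bit 30 set, per the code above)
 */
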
static int
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
u32 cnt = ttm->num_pages;
int i;

nvbe->offset = mem->start << PAGE_SHIFT;

if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
u32 part = (cnt > max) ? max : cnt;
nv44_sgdma_fill(pgt, list, pte, part);
pte += (part << 2);
list += part;
cnt -= part;
}

while (cnt >= 4) {
for (i = 0; i < 4; i++)
tmp[i] = *list++ >> 12;
nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
pte += 0x10;
cnt -= 4;
}

if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);

nv44_sgdma_flush(ttm);
return 0;
}

static int
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = ttm->num_pages;

if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
u32 part = (cnt > max) ? max : cnt;
nv44_sgdma_fill(pgt, NULL, pte, part);
pte += (part << 2);
cnt -= part;
}

while (cnt >= 4) {
nv_wo32(pgt, pte + 0x0, 0x00000000);
nv_wo32(pgt, pte + 0x4, 0x00000000);
nv_wo32(pgt, pte + 0x8, 0x00000000);
nv_wo32(pgt, pte + 0xc, 0x00000000);
pte += 0x10;
cnt -= 4;
}

if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);

nv44_sgdma_flush(ttm);
return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};

static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{

@ -337,82 +115,24 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
u32 aper_size, align;
int ret;
u32 aper_size;

if (dev_priv->card_type >= NV_40)
if (dev_priv->card_type >= NV_50)
aper_size = 512 * 1024 * 1024;
else
aper_size = 128 * 1024 * 1024;

/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
* christmas. The cards before it have them, the cards after
* it have them, why is NV44 so unloved?
*/
dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
if (!dev_priv->gart_info.dummy.page)
return -ENOMEM;

dev_priv->gart_info.dummy.addr =
pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
NV_ERROR(dev, "error mapping dummy page\n");
__free_page(dev_priv->gart_info.dummy.page);
dev_priv->gart_info.dummy.page = NULL;
return -ENOMEM;
}

if (dev_priv->card_type >= NV_50) {
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
if (0 && pci_is_pcie(dev->pdev) &&
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
align = 512 * 1024;
} else {
dev_priv->gart_info.func = &nv41_sgdma_backend;
align = 16;
}

ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
}

dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
dev_priv->gart_info.type = NOUVEAU_GART_HW;
} else {
ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
}

nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
(1 << 12) /* PT present */ |
(0 << 13) /* PT *not* linear */ |
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);

dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
dev_priv->gart_info.func = &nv04_sgdma_backend;
dev_priv->gart_info.sg_ctxdma = nv04vm_refdma(dev);
}

return 0;

@ -424,13 +144,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;

nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

if (dev_priv->gart_info.dummy.page) {
pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(dev_priv->gart_info.dummy.page);
dev_priv->gart_info.dummy.page = NULL;
}
}

uint32_t

@ -52,15 +52,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)

switch (dev_priv->chipset & 0xf0) {
case 0x00:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
engine->instmem.get = nv04_instmem_get;
engine->instmem.put = nv04_instmem_put;
engine->instmem.map = nv04_instmem_map;
engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;

@ -72,15 +63,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_set = nv04_pm_clocks_set;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
engine->instmem.get = nv04_instmem_get;
engine->instmem.put = nv04_instmem_put;
engine->instmem.map = nv04_instmem_map;
engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;

@ -92,15 +74,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_set = nv04_pm_clocks_set;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
engine->instmem.get = nv04_instmem_get;
engine->instmem.put = nv04_instmem_put;
engine->instmem.map = nv04_instmem_map;
engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;

@ -112,15 +85,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_set = nv04_pm_clocks_set;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
engine->instmem.get = nv04_instmem_get;
engine->instmem.put = nv04_instmem_put;
engine->instmem.map = nv04_instmem_map;
engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;

@ -135,15 +99,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
break;
case 0x40:
case 0x60:
engine->instmem.init = nv40_instmem_init;
engine->instmem.takedown = nv40_instmem_takedown;
engine->instmem.suspend = nv40_instmem_suspend;
engine->instmem.resume = nv40_instmem_resume;
engine->instmem.get = nv40_instmem_get;
engine->instmem.put = nv40_instmem_put;
engine->instmem.map = nv40_instmem_map;
engine->instmem.unmap = nv40_instmem_unmap;
engine->instmem.flush = nv40_instmem_flush;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;

@ -163,18 +118,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0x80: /* gotta love NVIDIA's consistency.. */
case 0x90:
case 0xa0:
engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
engine->instmem.resume = nv50_instmem_resume;
engine->instmem.get = nv50_instmem_get;
engine->instmem.put = nv50_instmem_put;
engine->instmem.map = nv50_instmem_map;
engine->instmem.unmap = nv50_instmem_unmap;
if (dev_priv->chipset == 0x50)
engine->instmem.flush = nv50_instmem_flush;
else
engine->instmem.flush = nv84_instmem_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;

@ -212,15 +155,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.pwm_set = nv50_pm_pwm_set;
break;
case 0xc0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
engine->instmem.get = nv50_instmem_get;
engine->instmem.put = nv50_instmem_put;
engine->instmem.map = nv50_instmem_map;
engine->instmem.unmap = nv50_instmem_unmap;
engine->instmem.flush = nv84_instmem_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;

@ -237,15 +171,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.pwm_set = nv50_pm_pwm_set;
break;
case 0xd0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
engine->instmem.get = nv50_instmem_get;
engine->instmem.put = nv50_instmem_put;
engine->instmem.map = nv50_instmem_map;
engine->instmem.unmap = nv50_instmem_unmap;
engine->instmem.flush = nv84_instmem_flush;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;

@ -260,15 +185,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
case 0xe0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
engine->instmem.get = nv50_instmem_get;
engine->instmem.put = nv50_instmem_put;
engine->instmem.map = nv50_instmem_map;
engine->instmem.unmap = nv50_instmem_unmap;
engine->instmem.flush = nv84_instmem_flush;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;

@ -354,8 +270,10 @@ nouveau_card_channel_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;

if (dev_priv->channel)
if (dev_priv->channel) {
nouveau_channel_put_unlocked(&dev_priv->channel);
nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
}
}

static int

@ -365,6 +283,10 @@ nouveau_card_channel_init(struct drm_device *dev)
struct nouveau_channel *chan;
int ret;

ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x1000, &dev_priv->chan_vm);
if (ret)
return ret;

ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
dev_priv->channel = chan;
if (ret)

@ -400,6 +322,7 @@ nouveau_card_init(struct drm_device *dev)
spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
spin_lock_init(&dev_priv->vm_lock);
INIT_LIST_HEAD(&dev_priv->classes);

/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);

@ -419,17 +342,9 @@ nouveau_card_init(struct drm_device *dev)
nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
}

ret = nouveau_gpuobj_init(dev);
if (ret)
goto out_bios;

ret = engine->instmem.init(dev);
if (ret)
goto out_gpuobj;

ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_instmem;
goto out_bios;

ret = nouveau_mem_gart_init(dev);
if (ret)

@ -652,10 +567,6 @@ out_engine:
nouveau_mem_gart_fini(dev);
out_ttmvram:
nouveau_mem_vram_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
out_gpuobj:
nouveau_gpuobj_takedown(dev);
out_bios:
nouveau_bios_takedown(dev);
out_display_early:

@ -703,9 +614,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_mem_gart_fini(dev);
nouveau_mem_vram_fini(dev);

engine->instmem.takedown(dev);
nouveau_gpuobj_takedown(dev);

nouveau_bios_takedown(dev);
engine->display.late_takedown(dev);

@ -955,32 +863,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_priv;

/* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
int ramin_bar = 2;
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
ramin_bar = 3;

dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
dev_priv->ramin =
ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
ret = -ENOMEM;
goto err_priv;
}
} else {
dev_priv->ramin_size = 1 * 1024 * 1024;
dev_priv->ramin = ioremap(pci_resource_start(dev->pdev, 0),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
ret = -ENOMEM;
goto err_priv;
}
}

nouveau_OF_copy_vbios_to_ramin(dev);

/* Special flags */

@ -992,12 +874,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
/* For kernel modesetting, init card now and bring up fbcon */
ret = nouveau_card_init(dev);
if (ret)
goto err_ramin;
goto err_priv;

return 0;

err_ramin:
iounmap(dev_priv->ramin);
err_priv:
dev->dev_private = dev_priv->newpriv;
kfree(dev_priv);

@ -1016,8 +896,6 @@ int nouveau_unload(struct drm_device *dev)

nouveau_card_takedown(dev);

iounmap(dev_priv->ramin);

dev->dev_private = dev_priv->newpriv;
kfree(dev_priv);
return 0;

@ -217,7 +217,7 @@ nv50_display_init(struct drm_device *dev)
return ret;
evo = nv50_display(dev)->master;

nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->addr >> 8) | 9);

ret = RING_SPACE(evo, 3);
if (ret)

@ -70,7 +70,7 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
nv_wo32(obj, 0x14, flags5);
dev_priv->engine.instmem.flush(obj->dev);
nvimem_flush(obj->dev);
}

int

@ -263,12 +263,6 @@ nv50_evo_create(struct drm_device *dev)
goto err;
}

ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
goto err;
}

ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);

@ -294,7 +288,7 @@ nv50_evo_create(struct drm_device *dev)
goto err;

ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
disp->ntfy->vinst, disp->ntfy->size, NULL);
disp->ntfy->addr, disp->ntfy->size, NULL);
if (ret)
goto err;

@ -48,7 +48,7 @@ mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
if (!gpuobj)
return -ENOENT;

pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
pch->base.vblank.ctxdma = gpuobj->node->offset >> 4;
return 0;
}

@ -105,7 +105,7 @@ nv50_software_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;

nouveau_software_context_new(&pch->base);
pch->base.vblank.channel = chan->ramin->vinst >> 12;
pch->base.vblank.channel = chan->ramin->addr >> 12;
chan->engctx[engine] = pch;

/* dma objects for display sync channel semaphore blocks */

@ -106,7 +106,7 @@ nv84_fence_context_new(struct nouveau_channel *chan, int engine)
nouveau_fence_context_new(&fctx->base);

ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
priv->mem->vinst, priv->mem->size,
priv->mem->addr, priv->mem->size,
NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
if (ret == 0) {

@ -1903,7 +1903,7 @@ nvd0_display_init(struct drm_device *dev)
}

/* point at our hash table / objects, enable interrupts */
nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
nv_wr32(dev, 0x610010, (disp->mem->addr >> 8) | 9);
nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);

/* init master */

@ -1967,7 +1967,6 @@ int
nvd0_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *tmp;
struct pci_dev *pdev = dev->pdev;

@ -2106,7 +2105,7 @@ nvd0_display_create(struct drm_device *dev)
((dmao + 0x60) << 9));
}

pinstmem->flush(dev);
nvimem_flush(dev);

out:
if (ret)