drm/nouveau/dma: convert user classes to new-style nvkm_object

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Author: Ben Skeggs 2015-08-20 14:54:18 +10:00
commit 0710cc3148
parent 19fef52d93
18 changed files with 367 additions and 269 deletions
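
In short: each DMA object becomes a self-contained nvkm_object carrying a per-object func table (bind() now takes an explicit alignment), objects are tracked per-client in an rb-tree keyed by their handle, and the per-chipset backends expose class_new() constructors instead of nvkm_oclass/ofuncs entries. A condensed sketch of the lookup-and-bind flow the fifo and disp paths switch to below (names taken from this diff; error handling trimmed):

	struct nvkm_dmaobj *dmaobj;
	struct nvkm_gpuobj *push = NULL;
	int ret;

	/* per-client rb-tree lookup, keyed on the dma object's handle */
	dmaobj = nvkm_dma_search(device->dma, client, pushbuf);
	if (!dmaobj)
		return -ENOENT;

	/* binding goes through the object's own func table; alignment is explicit */
	ret = dmaobj->func->bind(dmaobj, &base->gpuobj, 16, &push);
	if (ret)
		return ret;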

@@ -10,6 +10,7 @@ struct nvkm_client {
struct nvkm_client_notify *notify[16];
struct rb_root objroot;
struct rb_root dmaroot;
struct nvkm_handle *root;

@@ -1,24 +1,35 @@
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
struct nvkm_client;
struct nvkm_gpuobj;
struct nvkm_dmaobj {
struct nvkm_object base;
const struct nvkm_dmaobj_func *func;
struct nvkm_dma *dma;
struct nvkm_object object;
u32 target;
u32 access;
u64 start;
u64 limit;
struct rb_node rb;
u64 handle; /*XXX HANDLE MERGE */
};
struct nvkm_dmaobj_func {
int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
};
struct nvkm_dma {
struct nvkm_engine engine;
/* creates a "physical" dma object from a struct nvkm_dmaobj */
int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *parent,
struct nvkm_gpuobj **);
};
struct nvkm_dmaobj *
nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
extern struct nvkm_oclass *nv04_dmaeng_oclass;
extern struct nvkm_oclass *nv50_dmaeng_oclass;
extern struct nvkm_oclass *gf100_dmaeng_oclass;

@@ -314,6 +314,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
client->dmaroot = RB_ROOT;
ret = nvkm_handle_create(NULL, ~0, &client->object, &client->root);
if (ret)

@@ -292,11 +292,12 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
(1ULL << NVDEV_ENGINE_FIFO) |
(1ULL << NVDEV_ENGINE_DISP) |
(1ULL << NVDEV_ENGINE_PM);
const struct nvkm_device_oclass *sclass;
const struct nvkm_device_oclass *sclass = NULL;
int i;
for (; i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
if ((engine = nvkm_device_engine(device, i))) {
for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
if ((engine = nvkm_device_engine(device, i)) &&
!engine->func) {
struct nvkm_oclass *sclass = engine->sclass;
int c = 0;
while (sclass && sclass->ofuncs) {
@@ -312,17 +313,27 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
sclass++;
}
index -= c;
continue;
}
if (!(engine = nvkm_device_engine(device, i)) ||
!(engine->func->base.sclass))
continue;
oclass->engine = engine;
index -= engine->func->base.sclass(oclass, index, &sclass);
}
switch (index) {
case 0: sclass = &nvkm_control_oclass; break;
default:
return -EINVAL;
if (!sclass) {
switch (index) {
case 0: sclass = &nvkm_control_oclass; break;
default:
return -EINVAL;
}
oclass->base = sclass->base;
}
oclass->ctor = nvkm_udevice_child_new;
oclass->base = sclass->base;
oclass->priv = sclass;
return 0;
}

@@ -209,8 +209,9 @@ nv50_disp_dmac_create_(struct nvkm_object *parent,
struct nvkm_oclass *oclass, u64 pushbuf, int head,
int length, void **pobject)
{
struct nvkm_device *device = parent->engine->subdev.device;
struct nvkm_client *client = nvkm_client(parent);
struct nvkm_handle *handle;
struct nvkm_dma *dma = device->dma;
struct nvkm_dmaobj *dmaobj;
struct nv50_disp_dmac *dmac;
int ret;
@@ -221,27 +222,19 @@ nv50_disp_dmac_create_(struct nvkm_object *parent,
if (ret)
return ret;
handle = nvkm_client_search(client, pushbuf);
if (!handle)
dmaobj = nvkm_dma_search(dma, client, pushbuf);
if (!dmaobj)
return -ENOENT;
dmaobj = (void *)handle->object;
switch (nv_mclass(dmaobj)) {
case 0x0002:
case 0x003d:
if (dmaobj->limit - dmaobj->start != 0xfff)
return -EINVAL;
if (dmaobj->limit - dmaobj->start != 0xfff)
return -EINVAL;
switch (dmaobj->target) {
case NV_MEM_TARGET_VRAM:
dmac->push = 0x00000001 | dmaobj->start >> 8;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmac->push = 0x00000003 | dmaobj->start >> 8;
break;
default:
return -EINVAL;
}
switch (dmaobj->target) {
case NV_MEM_TARGET_VRAM:
dmac->push = 0x00000001 | dmaobj->start >> 8;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmac->push = 0x00000003 | dmaobj->start >> 8;
break;
default:
return -EINVAL;

@@ -24,77 +24,182 @@
#include "priv.h"
#include <core/client.h>
#include <core/gpuobj.h>
struct hack {
struct nvkm_gpuobj object;
struct nvkm_gpuobj *parent;
};
#include <nvif/class.h>
static void
dtor(struct nvkm_object *object)
struct nvkm_dmaobj *
nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
{
struct hack *hack = (void *)object;
nvkm_gpuobj_del(&hack->parent);
nvkm_object_destroy(&hack->object.object);
struct rb_node *node = client->dmaroot.rb_node;
while (node) {
struct nvkm_dmaobj *dmaobj =
container_of(node, typeof(*dmaobj), rb);
if (object < dmaobj->handle)
node = node->rb_left;
else
if (object > dmaobj->handle)
node = node->rb_right;
else
return dmaobj;
}
return NULL;
}
static struct nvkm_oclass
hack = {
.handle = NV_GPUOBJ_CLASS,
static int
nvkm_dma_oclass_new(struct nvkm_device *device,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_dma *dma = nvkm_dma(oclass->engine);
struct nvkm_dma_impl *impl = (void *)dma->engine.subdev.object.oclass;
struct nvkm_dmaobj *dmaobj = NULL;
struct nvkm_client *client = oclass->client;
struct rb_node **ptr = &client->dmaroot.rb_node;
struct rb_node *parent = NULL;
int ret;
ret = impl->class_new(dma, oclass, data, size, &dmaobj);
if (dmaobj)
*pobject = &dmaobj->object;
if (ret)
return ret;
dmaobj->handle = oclass->object;
while (*ptr) {
struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
parent = *ptr;
if (dmaobj->handle < obj->handle)
ptr = &parent->rb_left;
else
if (dmaobj->handle > obj->handle)
ptr = &parent->rb_right;
else
return -EEXIST;
}
rb_link_node(&dmaobj->rb, parent, ptr);
rb_insert_color(&dmaobj->rb, &client->dmaroot);
return 0;
}
static const struct nvkm_device_oclass
nvkm_dma_oclass_base = {
.ctor = nvkm_dma_oclass_new,
};
static const struct nvkm_sclass
nvkm_dma_sclass[] = {
{ 0, 0, NV_DMA_FROM_MEMORY },
{ 0, 0, NV_DMA_TO_MEMORY },
{ 0, 0, NV_DMA_IN_MEMORY },
};
static int
nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
const struct nvkm_device_oclass **class)
{
const int count = ARRAY_SIZE(nvkm_dma_sclass);
if (index < count) {
const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
sclass->base = oclass[0];
sclass->engn = oclass;
*class = &nvkm_dma_oclass_base;
return index;
}
return count;
}
static const struct nvkm_engine_func
nvkm_dma = {
.base.sclass = nvkm_dma_oclass_base_get,
};
#include <core/gpuobj.h>
static struct nvkm_oclass empty = {
.ofuncs = &(struct nvkm_ofuncs) {
.dtor = dtor,
.dtor = nvkm_object_destroy,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
},
};
static int
nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *pargpu,
struct nvkm_gpuobj **pgpuobj)
nvkm_dmaobj_compat_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
const struct nvkm_dma_impl *impl = (void *)
nv_oclass(nv_object(dmaobj)->engine);
int ret = 0;
if (&dmaobj->base == &pargpu->object) { /* ctor bind */
struct nvkm_object *parent = (void *)pargpu;
struct hack *object;
if (parent->parent->parent == &nvkm_client(parent)->object) {
/* delayed, or no, binding */
return 0;
}
pargpu = (void *)nv_pclass((void *)pargpu, NV_GPUOBJ_CLASS);
ret = nvkm_object_create(parent, NULL, &hack, NV_GPUOBJ_CLASS, &object);
if (ret == 0) {
nvkm_object_ref(NULL, &parent);
*pgpuobj = &object->object;
ret = impl->bind(dmaobj, pargpu, &object->parent);
if (ret)
return ret;
object->object.node = object->parent->node;
object->object.addr = object->parent->addr;
object->object.size = object->parent->size;
return 0;
}
struct nvkm_oclass hack = {
.base.oclass = oclass->handle,
.client = nvkm_client(parent),
.parent = parent,
.engine = nv_engine(engine),
};
struct nvkm_dma *dma = (void *)engine;
struct nvkm_dma_impl *impl = (void *)dma->engine.subdev.object.oclass;
struct nvkm_dmaobj *dmaobj = NULL;
struct nvkm_gpuobj *gpuobj;
int ret;
ret = impl->class_new(dma, &hack, data, size, &dmaobj);
if (dmaobj)
*pobject = &dmaobj->object;
if (ret)
return ret;
}
return impl->bind(dmaobj, pargpu, pgpuobj);
gpuobj = (void *)nv_pclass(parent, NV_GPUOBJ_CLASS);
ret = dmaobj->func->bind(dmaobj, gpuobj, 16, &gpuobj);
nvkm_object_ref(NULL, pobject);
if (ret)
return ret;
ret = nvkm_object_create(parent, engine, &empty, 0, pobject);
if (ret)
return ret;
gpuobj->object.parent = *pobject;
gpuobj->object.engine = &dma->engine;
gpuobj->object.oclass = oclass;
gpuobj->object.pclass = NV_GPUOBJ_CLASS;
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
gpuobj->object._magic = NVKM_OBJECT_MAGIC;
#endif
*pobject = &gpuobj->object;
return 0;
}
static void
nvkm_dmaobj_compat_dtor(struct nvkm_object *object)
{
struct nvkm_object *parent = object->parent;
struct nvkm_gpuobj *gpuobj = (void *)object;
nvkm_gpuobj_del(&gpuobj);
nvkm_object_ref(NULL, &parent);
}
static struct nvkm_ofuncs
nvkm_dmaobj_compat_ofuncs = {
.ctor = nvkm_dmaobj_compat_ctor,
.dtor = nvkm_dmaobj_compat_dtor,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
};
static struct nvkm_oclass
nvkm_dma_compat_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{ NV_DMA_TO_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{ NV_DMA_IN_MEMORY, &nvkm_dmaobj_compat_ofuncs },
{}
};
int
_nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
const struct nvkm_dma_impl *impl = (void *)oclass;
struct nvkm_dma *dmaeng;
int ret;
@@ -104,7 +209,7 @@ _nvkm_dma_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
nv_engine(dmaeng)->sclass = impl->sclass;
dmaeng->bind = nvkm_dmaobj_bind;
dmaeng->engine.sclass = nvkm_dma_compat_sclass;
dmaeng->engine.func = &nvkm_dma;
return 0;
}

@@ -33,6 +33,5 @@ gf100_dmaeng_oclass = &(struct nvkm_dma_impl) {
.init = _nvkm_dma_init,
.fini = _nvkm_dma_fini,
},
.sclass = gf100_dmaeng_sclass,
.bind = gf100_dmaobj_bind,
.class_new = gf100_dmaobj_new,
}.base;

@@ -33,6 +33,5 @@ gf110_dmaeng_oclass = &(struct nvkm_dma_impl) {
.init = _nvkm_dma_init,
.fini = _nvkm_dma_fini,
},
.sclass = gf110_dmaeng_sclass,
.bind = gf110_dmaobj_bind,
.class_new = gf110_dmaobj_new,
}.base;

@@ -33,6 +33,5 @@ nv04_dmaeng_oclass = &(struct nvkm_dma_impl) {
.init = _nvkm_dma_init,
.fini = _nvkm_dma_fini,
},
.sclass = nv04_dmaeng_sclass,
.bind = nv04_dmaobj_bind,
.class_new = nv04_dmaobj_new,
}.base;

@@ -33,6 +33,5 @@ nv50_dmaeng_oclass = &(struct nvkm_dma_impl) {
.init = _nvkm_dma_init,
.fini = _nvkm_dma_fini,
},
.sclass = nv50_dmaeng_sclass,
.bind = nv50_dmaobj_bind,
.class_new = nv50_dmaobj_new,
}.base;

@@ -1,5 +1,6 @@
#ifndef __NVKM_DMA_PRIV_H__
#define __NVKM_DMA_PRIV_H__
#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
#include <engine/dma.h>
int _nvkm_dma_ctor(struct nvkm_object *, struct nvkm_object *,
@@ -14,5 +15,7 @@ struct nvkm_dma_impl {
struct nvkm_oclass *sclass;
int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
struct nvkm_gpuobj **);
int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_dmaobj **);
};
#endif

@@ -24,34 +24,57 @@
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <nvif/class.h>
#include <nvif/unpack.h>
static int
nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
}
static void *
nvkm_dmaobj_dtor(struct nvkm_object *base)
{
struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
if (!RB_EMPTY_NODE(&dmaobj->rb))
rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
return dmaobj;
}
static const struct nvkm_object_func
nvkm_dmaobj_func = {
.dtor = nvkm_dmaobj_dtor,
.bind = nvkm_dmaobj_bind,
};
int
nvkm_dmaobj_create_(struct nvkm_object *parent,
struct nvkm_object *engine,
struct nvkm_oclass *oclass, void **pdata, u32 *psize,
int length, void **pobject)
nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
struct nvkm_dmaobj *dmaobj)
{
union {
struct nv_dma_v0 v0;
} *args = *pdata;
struct nvkm_instmem *instmem = nvkm_instmem(parent);
struct nvkm_client *client = nvkm_client(parent);
struct nvkm_device *device = nv_device(parent);
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_dmaobj *dmaobj;
struct nvkm_device *device = dma->engine.subdev.device;
struct nvkm_client *client = oclass->client;
struct nvkm_object *parent = oclass->parent;
struct nvkm_instmem *instmem = device->imem;
struct nvkm_fb *fb = device->fb;
void *data = *pdata;
u32 size = *psize;
int ret;
ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
dmaobj = *pobject;
if (ret)
return ret;
nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
dmaobj->func = func;
dmaobj->dma = dma;
RB_CLEAR_NODE(&dmaobj->rb);
nvif_ioctl(parent, "create dma size %d\n", *psize);
if (nvif_unpack(args->v0, 0, 0, true)) {

@@ -1,29 +1,18 @@
#ifndef __NVKM_DMA_USER_H__
#define __NVKM_DMA_USER_H__
#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
#include "priv.h"
#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void **, u32 *,
int, void **);
#define _nvkm_dmaobj_dtor nvkm_object_destroy
#define _nvkm_dmaobj_init _nvkm_object_init
#define _nvkm_dmaobj_fini _nvkm_object_fini
extern struct nvkm_oclass nv04_dmaeng_sclass[];
extern struct nvkm_oclass nv50_dmaeng_sclass[];
extern struct nvkm_oclass gf100_dmaeng_sclass[];
extern struct nvkm_oclass gf110_dmaeng_sclass[];
int nv04_dmaobj_bind(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
struct nvkm_gpuobj **);
int nv50_dmaobj_bind(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
struct nvkm_gpuobj **);
int gf100_dmaobj_bind(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
struct nvkm_gpuobj **);
int gf110_dmaobj_bind(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
struct nvkm_gpuobj **);
int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
const struct nvkm_oclass *, void **data, u32 *size,
struct nvkm_dmaobj *);
int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
struct nvkm_dmaobj **);
int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
struct nvkm_dmaobj **);
int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
struct nvkm_dmaobj **);
int gf110_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
struct nvkm_dmaobj **);
#endif

@@ -21,6 +21,7 @@
*
* Authors: Ben Skeggs
*/
#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
#include "user.h"
#include <core/client.h>
@@ -36,18 +37,18 @@ struct gf100_dmaobj {
u32 flags5;
};
int
gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
struct nvkm_gpuobj **pgpuobj)
static int
gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct gf100_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
@@ -60,23 +61,32 @@ gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
return ret;
}
static int
gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
static const struct nvkm_dmaobj_func
gf100_dmaobj_func = {
.bind = gf100_dmaobj_bind,
};
int
gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_dma *dmaeng = (void *)engine;
union {
struct gf100_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct gf100_dmaobj *dmaobj;
u32 kind, user, unkn;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &dmaobj);
*pobject = nv_object(dmaobj);
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
args = data;
nvif_ioctl(parent, "create gf100 dma size %d\n", size);
@@ -103,7 +113,7 @@ gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (user > 2)
return -EINVAL;
dmaobj->flags0 |= (kind << 22) | (user << 20);
dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
dmaobj->flags5 |= (unkn << 16);
switch (dmaobj->base.target) {
@@ -135,21 +145,5 @@ gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
break;
}
return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
return 0;
}
static struct nvkm_ofuncs
gf100_dmaobj_ofuncs = {
.ctor = gf100_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
struct nvkm_oclass
gf100_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
{}
};

@@ -21,6 +21,7 @@
*
* Authors: Ben Skeggs
*/
#define gf110_dmaobj(p) container_of((p), struct gf110_dmaobj, base)
#include "user.h"
#include <core/client.h>
@@ -35,15 +36,15 @@ struct gf110_dmaobj {
u32 flags0;
};
int
gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
struct nvkm_gpuobj **pgpuobj)
static int
gf110_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct gf110_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
struct gf110_dmaobj *dmaobj = gf110_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
@@ -58,23 +59,32 @@ gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
return ret;
}
static int
gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
static const struct nvkm_dmaobj_func
gf110_dmaobj_func = {
.bind = gf110_dmaobj_bind,
};
int
gf110_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_dma *dmaeng = (void *)engine;
union {
struct gf110_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct gf110_dmaobj *dmaobj;
u32 kind, page;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &dmaobj);
*pobject = nv_object(dmaobj);
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&gf110_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
args = data;
nvif_ioctl(parent, "create gf110 dma size %d\n", size);
@@ -117,21 +127,5 @@ gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return -EINVAL;
}
return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
return 0;
}
static struct nvkm_ofuncs
gf110_dmaobj_ofuncs = {
.ctor = gf110_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
struct nvkm_oclass
gf110_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
{}
};

@@ -21,6 +21,7 @@
*
* Authors: Ben Skeggs
*/
#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"
#include <core/gpuobj.h>
@@ -36,19 +37,19 @@ struct nv04_dmaobj {
u32 flags2;
};
int
nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
struct nvkm_gpuobj **pgpuobj)
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv04_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
u64 offset = dmaobj->base.start & 0xfffff000;
u64 adjust = dmaobj->base.start & 0x00000fff;
u32 length = dmaobj->base.limit - dmaobj->base.start;
int ret;
if (dmaobj->clone) {
struct nv04_mmu *mmu = nv04_mmu(dmaobj);
struct nv04_mmu *mmu = nv04_mmu(device->mmu);
struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
@@ -58,7 +59,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
nvkm_done(pgt);
}
ret = nvkm_gpuobj_new(device, 16, 16, false, parent, pgpuobj);
ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
@@ -71,19 +72,26 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
return ret;
}
static int
nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
.bind = nv04_dmaobj_bind,
};
int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_dma *dmaeng = (void *)engine;
struct nv04_mmu *mmu = nv04_mmu(engine);
struct nv04_mmu *mmu = nv04_mmu(dma);
struct nv04_dmaobj *dmaobj;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &dmaobj);
*pobject = nv_object(dmaobj);
if (ret || (ret = -ENOSYS, size))
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
if (dmaobj->base.target == NV_MEM_TARGET_VM) {
@@ -93,7 +101,7 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
dmaobj->base.access = NV_MEM_ACCESS_RW;
}
dmaobj->flags0 = nv_mclass(dmaobj);
dmaobj->flags0 = oclass->base.oclass;
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00003000;
@@ -121,21 +129,5 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return -EINVAL;
}
return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
return 0;
}
static struct nvkm_ofuncs
nv04_dmaobj_ofuncs = {
.ctor = nv04_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
struct nvkm_oclass
nv04_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
{}
};

@@ -21,6 +21,7 @@
*
* Authors: Ben Skeggs
*/
#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
#include "user.h"
#include <core/client.h>
@@ -36,18 +37,18 @@ struct nv50_dmaobj {
u32 flags5;
};
int
nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
struct nvkm_gpuobj **pgpuobj)
static int
nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv50_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
@@ -60,23 +61,32 @@ nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
return ret;
}
static int
nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
static const struct nvkm_dmaobj_func
nv50_dmaobj_func = {
.bind = nv50_dmaobj_bind,
};
int
nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_dma *dmaeng = (void *)engine;
union {
struct nv50_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct nv50_dmaobj *dmaobj;
u32 user, part, comp, kind;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &dmaobj);
*pobject = nv_object(dmaobj);
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
args = data;
nvif_ioctl(parent, "create nv50 dma size %d\n", size);
@@ -107,7 +117,8 @@ nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
return -EINVAL;
dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20);
dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
oclass->base.oclass;
dmaobj->flags5 = (part << 16);
switch (dmaobj->base.target) {
@@ -141,21 +152,5 @@ nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
return -EINVAL;
}
return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
return 0;
}
static struct nvkm_ofuncs
nv50_dmaobj_ofuncs = {
.ctor = nv50_dmaobj_ctor,
.dtor = _nvkm_dmaobj_dtor,
.init = _nvkm_dmaobj_init,
.fini = _nvkm_dmaobj_fini,
};
struct nvkm_oclass
nv50_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
{}
};

@@ -99,14 +99,13 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
u64 engmask, int len, void **ptr)
{
struct nvkm_client *client = nvkm_client(parent);
struct nvkm_handle *handle;
struct nvkm_dmaobj *dmaobj;
struct nvkm_fifo *fifo = (void *)engine;
struct nvkm_fifo_base *base = (void *)parent;
struct nvkm_fifo_chan *chan;
struct nvkm_dma *dma;
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_dma *dma = device->dma;
unsigned long flags;
int ret;
@@ -119,21 +118,12 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
/* validate dma object representing push buffer */
if (pushbuf) {
handle = nvkm_client_search(client, pushbuf);
if (!handle)
dmaobj = nvkm_dma_search(dma, client, pushbuf);
if (!dmaobj)
return -ENOENT;
dmaobj = (void *)handle->object;
dma = (void *)dmaobj->base.engine;
switch (dmaobj->base.oclass->handle) {
case NV_DMA_FROM_MEMORY:
case NV_DMA_IN_MEMORY:
break;
default:
return -EINVAL;
}
ret = dma->bind(dmaobj, &base->gpuobj, &chan->pushgpu);
ret = dmaobj->func->bind(dmaobj, &base->gpuobj, 16,
&chan->pushgpu);
if (ret)
return ret;
}