linux/drivers/gpu/drm/nouveau/nouveau_sgdma.c
Christian König dde5da2379 drm/ttm: add bo as parameter to the ttm_tt_create callback
Instead of calculating the size in bytes just to recalculate the number
of pages from it, pass the BO directly to the function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2018-03-14 14:38:27 -05:00

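For context, the driver-side effect of this change shows up in nouveau_sgdma_create_ttm() below: the hook now receives the ttm_buffer_object itself rather than a size in bytes, and takes the device and page count from it. A rough before/after sketch of the prototype (the old form is reconstructed from the commit description, so treat it as an approximation rather than the exact upstream declaration):

/* Before (approximate): the size was passed and converted back into pages. */
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, unsigned long size,
			 uint32_t page_flags);

/* After: the BO carries both the device and the number of pages. */
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags);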

// SPDX-License-Identifier: GPL-2.0
#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so the populate/unpopulate hooks
	 * in nouveau_bo.c work properly, otherwise they would have to move
	 * here
	 */
	struct ttm_dma_tt ttm;
	struct nouveau_mem *mem;
};

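/* Common destroy hook for both backends: tear down the DMA-aware ttm_tt and
 * free the backend object allocated in nouveau_sgdma_create_ttm().
 */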
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

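/* Bind hook for pre-Tesla GPUs: describe the ttm pages as a nouveau_mem and
 * map them into the client's VMM immediately; unbind tears the mapping down
 * again via nouveau_mem_fini().
 */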
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	ret = nouveau_mem_host(reg, &nvbe->ttm);
	if (ret)
		return ret;

	ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
	if (ret) {
		nouveau_mem_fini(mem);
		return ret;
	}

	nvbe->mem = mem;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	nouveau_mem_fini(nvbe->mem);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

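/* Bind hook for Tesla (NV50) and newer: only the host-side nouveau_mem is set
 * up here; mapping into a VMM is left to the callers that actually need a VMA.
 */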
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	ret = nouveau_mem_host(reg, &nvbe->ttm);
	if (ret)
		return ret;

	nvbe->mem = mem;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

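/* ttm_tt_create hook: allocate the backend, pick the per-generation
 * ttm_backend_func from the chipset family, and initialise the DMA-aware
 * ttm_tt directly from the BO handed in by TTM.
 */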
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}