drm/nouveau/fb: transition nvkm_ram away from being based on nvkm_object
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a8dae9fe0e
commit d36a99d2da
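The diff below drops the nvkm_object plumbing from nvkm_ram and replaces it with a plain function table plus per-chipset constructors. As a quick orientation aid (not part of the commit itself, condensed from the fb.h, ram.h and base.c hunks that follow), the new interface has roughly this shape:

/* Condensed sketch of the interface introduced below -- see the fb.h/ram.h hunks. */
struct nvkm_ram_func {
	void *(*dtor)(struct nvkm_ram *);
	int (*init)(struct nvkm_ram *);
	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
		   u32 type, struct nvkm_mem **);
	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
	int (*calc)(struct nvkm_ram *, u32 freq);
	int (*prog)(struct nvkm_ram *);
	void (*tidy)(struct nvkm_ram *);
};

/* Each chipset now exposes a constructor instead of an nvkm_oclass, e.g.: */
int gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram);

/* and nvkm_fb_create_() reaches it through the new nvkm_fb_impl.ram_new hook:
 *	ret = impl->ram_new(fb, &fb->ram);
 */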
@@ -27,7 +27,7 @@ struct nvkm_mm {
static inline bool
nvkm_mm_initialised(struct nvkm_mm *mm)
{
	return mm->block_size != 0;
	return mm->heap_nodes;
}

int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@ int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
		 u32 size_min, u32 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);
#endif
@@ -18,7 +18,7 @@
#define NV_MEM_TARGET_VM 3
#define NV_MEM_TARGET_GART 4

#define NV_MEM_TYPE_VM 0x7f
#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03

struct nvkm_mem {
@@ -52,9 +52,6 @@ struct nvkm_fb {

	struct nvkm_ram *ram;

	struct nvkm_mm vram;
	struct nvkm_mm tags;

	struct {
		struct nvkm_fb_tile region[16];
		int regions;
@@ -112,36 +109,35 @@ struct nvkm_ram_data {
	u32 freq;
};

enum nvkm_ram_type {
	NVKM_RAM_TYPE_UNKNOWN = 0,
	NVKM_RAM_TYPE_STOLEN,
	NVKM_RAM_TYPE_SGRAM,
	NVKM_RAM_TYPE_SDRAM,
	NVKM_RAM_TYPE_DDR1,
	NVKM_RAM_TYPE_DDR2,
	NVKM_RAM_TYPE_DDR3,
	NVKM_RAM_TYPE_GDDR2,
	NVKM_RAM_TYPE_GDDR3,
	NVKM_RAM_TYPE_GDDR4,
	NVKM_RAM_TYPE_GDDR5
};

struct nvkm_ram {
	struct nvkm_object base;
	enum {
		NV_MEM_TYPE_UNKNOWN = 0,
		NV_MEM_TYPE_STOLEN,
		NV_MEM_TYPE_SGRAM,
		NV_MEM_TYPE_SDRAM,
		NV_MEM_TYPE_DDR1,
		NV_MEM_TYPE_DDR2,
		NV_MEM_TYPE_DDR3,
		NV_MEM_TYPE_GDDR2,
		NV_MEM_TYPE_GDDR3,
		NV_MEM_TYPE_GDDR4,
		NV_MEM_TYPE_GDDR5
	} type;
	u64 stolen;
	const struct nvkm_ram_func *func;
	struct nvkm_fb *fb;
	enum nvkm_ram_type type;
	u64 size;
	u32 tags;

#define NVKM_RAM_MM_SHIFT 12
	struct nvkm_mm vram;
	struct nvkm_mm tags;
	u64 stolen;

	int ranks;
	int parts;
	int part_mask;

	int (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
		   u32 type, struct nvkm_mem **);
	void (*put)(struct nvkm_fb *, struct nvkm_mem **);

	int (*calc)(struct nvkm_fb *, u32 freq);
	int (*prog)(struct nvkm_fb *);
	void (*tidy)(struct nvkm_fb *);
	u32 freq;
	u32 mr[16];
	u32 mr1_nuts;
@@ -151,4 +147,17 @@ struct nvkm_ram {
	struct nvkm_ram_data xition;
	struct nvkm_ram_data target;
};

struct nvkm_ram_func {
	void *(*dtor)(struct nvkm_ram *);
	int (*init)(struct nvkm_ram *);

	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
		   u32 type, struct nvkm_mem **);
	void (*put)(struct nvkm_ram *, struct nvkm_mem **);

	int (*calc)(struct nvkm_ram *, u32 freq);
	int (*prog)(struct nvkm_ram *);
	void (*tidy)(struct nvkm_ram *);
};
#endif
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	nvkm_mem_node_cleanup(mem->mm_node);
	fb->ram->put(fb, (struct nvkm_mem **)&mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

static int
@@ -76,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
@@ -88,9 +88,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = fb->ram->get(fb, mem->num_pages << PAGE_SHIFT,
			   mem->page_alignment << PAGE_SHIFT, size_nc,
			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
	return 0;
}

static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *fb = man->priv;
	struct nvkm_mm *mm = &fb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(fb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(fb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
@@ -26,7 +26,7 @@
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)

static void
void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
	struct nvkm_mm_node *node;
@@ -569,7 +569,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
	else if (device->chipset < 0xa0)
		gr_def(ctx, 0x407d08, 0x00390040);
	else {
		if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
		if (nvkm_fb(device)->ram->type != NVKM_RAM_TYPE_GDDR5)
			gr_def(ctx, 0x407d08, 0x003d0040);
		else
			gr_def(ctx, 0x407d08, 0x003c0040);
@@ -174,7 +174,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_ram *ram = subdev->device->fb->ram;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

@@ -186,14 +186,14 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	if (fb->ram && fb->ram->calc) {
	if (ram && ram->func->calc) {
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = fb->ram->calc(fb, khz);
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = fb->ram->prog(fb);
				ret = ram->func->prog(ram);
		} while (ret > 0);
		fb->ram->tidy(fb);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, 0);
@@ -23,6 +23,8 @@ nvkm-y += nvkm/subdev/fb/gf100.o
nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o

nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
nvkm-y += nvkm/subdev/fb/ramnv10.o
nvkm-y += nvkm/subdev/fb/ramnv1a.o
@@ -22,6 +22,7 @@
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "ram.h"

#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
@@ -37,32 +38,24 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)

	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
		switch (M0203E.type) {
		case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
		case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
		case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
		case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
		default:
			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
			return NV_MEM_TYPE_UNKNOWN;
			return NVKM_RAM_TYPE_UNKNOWN;
		}
	}

	nvkm_warn(subdev, "M0203E not matched!\n");
	return NV_MEM_TYPE_UNKNOWN;
	return NVKM_RAM_TYPE_UNKNOWN;
}

int
_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fb *fb = (void *)object;
	int ret;

	if (fb->ram) {
		ret = nv_ofuncs(fb->ram)->fini(nv_object(fb->ram), suspend);
		if (ret && suspend)
			return ret;
	}

	return nvkm_subdev_fini(&fb->subdev, suspend);
}

@@ -76,11 +69,8 @@ _nvkm_fb_init(struct nvkm_object *object)
	if (ret)
		return ret;

	if (fb->ram) {
		ret = nv_ofuncs(fb->ram)->init(nv_object(fb->ram));
		if (ret)
			return ret;
	}
	if (fb->ram)
		nvkm_ram_init(fb->ram);

	for (i = 0; i < fb->tile.regions; i++)
		fb->tile.prog(fb, i, &fb->tile.region[i]);
@@ -96,13 +86,8 @@ _nvkm_fb_dtor(struct nvkm_object *object)

	for (i = 0; i < fb->tile.regions; i++)
		fb->tile.fini(fb, i, &fb->tile.region[i]);
	nvkm_mm_fini(&fb->tags);

	if (fb->ram) {
		nvkm_mm_fini(&fb->vram);
		nvkm_object_ref(NULL, (struct nvkm_object **)&fb->ram);
	}

	nvkm_ram_del(&fb->ram);
	nvkm_subdev_destroy(&fb->subdev);
}

@@ -111,20 +96,6 @@ nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, int length, void **pobject)
{
	struct nvkm_fb_impl *impl = (void *)oclass;
	static const char *name[] = {
		[NV_MEM_TYPE_UNKNOWN] = "of unknown memory type",
		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
		[NV_MEM_TYPE_SGRAM ] = "SGRAM",
		[NV_MEM_TYPE_SDRAM ] = "SDRAM",
		[NV_MEM_TYPE_DDR1 ] = "DDR1",
		[NV_MEM_TYPE_DDR2 ] = "DDR2",
		[NV_MEM_TYPE_DDR3 ] = "DDR3",
		[NV_MEM_TYPE_GDDR2 ] = "GDDR2",
		[NV_MEM_TYPE_GDDR3 ] = "GDDR3",
		[NV_MEM_TYPE_GDDR4 ] = "GDDR4",
		[NV_MEM_TYPE_GDDR5 ] = "GDDR5",
	};
	struct nvkm_object *ram;
	struct nvkm_fb *fb;
	int ret;

@@ -136,33 +107,14 @@ nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,

	fb->memtype_valid = impl->memtype;

	if (!impl->ram)
	if (!impl->ram_new)
		return 0;

	ret = nvkm_object_ctor(nv_object(fb), NULL, impl->ram, NULL, 0, &ram);
	ret = impl->ram_new(fb, &fb->ram);
	if (ret) {
		nvkm_error(&fb->subdev, "vram init failed, %d\n", ret);
		return ret;
	}

	fb->ram = (void *)ram;

	if (!nvkm_mm_initialised(&fb->vram)) {
		ret = nvkm_mm_init(&fb->vram, 0, fb->ram->size >> 12, 1);
		if (ret)
			return ret;
	}

	if (!nvkm_mm_initialised(&fb->tags)) {
		ret = nvkm_mm_init(&fb->tags, 0, fb->ram->tags ?
				   ++fb->ram->tags : 0, 1);
		if (ret)
			return ret;

		nvkm_debug(&fb->subdev, "%d compression tags\n", fb->ram->tags);
	}

	nvkm_info(&fb->subdev, "%d MiB %s\n", (int)(fb->ram->size >> 20),
		  name[fb->ram->type]);
	return 0;
}
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
g84_fb_oclass = &(struct nv50_fb_impl) {
|
||||
@ -33,6 +34,6 @@ g84_fb_oclass = &(struct nv50_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv50_fb_memtype_valid,
|
||||
.base.ram = &nv50_ram_oclass,
|
||||
.base.ram_new = nv50_ram_new,
|
||||
.trap = 0x001d07ff,
|
||||
}.base.base;
|
||||
|
@ -22,7 +22,7 @@
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
* Roy Spliet <rspliet@eclipso.eu>
|
||||
*/
|
||||
#include "priv.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct ramxlat {
|
||||
int id;
|
||||
|
@ -21,7 +21,7 @@
|
||||
*
|
||||
* Authors: Ben Skeggs <bskeggs@redhat.com>
|
||||
*/
|
||||
#include "priv.h"
|
||||
#include "ram.h"
|
||||
|
||||
/* binary driver only executes this path if the condition (a) is true
|
||||
* for any configuration (combination of rammap+ramcfg+timing) that
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gf100.h"
|
||||
#include "ram.h"
|
||||
|
||||
extern const u8 gf100_pte_storage_type_map[256];
|
||||
|
||||
@ -113,5 +114,5 @@ gf100_fb_oclass = &(struct nvkm_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.memtype = gf100_fb_memtype_valid,
|
||||
.ram = &gf100_ram_oclass,
|
||||
.ram_new = gf100_ram_new,
|
||||
}.base;
|
||||
|
@ -1,7 +1,6 @@
|
||||
#ifndef __NVKM_RAM_NVC0_H__
|
||||
#define __NVKM_RAM_NVC0_H__
|
||||
#include "priv.h"
|
||||
#include "nv50.h"
|
||||
|
||||
struct gf100_fb {
|
||||
struct nvkm_fb base;
|
||||
@ -15,14 +14,4 @@ int gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
|
||||
void gf100_fb_dtor(struct nvkm_object *);
|
||||
int gf100_fb_init(struct nvkm_object *);
|
||||
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
|
||||
|
||||
#define gf100_ram_create(p,e,o,m,d) \
|
||||
gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
|
||||
int gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
|
||||
struct nvkm_oclass *, u32, int, void **);
|
||||
int gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
|
||||
struct nvkm_mem **);
|
||||
void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
|
||||
|
||||
int gk104_ram_init(struct nvkm_object*);
|
||||
#endif
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gf100.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
gk104_fb_oclass = &(struct nvkm_fb_impl) {
|
||||
@ -33,5 +34,5 @@ gk104_fb_oclass = &(struct nvkm_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.memtype = gf100_fb_memtype_valid,
|
||||
.ram = &gk104_ram_oclass,
|
||||
.ram_new = gk104_ram_new,
|
||||
}.base;
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gf100.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
gm107_fb_oclass = &(struct nvkm_fb_impl) {
|
||||
@ -33,5 +34,5 @@ gm107_fb_oclass = &(struct nvkm_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.memtype = gf100_fb_memtype_valid,
|
||||
.ram = &gm107_ram_oclass,
|
||||
.ram_new = gm107_ram_new,
|
||||
}.base;
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
gt215_fb_oclass = &(struct nv50_fb_impl) {
|
||||
@ -33,6 +34,6 @@ gt215_fb_oclass = &(struct nv50_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv50_fb_memtype_valid,
|
||||
.base.ram = >215_ram_oclass,
|
||||
.base.ram_new = gt215_ram_new,
|
||||
.trap = 0x000d0fff,
|
||||
}.base.base;
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
mcp77_fb_oclass = &(struct nv50_fb_impl) {
|
||||
@ -33,6 +34,6 @@ mcp77_fb_oclass = &(struct nv50_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv50_fb_memtype_valid,
|
||||
.base.ram = &mcp77_ram_oclass,
|
||||
.base.ram_new = mcp77_ram_new,
|
||||
.trap = 0x001d07ff,
|
||||
}.base.base;
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
mcp89_fb_oclass = &(struct nv50_fb_impl) {
|
||||
@ -33,6 +34,6 @@ mcp89_fb_oclass = &(struct nv50_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv50_fb_memtype_valid,
|
||||
.base.ram = &mcp77_ram_oclass,
|
||||
.base.ram_new = mcp77_ram_new,
|
||||
.trap = 0x089d1fff,
|
||||
}.base.base;
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
#include "regsnv04.h"
|
||||
|
||||
bool
|
||||
@ -84,5 +85,5 @@ nv04_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv04_ram_oclass,
|
||||
.base.ram_new = nv04_ram_new,
|
||||
}.base.base;
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
|
||||
@ -63,7 +64,7 @@ nv10_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv10_ram_oclass,
|
||||
.base.ram_new = nv10_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv10_fb_tile_init,
|
||||
.tile.fini = nv10_fb_tile_fini,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv1a_fb_oclass = &(struct nv04_fb_impl) {
|
||||
@ -35,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv1a_ram_oclass,
|
||||
.base.ram_new = nv1a_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv10_fb_tile_init,
|
||||
.tile.fini = nv10_fb_tile_fini,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
|
||||
@ -44,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
{
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x40);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
|
||||
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
|
||||
else tile->zcomp = 0x04000000; /* Z24S8 */
|
||||
tile->zcomp |= tile->tag->offset;
|
||||
@ -62,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
|
||||
tile->limit = 0;
|
||||
tile->pitch = 0;
|
||||
tile->zcomp = 0;
|
||||
nvkm_mm_free(&fb->tags, &tile->tag);
|
||||
nvkm_mm_free(&fb->ram->tags, &tile->tag);
|
||||
}
|
||||
|
||||
void
|
||||
@ -86,7 +87,7 @@ nv20_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv20_ram_oclass,
|
||||
.base.ram_new = nv20_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv20_fb_tile_init,
|
||||
.tile.comp = nv20_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
static void
|
||||
nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
@ -31,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
{
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x40);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
|
||||
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
|
||||
else tile->zcomp = 0x00200000; /* Z24S8 */
|
||||
tile->zcomp |= tile->tag->offset;
|
||||
@ -51,7 +52,7 @@ nv25_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv20_ram_oclass,
|
||||
.base.ram_new = nv20_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv20_fb_tile_init,
|
||||
.tile.comp = nv25_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
|
||||
@ -50,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
{
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x40);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
|
||||
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
|
||||
else tile->zcomp |= 0x02000000; /* Z24S8 */
|
||||
tile->zcomp |= ((tile->tag->offset ) >> 6);
|
||||
@ -130,7 +131,7 @@ nv30_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv20_ram_oclass,
|
||||
.base.ram_new = nv20_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv30_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
static void
|
||||
nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
@ -31,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
{
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x40);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
|
||||
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
|
||||
else tile->zcomp |= 0x08000000; /* Z24S8 */
|
||||
tile->zcomp |= ((tile->tag->offset ) >> 6);
|
||||
@ -52,7 +53,7 @@ nv35_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv20_ram_oclass,
|
||||
.base.ram_new = nv20_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv35_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
static void
|
||||
nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
@ -31,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
{
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x40);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
|
||||
if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
|
||||
else tile->zcomp |= 0x20000000; /* Z24S8 */
|
||||
tile->zcomp |= ((tile->tag->offset ) >> 6);
|
||||
@ -52,7 +53,7 @@ nv36_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv20_ram_oclass,
|
||||
.base.ram_new = nv20_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv36_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
@ -32,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
|
||||
u32 tiles = DIV_ROUND_UP(size, 0x80);
|
||||
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
|
||||
if ( (flags & 2) &&
|
||||
!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
|
||||
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
|
||||
tile->zcomp |= ((tile->tag->offset ) >> 8);
|
||||
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
|
||||
@ -67,7 +68,7 @@ nv40_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv40_ram_oclass,
|
||||
.base.ram_new = nv40_ram_new,
|
||||
.tile.regions = 8,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv40_fb_tile_comp,
|
||||
|
@@ -1,14 +0,0 @@
#ifndef __NVKM_FB_NV40_H__
#define __NVKM_FB_NV40_H__
#include "priv.h"

struct nv40_ram {
	struct nvkm_ram base;
	u32 ctrl;
	u32 coef;
};

int nv40_ram_calc(struct nvkm_fb *, u32);
int nv40_ram_prog(struct nvkm_fb *);
void nv40_ram_tidy(struct nvkm_fb *);
#endif
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
|
||||
@ -61,7 +62,7 @@ nv41_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv41_ram_oclass,
|
||||
.base.ram_new = nv41_ram_new,
|
||||
.tile.regions = 12,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv40_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
static void
|
||||
nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
|
||||
@ -71,7 +72,7 @@ nv44_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv44_ram_oclass,
|
||||
.base.ram_new = nv44_ram_new,
|
||||
.tile.regions = 12,
|
||||
.tile.init = nv44_fb_tile_init,
|
||||
.tile.fini = nv20_fb_tile_fini,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
void
|
||||
nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
|
||||
@ -49,7 +50,7 @@ nv46_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv44_ram_oclass,
|
||||
.base.ram_new = nv44_ram_new,
|
||||
.tile.regions = 15,
|
||||
.tile.init = nv46_fb_tile_init,
|
||||
.tile.fini = nv20_fb_tile_fini,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv47_fb_oclass = &(struct nv04_fb_impl) {
|
||||
@ -35,7 +36,7 @@ nv47_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv41_ram_oclass,
|
||||
.base.ram_new = nv41_ram_new,
|
||||
.tile.regions = 15,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv40_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv49_fb_oclass = &(struct nv04_fb_impl) {
|
||||
@ -35,7 +36,7 @@ nv49_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv49_ram_oclass,
|
||||
.base.ram_new = nv49_ram_new,
|
||||
.tile.regions = 15,
|
||||
.tile.init = nv30_fb_tile_init,
|
||||
.tile.comp = nv40_fb_tile_comp,
|
||||
|
@ -24,6 +24,7 @@
|
||||
*
|
||||
*/
|
||||
#include "nv04.h"
|
||||
#include "ram.h"
|
||||
|
||||
struct nvkm_oclass *
|
||||
nv4e_fb_oclass = &(struct nv04_fb_impl) {
|
||||
@ -35,7 +36,7 @@ nv4e_fb_oclass = &(struct nv04_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv04_fb_memtype_valid,
|
||||
.base.ram = &nv4e_ram_oclass,
|
||||
.base.ram_new = nv4e_ram_new,
|
||||
.tile.regions = 12,
|
||||
.tile.init = nv46_fb_tile_init,
|
||||
.tile.fini = nv20_fb_tile_fini,
|
||||
|
@ -22,6 +22,7 @@
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#include "ram.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/engctx.h>
|
||||
@ -299,6 +300,6 @@ nv50_fb_oclass = &(struct nv50_fb_impl) {
|
||||
.fini = _nvkm_fb_fini,
|
||||
},
|
||||
.base.memtype = nv50_fb_memtype_valid,
|
||||
.base.ram = &nv50_ram_oclass,
|
||||
.base.ram_new = nv50_ram_new,
|
||||
.trap = 0x000707ff,
|
||||
}.base.base;
|
||||
|
@ -19,13 +19,5 @@ struct nv50_fb_impl {
|
||||
u32 trap;
|
||||
};
|
||||
|
||||
#define nv50_ram_create(p,e,o,d) \
|
||||
nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
|
||||
int nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
|
||||
struct nvkm_oclass *, int, void **);
|
||||
int nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
|
||||
u32 memtype, struct nvkm_mem **);
|
||||
void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
|
||||
void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
|
||||
extern int nv50_fb_memtype[0x80];
|
||||
#endif
|
||||
|
@@ -3,42 +3,6 @@
#include <subdev/fb.h>
struct nvkm_bios;

#define nvkm_ram_create(p,e,o,d) \
	nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
#define nvkm_ram_destroy(p) \
	nvkm_object_destroy(&(p)->base)
#define nvkm_ram_init(p) \
	nvkm_object_init(&(p)->base)
#define nvkm_ram_fini(p,s) \
	nvkm_object_fini(&(p)->base, (s))

#define nvkm_ram_create_(p,e,o,s,d) \
	nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
#define _nvkm_ram_dtor nvkm_object_destroy
#define _nvkm_ram_init nvkm_object_init
#define _nvkm_ram_fini nvkm_object_fini

extern struct nvkm_oclass nv04_ram_oclass;
extern struct nvkm_oclass nv10_ram_oclass;
extern struct nvkm_oclass nv1a_ram_oclass;
extern struct nvkm_oclass nv20_ram_oclass;
extern struct nvkm_oclass nv40_ram_oclass;
extern struct nvkm_oclass nv41_ram_oclass;
extern struct nvkm_oclass nv44_ram_oclass;
extern struct nvkm_oclass nv49_ram_oclass;
extern struct nvkm_oclass nv4e_ram_oclass;
extern struct nvkm_oclass nv50_ram_oclass;
extern struct nvkm_oclass gt215_ram_oclass;
extern struct nvkm_oclass mcp77_ram_oclass;
extern struct nvkm_oclass gf100_ram_oclass;
extern struct nvkm_oclass gk104_ram_oclass;
extern struct nvkm_oclass gm107_ram_oclass;

int nvkm_sddr2_calc(struct nvkm_ram *ram);
int nvkm_sddr3_calc(struct nvkm_ram *ram);
int nvkm_gddr3_calc(struct nvkm_ram *ram);
int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);

#define nvkm_fb_create(p,e,c,d) \
	nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
#define nvkm_fb_destroy(p) ({ \
@@ -62,7 +26,7 @@ int _nvkm_fb_fini(struct nvkm_object *, bool);

struct nvkm_fb_impl {
	struct nvkm_oclass base;
	struct nvkm_oclass *ram;
	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
	bool (*memtype)(struct nvkm_fb *, u32);
};
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "ram.h"

int
nvkm_ram_init(struct nvkm_ram *ram)
{
	if (ram->func->init)
		return ram->func->init(ram);
	return 0;
}

void
nvkm_ram_del(struct nvkm_ram **pram)
{
	struct nvkm_ram *ram = *pram;
	if (ram && !WARN_ON(!ram->func)) {
		if (ram->func->dtor)
			*pram = ram->func->dtor(ram);
		nvkm_mm_fini(&ram->tags);
		nvkm_mm_fini(&ram->vram);
		kfree(*pram);
		*pram = NULL;
	}
}

int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	      enum nvkm_ram_type type, u64 size, u32 tags,
	      struct nvkm_ram *ram)
{
	static const char *name[] = {
		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
		[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
		[NVKM_RAM_TYPE_SGRAM ] = "SGRAM",
		[NVKM_RAM_TYPE_SDRAM ] = "SDRAM",
		[NVKM_RAM_TYPE_DDR1 ] = "DDR1",
		[NVKM_RAM_TYPE_DDR2 ] = "DDR2",
		[NVKM_RAM_TYPE_DDR3 ] = "DDR3",
		[NVKM_RAM_TYPE_GDDR2 ] = "GDDR2",
		[NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
		[NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
		[NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
	};
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
	ram->func = func;
	ram->fb = fb;
	ram->type = type;
	ram->size = size;

	if (!nvkm_mm_initialised(&ram->vram)) {
		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
		if (ret)
			return ret;
	}

	if (!nvkm_mm_initialised(&ram->tags)) {
		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
		if (ret)
			return ret;

		nvkm_debug(subdev, "%d compression tags\n", tags);
	}

	return 0;
}

int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
	      enum nvkm_ram_type type, u64 size, u32 tags,
	      struct nvkm_ram **pram)
{
	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
}
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h (new file, 50 lines)
@@ -0,0 +1,50 @@
#ifndef __NVKM_FB_RAM_PRIV_H__
#define __NVKM_FB_RAM_PRIV_H__
#include "priv.h"

int nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
		  enum nvkm_ram_type, u64 size, u32 tags,
		  struct nvkm_ram *);
int nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
		  enum nvkm_ram_type, u64 size, u32 tags,
		  struct nvkm_ram **);
void nvkm_ram_del(struct nvkm_ram **);
int nvkm_ram_init(struct nvkm_ram *);

extern const struct nvkm_ram_func nv04_ram_func;

int nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
		  struct nvkm_ram *);
int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);

int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
		   u32, struct nvkm_ram *);
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);

int gk104_ram_init(struct nvkm_ram *ram);

/* RAM type-specific MR calculation routines */
int nvkm_sddr2_calc(struct nvkm_ram *);
int nvkm_sddr3_calc(struct nvkm_ram *);
int nvkm_gddr3_calc(struct nvkm_ram *);
int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);

int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
@ -21,7 +21,8 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "gf100.h"
|
||||
#define gf100_ram(p) container_of((p), struct gf100_ram, base)
|
||||
#include "ram.h"
|
||||
#include "ramfuc.h"
|
||||
|
||||
#include <core/option.h>
|
||||
@ -107,7 +108,7 @@ static void
|
||||
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
|
||||
{
|
||||
struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
|
||||
struct nvkm_fb *fb = nvkm_fb(ram);
|
||||
struct nvkm_fb *fb = ram->base.fb;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 part = nvkm_rd32(device, 0x022438), i;
|
||||
u32 mask = nvkm_rd32(device, 0x022554);
|
||||
@ -124,14 +125,14 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
gf100_ram_calc(struct nvkm_ram *base, u32 freq)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct gf100_ram *ram = gf100_ram(base);
|
||||
struct gf100_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_clk *clk = device->clk;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct gf100_ram *ram = (void *)fb->ram;
|
||||
struct gf100_ramfuc *fuc = &ram->fuc;
|
||||
struct nvbios_ramcfg cfg;
|
||||
u8 ver, cnt, len, strap;
|
||||
struct {
|
||||
@ -152,7 +153,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
|
||||
/* locate specific data set for the attached memory */
|
||||
strap = nvbios_ramcfg_index(nv_subdev(fb));
|
||||
strap = nvbios_ramcfg_index(subdev);
|
||||
if (strap >= cnt) {
|
||||
nvkm_error(subdev, "invalid ramcfg strap\n");
|
||||
return -EINVAL;
|
||||
@ -177,7 +178,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
timing.data = 0;
|
||||
}
|
||||
|
||||
ret = ram_init(fuc, fb);
|
||||
ret = ram_init(fuc, ram->base.fb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -212,8 +213,8 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
|
||||
if (mode == 1 && from == 0) {
|
||||
/* calculate refpll */
|
||||
ret = gt215_pll_calc(nv_subdev(fb), &ram->refpll,
|
||||
ram->mempll.refclk, &N1, NULL, &M1, &P);
|
||||
ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
|
||||
&N1, NULL, &M1, &P);
|
||||
if (ret <= 0) {
|
||||
nvkm_error(subdev, "unable to calc refpll\n");
|
||||
return ret ? ret : -ERANGE;
|
||||
@ -227,7 +228,7 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
|
||||
|
||||
/* calculate mempll */
|
||||
ret = gt215_pll_calc(nv_subdev(fb), &ram->mempll, freq,
|
||||
ret = gt215_pll_calc(subdev, &ram->mempll, freq,
|
||||
&N1, NULL, &M1, &P);
|
||||
if (ret <= 0) {
|
||||
nvkm_error(subdev, "unable to calc refpll\n");
|
||||
@ -404,49 +405,48 @@ gf100_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_ram_prog(struct nvkm_fb *fb)
|
||||
gf100_ram_prog(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_device *device = nv_device(fb);
|
||||
struct gf100_ram *ram = (void *)fb->ram;
|
||||
struct gf100_ramfuc *fuc = &ram->fuc;
|
||||
ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
|
||||
struct gf100_ram *ram = gf100_ram(base);
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_ram_tidy(struct nvkm_fb *fb)
|
||||
gf100_ram_tidy(struct nvkm_ram *base)
|
||||
{
|
||||
struct gf100_ram *ram = (void *)fb->ram;
|
||||
struct gf100_ramfuc *fuc = &ram->fuc;
|
||||
ram_exec(fuc, false);
|
||||
struct gf100_ram *ram = gf100_ram(base);
|
||||
ram_exec(&ram->fuc, false);
|
||||
}
|
||||
|
||||
extern const u8 gf100_pte_storage_type_map[256];
|
||||
|
||||
void
|
||||
gf100_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
|
||||
gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
|
||||
{
|
||||
struct nvkm_ltc *ltc = nvkm_ltc(fb);
|
||||
struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
|
||||
struct nvkm_mem *mem = *pmem;
|
||||
|
||||
*pmem = NULL;
|
||||
if (unlikely(mem == NULL))
|
||||
return;
|
||||
|
||||
mutex_lock(&fb->subdev.mutex);
|
||||
mutex_lock(&ram->fb->subdev.mutex);
|
||||
if (mem->tag)
|
||||
ltc->tags_free(ltc, &mem->tag);
|
||||
__nv50_ram_put(fb, mem);
|
||||
mutex_unlock(&fb->subdev.mutex);
|
||||
__nv50_ram_put(ram, mem);
|
||||
mutex_unlock(&ram->fb->subdev.mutex);
|
||||
|
||||
kfree(mem);
|
||||
}
|
||||
|
||||
int
|
||||
gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
|
||||
gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
|
||||
u32 memtype, struct nvkm_mem **pmem)
|
||||
{
|
||||
struct nvkm_mm *mm = &fb->vram;
|
||||
struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
|
||||
struct nvkm_mm *mm = &ram->vram;
|
||||
struct nvkm_mm_node *r;
|
||||
struct nvkm_mem *mem;
|
||||
int type = (memtype & 0x0ff);
|
||||
@ -454,9 +454,9 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
|
||||
const bool comp = gf100_pte_storage_type_map[type] != type;
|
||||
int ret;
|
||||
|
||||
size >>= 12;
|
||||
align >>= 12;
|
||||
ncmin >>= 12;
|
||||
size >>= NVKM_RAM_MM_SHIFT;
|
||||
align >>= NVKM_RAM_MM_SHIFT;
|
||||
ncmin >>= NVKM_RAM_MM_SHIFT;
|
||||
if (!ncmin)
|
||||
ncmin = size;
|
||||
|
||||
@ -467,12 +467,10 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
|
||||
INIT_LIST_HEAD(&mem->regions);
|
||||
mem->size = size;
|
||||
|
||||
mutex_lock(&fb->subdev.mutex);
|
||||
mutex_lock(&ram->fb->subdev.mutex);
|
||||
if (comp) {
|
||||
struct nvkm_ltc *ltc = nvkm_ltc(fb);
|
||||
|
||||
/* compression only works with lpages */
|
||||
if (align == (1 << (17 - 12))) {
|
||||
if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
|
||||
int n = size >> 5;
|
||||
ltc->tags_alloc(ltc, n, &mem->tag);
|
||||
}
|
||||
@ -488,157 +486,158 @@ gf100_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
|
||||
else
|
||||
ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
|
||||
if (ret) {
|
||||
mutex_unlock(&fb->subdev.mutex);
|
||||
fb->ram->put(fb, &mem);
|
||||
mutex_unlock(&ram->fb->subdev.mutex);
|
||||
ram->func->put(ram, &mem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
list_add_tail(&r->rl_entry, &mem->regions);
|
||||
size -= r->length;
|
||||
} while (size);
|
||||
mutex_unlock(&fb->subdev.mutex);
|
||||
mutex_unlock(&ram->fb->subdev.mutex);
|
||||
|
||||
r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
|
||||
mem->offset = (u64)r->offset << 12;
|
||||
mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
|
||||
*pmem = mem;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, u32 maskaddr, int size,
|
||||
void **pobject)
|
||||
static int
|
||||
gf100_ram_init(struct nvkm_ram *base)
|
||||
{
|
||||
static const u8 train0[] = {
|
||||
0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
|
||||
0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
|
||||
};
|
||||
static const u32 train1[] = {
|
||||
0x00000000, 0xffffffff,
|
||||
0x55555555, 0xaaaaaaaa,
|
||||
0x33333333, 0xcccccccc,
|
||||
0xf0f0f0f0, 0x0f0f0f0f,
|
||||
0x00ff00ff, 0xff00ff00,
|
||||
0x0000ffff, 0xffff0000,
|
||||
};
|
||||
struct gf100_ram *ram = gf100_ram(base);
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
int i;
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NVKM_RAM_TYPE_GDDR5:
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* prepare for ddr link training, and load training patterns */
|
||||
for (i = 0; i < 0x30; i++) {
|
||||
nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
|
||||
nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
|
||||
nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f918, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f918, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nvkm_ram_func
|
||||
gf100_ram_func = {
|
||||
.init = gf100_ram_init,
|
||||
.get = gf100_ram_get,
|
||||
.put = gf100_ram_put,
|
||||
.calc = gf100_ram_calc,
|
||||
.prog = gf100_ram_prog,
|
||||
.tidy = gf100_ram_tidy,
|
||||
};
|
||||
|
||||
int
|
||||
gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
|
||||
u32 maskaddr, struct nvkm_ram *ram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct nvkm_ram *ram;
|
||||
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
|
||||
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
|
||||
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
|
||||
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
|
||||
u32 parts = nvkm_rd32(device, 0x022438);
|
||||
u32 pmask = nvkm_rd32(device, maskaddr);
|
||||
u32 bsize = nvkm_rd32(device, 0x10f20c);
|
||||
u32 offset, length;
|
||||
u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
|
||||
u64 psize, size = 0;
|
||||
enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
|
||||
bool uniform = true;
|
||||
int ret, part;
|
||||
|
||||
ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
|
||||
ram = *pobject;
|
||||
if (ret)
|
||||
return ret;
|
||||
int ret, i;
|
||||
|
||||
nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
|
||||
nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
|
||||
|
||||
ram->type = nvkm_fb_bios_memtype(bios);
|
||||
ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
|
||||
|
||||
/* read amount of vram attached to each memory controller */
|
||||
for (part = 0; part < parts; part++) {
|
||||
if (!(pmask & (1 << part))) {
|
||||
u32 size = nvkm_rd32(device, 0x11020c + (part * 0x1000));
|
||||
if (size != bsize) {
|
||||
if (size < bsize)
|
||||
bsize = size;
|
||||
uniform = false;
|
||||
}
|
||||
for (i = 0; i < parts; i++) {
|
||||
if (pmask & (1 << i))
|
||||
continue;
|
||||
|
||||
nvkm_debug(subdev, "%d: size %08x\n", part, size);
|
||||
ram->size += (u64)size << 20;
|
||||
psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
|
||||
if (psize != bsize) {
|
||||
if (psize < bsize)
|
||||
bsize = psize;
|
||||
uniform = false;
|
||||
}
|
||||
|
||||
nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
|
||||
size += psize;
|
||||
}
|
||||
|
||||
ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvkm_mm_fini(&ram->vram);
|
||||
|
||||
/* if all controllers have the same amount attached, there's no holes */
|
||||
if (uniform) {
|
||||
offset = rsvd_head;
|
||||
length = (ram->size >> 12) - rsvd_head - rsvd_tail;
|
||||
ret = nvkm_mm_init(&fb->vram, offset, length, 1);
|
||||
ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
|
||||
(size - rsvd_head - rsvd_tail) >>
|
||||
NVKM_RAM_MM_SHIFT, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
/* otherwise, address lowest common amount from 0GiB */
|
||||
ret = nvkm_mm_init(&fb->vram, rsvd_head,
|
||||
(bsize << 8) * parts - rsvd_head, 1);
|
||||
ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
|
||||
((bsize * parts) - rsvd_head) >>
|
||||
NVKM_RAM_MM_SHIFT, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* and the rest starting from (8GiB + common_size) */
|
||||
offset = (0x0200000000ULL >> 12) + (bsize << 8);
|
||||
length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
|
||||
|
||||
ret = nvkm_mm_init(&fb->vram, offset, length, 1);
|
||||
ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
|
||||
NVKM_RAM_MM_SHIFT,
|
||||
(size - (bsize * parts) - rsvd_tail) >>
|
||||
NVKM_RAM_MM_SHIFT, 1);
|
||||
if (ret)
|
||||
nvkm_mm_fini(&fb->vram);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ram->get = gf100_ram_get;
|
||||
ram->put = gf100_ram_put;
|
||||
ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_ram_init(struct nvkm_object *object)
|
||||
int
|
||||
gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = (void *)object->parent;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct gf100_ram *ram = (void *)object;
|
||||
int ret, i;
|
||||
|
||||
ret = nvkm_ram_init(&ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* prepare for ddr link training, and load training patterns */
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_GDDR5: {
|
||||
static const u8 train0[] = {
|
||||
0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
|
||||
0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
|
||||
};
|
||||
static const u32 train1[] = {
|
||||
0x00000000, 0xffffffff,
|
||||
0x55555555, 0xaaaaaaaa,
|
||||
0x33333333, 0xcccccccc,
|
||||
0xf0f0f0f0, 0x0f0f0f0f,
|
||||
0x00ff00ff, 0xff00ff00,
|
||||
0x0000ffff, 0xffff0000,
|
||||
};
|
||||
|
||||
for (i = 0; i < 0x30; i++) {
|
||||
nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
|
||||
nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
|
||||
nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f918, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
|
||||
nvkm_wr32(device, 0x10f918, train1[i % 12]);
|
||||
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
|
||||
}
|
||||
} break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_bios *bios = subdev->device->bios;
|
||||
struct gf100_ram *ram;
|
||||
int ret;
|
||||
|
||||
ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pram = &ram->base;
|
||||
|
||||
ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -654,17 +653,6 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_GDDR5:
|
||||
ram->base.calc = gf100_ram_calc;
|
||||
ram->base.prog = gf100_ram_prog;
|
||||
ram->base.tidy = gf100_ram_tidy;
|
||||
break;
|
||||
default:
|
||||
nvkm_warn(subdev, "reclocking of this ram type unsupported\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
|
||||
ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
|
||||
ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
|
||||
@ -725,14 +713,3 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
gf100_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gf100_ram_ctor,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = gf100_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,8 +21,9 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#define gk104_ram(p) container_of((p), struct gk104_ram, base)
|
||||
#include "ram.h"
|
||||
#include "ramfuc.h"
|
||||
#include "gf100.h"
|
||||
|
||||
#include <core/option.h>
|
||||
#include <subdev/bios.h>
|
||||
@ -228,7 +229,7 @@ static void
|
||||
gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
|
||||
u32 _mask, u32 _data, u32 _copy)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(ram);
|
||||
struct nvkm_fb *fb = ram->base.fb;
|
||||
struct ramfuc *fuc = &ram->fuc.base;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 addr = 0x110000 + (reg->addr & 0xfff);
|
||||
@ -248,9 +249,8 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
|
||||
gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
|
||||
|
||||
static int
|
||||
gk104_ram_calc_gddr5(struct nvkm_fb *fb, u32 freq)
|
||||
gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_ram_data *next = ram->base.next;
|
||||
int vc = !next->bios.ramcfg_11_02_08;
|
||||
@ -674,9 +674,8 @@ gk104_ram_calc_gddr5(struct nvkm_fb *fb, u32 freq)
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
gk104_ram_calc_sddr3(struct nvkm_fb *fb, u32 freq)
|
||||
gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ramfuc *fuc = &ram->fuc;
|
||||
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
|
||||
const u32 runk0 = ram->fN1 << 16;
|
||||
@ -926,9 +925,9 @@ gk104_ram_calc_sddr3(struct nvkm_fb *fb, u32 freq)
|
||||
******************************************************************************/
|
||||
|
||||
static int
|
||||
gk104_ram_calc_data(struct nvkm_fb *fb, u32 khz, struct nvkm_ram_data *data)
|
||||
gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_ram_data *cfg;
|
||||
u32 mhz = khz / 1000;
|
||||
|
||||
@ -941,20 +940,19 @@ gk104_ram_calc_data(struct nvkm_fb *fb, u32 khz, struct nvkm_ram_data *data)
|
||||
}
|
||||
}
|
||||
|
||||
nvkm_error(&fb->subdev, "ramcfg data for %dMHz not found\n", mhz);
|
||||
nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
|
||||
gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
int refclk, i;
|
||||
int ret;
|
||||
|
||||
ret = ram_init(fuc, fb);
|
||||
ret = ram_init(fuc, ram->base.fb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -974,7 +972,7 @@ gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
|
||||
refclk = fuc->mempll.refclk;
|
||||
|
||||
/* calculate refpll coefficients */
|
||||
ret = gt215_pll_calc(nv_subdev(fb), &fuc->refpll, refclk, &ram->N1,
|
||||
ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
|
||||
&ram->fN1, &ram->M1, &ram->P1);
|
||||
fuc->mempll.refclk = ret;
|
||||
if (ret <= 0) {
|
||||
@ -991,7 +989,7 @@ gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
|
||||
fuc->mempll.min_p = 1;
|
||||
fuc->mempll.max_p = 2;
|
||||
|
||||
ret = gt215_pll_calc(nv_subdev(fb), &fuc->mempll, next->freq,
|
||||
ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
|
||||
&ram->N2, NULL, &ram->M2, &ram->P2);
|
||||
if (ret <= 0) {
|
||||
nvkm_error(subdev, "unable to calc mempll\n");
|
||||
@ -1006,15 +1004,15 @@ gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
|
||||
ram->base.freq = next->freq;
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
ret = nvkm_sddr3_calc(&ram->base);
|
||||
if (ret == 0)
|
||||
ret = gk104_ram_calc_sddr3(fb, next->freq);
|
||||
ret = gk104_ram_calc_sddr3(ram, next->freq);
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR5:
|
||||
case NVKM_RAM_TYPE_GDDR5:
|
||||
ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
|
||||
if (ret == 0)
|
||||
ret = gk104_ram_calc_gddr5(fb, next->freq);
|
||||
ret = gk104_ram_calc_gddr5(ram, next->freq);
|
||||
break;
|
||||
default:
|
||||
ret = -ENOSYS;
|
||||
@ -1025,21 +1023,21 @@ gk104_ram_calc_xits(struct nvkm_fb *fb, struct nvkm_ram_data *next)
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
gk104_ram_calc(struct nvkm_ram *base, u32 freq)
|
||||
{
|
||||
struct nvkm_clk *clk = nvkm_clk(fb);
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ram *ram = gk104_ram(base);
|
||||
struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
|
||||
struct nvkm_ram_data *xits = &ram->base.xition;
|
||||
struct nvkm_ram_data *copy;
|
||||
int ret;
|
||||
|
||||
if (ram->base.next == NULL) {
|
||||
ret = gk104_ram_calc_data(fb, clk->read(clk, nv_clk_src_mem),
|
||||
ret = gk104_ram_calc_data(ram, clk->read(clk, nv_clk_src_mem),
|
||||
&ram->base.former);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = gk104_ram_calc_data(fb, freq, &ram->base.target);
|
||||
ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1063,14 +1061,13 @@ gk104_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram->base.next = &ram->base.target;
|
||||
}
|
||||
|
||||
return gk104_ram_calc_xits(fb, ram->base.next);
|
||||
return gk104_ram_calc_xits(ram, ram->base.next);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_ram_prog_0(struct nvkm_fb *fb, u32 freq)
|
||||
gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
|
||||
{
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
struct nvkm_ram_data *cfg;
|
||||
u32 mhz = freq / 1000;
|
||||
u32 mask, data;
|
||||
@ -1144,11 +1141,11 @@ gk104_ram_prog_0(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_prog(struct nvkm_fb *fb)
|
||||
gk104_ram_prog(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_device *device = nv_device(fb);
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ram *ram = gk104_ram(base);
|
||||
struct gk104_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
struct nvkm_ram_data *next = ram->base.next;
|
||||
|
||||
if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
|
||||
@ -1156,20 +1153,19 @@ gk104_ram_prog(struct nvkm_fb *fb)
|
||||
return (ram->base.next == &ram->base.xition);
|
||||
}
|
||||
|
||||
gk104_ram_prog_0(fb, 1000);
|
||||
gk104_ram_prog_0(ram, 1000);
|
||||
ram_exec(fuc, true);
|
||||
gk104_ram_prog_0(fb, next->freq);
|
||||
gk104_ram_prog_0(ram, next->freq);
|
||||
|
||||
return (ram->base.next == &ram->base.xition);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_ram_tidy(struct nvkm_fb *fb)
|
||||
gk104_ram_tidy(struct nvkm_ram *base)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)fb->ram;
|
||||
struct gk104_ramfuc *fuc = &ram->fuc;
|
||||
struct gk104_ram *ram = gk104_ram(base);
|
||||
ram->base.next = NULL;
|
||||
ram_exec(fuc, false);
|
||||
ram_exec(&ram->fuc, false);
|
||||
}
|
||||
|
||||
struct gk104_ram_train {
|
||||
@ -1185,10 +1181,10 @@ struct gk104_ram_train {
|
||||
};
|
||||
|
||||
static int
|
||||
gk104_ram_train_type(struct nvkm_fb *fb, int i, u8 ramcfg,
|
||||
gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
|
||||
struct gk104_ram_train *train)
|
||||
{
|
||||
struct nvkm_bios *bios = nvkm_bios(fb);
|
||||
struct nvkm_bios *bios = ram->fb->subdev.device->bios;
|
||||
struct nvbios_M0205E M0205E;
|
||||
struct nvbios_M0205S M0205S;
|
||||
struct nvbios_M0209E M0209E;
|
||||
@ -1246,9 +1242,9 @@ gk104_ram_train_type(struct nvkm_fb *fb, int i, u8 ramcfg,
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_train_init_0(struct nvkm_fb *fb, struct gk104_ram_train *train)
|
||||
gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int i, j;
|
||||
|
||||
@ -1282,9 +1278,9 @@ gk104_ram_train_init_0(struct nvkm_fb *fb, struct gk104_ram_train *train)
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_train_init(struct nvkm_fb *fb)
|
||||
gk104_ram_train_init(struct nvkm_ram *ram)
|
||||
{
|
||||
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(fb));
|
||||
u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
|
||||
struct gk104_ram_train *train;
|
||||
int ret, i;
|
||||
|
||||
@ -1292,14 +1288,14 @@ gk104_ram_train_init(struct nvkm_fb *fb)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < 0x100; i++) {
|
||||
ret = gk104_ram_train_type(fb, i, ramcfg, train);
|
||||
ret = gk104_ram_train_type(ram, i, ramcfg, train);
|
||||
if (ret && ret != -ENOENT)
|
||||
break;
|
||||
}
|
||||
|
||||
switch (fb->ram->type) {
|
||||
case NV_MEM_TYPE_GDDR5:
|
||||
ret = gk104_ram_train_init_0(fb, train);
|
||||
switch (ram->type) {
|
||||
case NVKM_RAM_TYPE_GDDR5:
|
||||
ret = gk104_ram_train_init_0(ram, train);
|
||||
break;
|
||||
default:
|
||||
ret = 0;
|
||||
@ -1311,19 +1307,14 @@ gk104_ram_train_init(struct nvkm_fb *fb)
|
||||
}
|
||||
|
||||
int
|
||||
gk104_ram_init(struct nvkm_object *object)
|
||||
gk104_ram_init(struct nvkm_ram *ram)
|
||||
{
|
||||
struct nvkm_fb *fb = (void *)object->parent;
|
||||
struct gk104_ram *ram = (void *)object;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct nvkm_subdev *subdev = &ram->fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
u8 ver, hdr, cnt, len, snr, ssz;
|
||||
u32 data, save;
|
||||
int ret, i;
|
||||
|
||||
ret = nvkm_ram_init(&ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
int i;
|
||||
|
||||
/* run a bunch of tables from rammap table. there's actually
|
||||
* individual pointers for each rammap entry too, but, nvidia
|
||||
@ -1347,7 +1338,7 @@ gk104_ram_init(struct nvkm_object *object)
|
||||
if (i != save >> 4) {
|
||||
nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
|
||||
nvbios_exec(&(struct nvbios_init) {
|
||||
.subdev = nv_subdev(fb),
|
||||
.subdev = subdev,
|
||||
.bios = bios,
|
||||
.offset = nvbios_rd32(bios, data),
|
||||
.execute = 1,
|
||||
@ -1359,14 +1350,13 @@ gk104_ram_init(struct nvkm_object *object)
|
||||
nvkm_wr32(device, 0x10ecc0, 0xffffffff);
|
||||
nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
|
||||
|
||||
return gk104_ram_train_init(fb);
|
||||
return gk104_ram_train_init(ram);
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
|
||||
{
|
||||
struct nvkm_fb *fb = (void *)nv_object(ram)->parent;
|
||||
struct nvkm_bios *bios = nvkm_bios(fb);
|
||||
struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
|
||||
struct nvkm_ram_data *cfg;
|
||||
struct nvbios_ramcfg *d = &ram->diff;
|
||||
struct nvbios_ramcfg *p, *n;
|
||||
@ -1432,25 +1422,33 @@ done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_ram_dtor(struct nvkm_object *object)
|
||||
static void *
|
||||
gk104_ram_dtor(struct nvkm_ram *base)
|
||||
{
|
||||
struct gk104_ram *ram = (void *)object;
|
||||
struct gk104_ram *ram = gk104_ram(base);
|
||||
struct nvkm_ram_data *cfg, *tmp;
|
||||
|
||||
list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
|
||||
kfree(cfg);
|
||||
}
|
||||
|
||||
nvkm_ram_destroy(&ram->base);
|
||||
return ram;
|
||||
}
|
||||
|
||||
static int
|
||||
gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
static const struct nvkm_ram_func
|
||||
gk104_ram_func = {
|
||||
.dtor = gk104_ram_dtor,
|
||||
.init = gk104_ram_init,
|
||||
.get = gf100_ram_get,
|
||||
.put = gf100_ram_put,
|
||||
.calc = gk104_ram_calc,
|
||||
.prog = gk104_ram_prog,
|
||||
.tidy = gk104_ram_tidy,
|
||||
};
|
||||
|
||||
int
|
||||
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
@ -1461,25 +1459,16 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
u8 ramcfg = nvbios_ramcfg_index(nv_subdev(fb));
|
||||
u32 tmp;
|
||||
|
||||
ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pram = &ram->base;
|
||||
|
||||
ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_LIST_HEAD(&ram->cfg);
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NV_MEM_TYPE_GDDR5:
|
||||
ram->base.calc = gk104_ram_calc;
|
||||
ram->base.prog = gk104_ram_prog;
|
||||
ram->base.tidy = gk104_ram_tidy;
|
||||
break;
|
||||
default:
|
||||
nvkm_warn(subdev, "reclocking of this RAM type is unsupported\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* calculate a mask of differently configured memory partitions,
|
||||
* because, of course reclocking wasn't complicated enough
|
||||
* already without having to treat some of them differently to
|
||||
@ -1596,7 +1585,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_GDDR5:
|
||||
case NVKM_RAM_TYPE_GDDR5:
|
||||
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
|
||||
ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
|
||||
ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
|
||||
@ -1608,7 +1597,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
|
||||
ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
|
||||
break;
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
|
||||
ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
|
||||
break;
|
||||
@ -1634,14 +1623,3 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
gk104_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gk104_ram_ctor,
|
||||
.dtor = gk104_ram_dtor,
|
||||
.init = gk104_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,31 +21,20 @@
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
#include "ram.h"

static int
gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_ram *ram;
int ret;

ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;

return 0;
}

struct nvkm_oclass
gm107_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gm107_ram_ctor,
.dtor = _nvkm_ram_dtor,
.init = gk104_ram_init,
.fini = _nvkm_ram_fini,
}
static const struct nvkm_ram_func
gm107_ram_func = {
.init = gk104_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
};

int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
return -ENOMEM;

return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
}
@ -22,9 +22,9 @@
|
||||
* Authors: Ben Skeggs
|
||||
* Roy Spliet <rspliet@eclipso.eu>
|
||||
*/
|
||||
|
||||
#define gt215_ram(p) container_of((p), struct gt215_ram, base)
|
||||
#include "ram.h"
|
||||
#include "ramfuc.h"
|
||||
#include "nv50.h"
|
||||
|
||||
#include <core/option.h>
|
||||
#include <subdev/bios.h>
|
||||
@ -153,12 +153,11 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
|
||||
* Link training for (at least) DDR3
|
||||
*/
|
||||
int
|
||||
gt215_link_train(struct nvkm_fb *fb)
|
||||
gt215_link_train(struct gt215_ram *ram)
|
||||
{
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct gt215_ltrain *train = &ram->ltrain;
|
||||
struct gt215_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct nvkm_clk *clk = device->clk;
|
||||
@ -194,7 +193,7 @@ gt215_link_train(struct nvkm_fb *fb)
|
||||
goto out;
|
||||
|
||||
/* First: clock up/down */
|
||||
ret = ram->base.calc(fb, (u32) M0205T.freq * 1000);
|
||||
ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -237,7 +236,7 @@ gt215_link_train(struct nvkm_fb *fb)
|
||||
|
||||
ram_exec(fuc, true);
|
||||
|
||||
ram->base.calc(fb, clk_current);
|
||||
ram->base.func->calc(&ram->base, clk_current);
|
||||
ram_exec(fuc, true);
|
||||
|
||||
/* Post-processing, avoids flicker */
|
||||
@ -246,7 +245,7 @@ gt215_link_train(struct nvkm_fb *fb)
|
||||
|
||||
gt215_clk_post(clk, f);
|
||||
|
||||
ram_train_result(fb, result, 64);
|
||||
ram_train_result(ram->base.fb, result, 64);
|
||||
for (i = 0; i < 64; i++)
|
||||
nvkm_debug(subdev, "Train: %08x", result[i]);
|
||||
gt215_link_train_calc(result, train);
|
||||
@ -272,7 +271,7 @@ out:
|
||||
}
|
||||
|
||||
int
|
||||
gt215_link_train_init(struct nvkm_fb *fb)
|
||||
gt215_link_train_init(struct gt215_ram *ram)
|
||||
{
|
||||
static const u32 pattern[16] = {
|
||||
0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
|
||||
@ -280,10 +279,9 @@ gt215_link_train_init(struct nvkm_fb *fb)
|
||||
0x33333333, 0x55555555, 0x77777777, 0x66666666,
|
||||
0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
|
||||
};
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct gt215_ltrain *train = &ram->ltrain;
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct nvkm_mem *mem;
|
||||
struct nvbios_M0205E M0205E;
|
||||
u8 ver, hdr, cnt, len;
|
||||
@ -302,7 +300,8 @@ gt215_link_train_init(struct nvkm_fb *fb)
|
||||
|
||||
train->state = NVA3_TRAIN_ONCE;
|
||||
|
||||
ret = fb->ram->get(fb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
|
||||
ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
|
||||
&ram->ltrain.mem);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -338,12 +337,10 @@ gt215_link_train_init(struct nvkm_fb *fb)
|
||||
}
|
||||
|
||||
void
|
||||
gt215_link_train_fini(struct nvkm_fb *fb)
|
||||
gt215_link_train_fini(struct gt215_ram *ram)
|
||||
{
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
|
||||
if (ram->ltrain.mem)
|
||||
fb->ram->put(fb, &ram->ltrain.mem);
|
||||
ram->base.func->put(&ram->base, &ram->ltrain.mem);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -351,11 +348,10 @@ gt215_link_train_fini(struct nvkm_fb *fb)
|
||||
*/
|
||||
#define T(t) cfg->timing_10_##t
|
||||
static int
|
||||
gt215_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
|
||||
{
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
int tUNK_base, tUNK_40_0, prevCL;
|
||||
u32 cur2, cur3, cur7, cur8;
|
||||
@ -367,10 +363,10 @@ gt215_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
|
||||
|
||||
switch ((!T(CWL)) * ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
T(CWL) = T(CL) - 1;
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
|
||||
break;
|
||||
}
|
||||
@ -408,8 +404,8 @@ gt215_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
timing[8] = cur8 & 0xffffff00;
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
tUNK_40_0 = prevCL - (cur8 & 0xff);
|
||||
if (tUNK_40_0 > 0)
|
||||
timing[8] |= T(CL);
|
||||
@ -493,12 +489,12 @@ gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
gt215_ram_calc(struct nvkm_ram *base, u32 freq)
|
||||
{
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct gt215_ram *ram = gt215_ram(base);
|
||||
struct gt215_ramfuc *fuc = &ram->fuc;
|
||||
struct gt215_ltrain *train = &ram->ltrain;
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct gt215_clk_info mclk;
|
||||
@ -516,7 +512,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram->base.next = next;
|
||||
|
||||
if (ram->ltrain.state == NVA3_TRAIN_ONCE)
|
||||
gt215_link_train(fb);
|
||||
gt215_link_train(ram);
|
||||
|
||||
/* lookup memory config data relevant to the target frequency */
|
||||
data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
|
||||
@ -527,7 +523,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
|
||||
/* locate specific data set for the attached memory */
|
||||
strap = nvbios_ramcfg_index(nv_subdev(fb));
|
||||
strap = nvbios_ramcfg_index(subdev);
|
||||
if (strap >= cnt) {
|
||||
nvkm_error(subdev, "invalid ramcfg strap\n");
|
||||
return -EINVAL;
|
||||
@ -551,15 +547,15 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
}
|
||||
|
||||
ret = gt215_pll_info(nvkm_clk(fb), 0x12, 0x4000, freq, &mclk);
|
||||
ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
|
||||
if (ret < 0) {
|
||||
nvkm_error(subdev, "failed mclk calculation\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
gt215_ram_timing_calc(fb, timing);
|
||||
gt215_ram_timing_calc(ram, timing);
|
||||
|
||||
ret = ram_init(fuc, fb);
|
||||
ret = ram_init(fuc, ram->base.fb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -569,13 +565,13 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram->base.mr[2] = ram_rd32(fuc, mr[2]);
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
ret = nvkm_sddr2_calc(&ram->base);
|
||||
break;
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
ret = nvkm_sddr3_calc(&ram->base);
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
ret = nvkm_gddr3_calc(&ram->base);
|
||||
break;
|
||||
default:
|
||||
@ -630,7 +626,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram_nsec(fuc, 2000);
|
||||
|
||||
if (!next->bios.ramcfg_10_02_10) {
|
||||
if (ram->base.type == NV_MEM_TYPE_GDDR3)
|
||||
if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
|
||||
ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
|
||||
else
|
||||
ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
|
||||
@ -638,10 +634,10 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
|
||||
/* If we're disabling the DLL, do it now */
|
||||
switch (next->bios.ramcfg_DLLoff * ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
nvkm_sddr3_dll_disable(fuc, ram->base.mr);
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
nvkm_gddr3_dll_disable(fuc, ram->base.mr);
|
||||
break;
|
||||
}
|
||||
@ -657,7 +653,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram_wr32(fuc, 0x1002dc, 0x00000001);
|
||||
ram_nsec(fuc, 2000);
|
||||
|
||||
if (nv_device(fb)->chipset == 0xa3 && freq <= 500000)
|
||||
if (device->chipset == 0xa3 && freq <= 500000)
|
||||
ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
|
||||
|
||||
/* Fiddle with clocks */
|
||||
@ -715,7 +711,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
|
||||
}
|
||||
|
||||
if (nv_device(fb)->chipset == 0xa3 && freq > 500000) {
|
||||
if (device->chipset == 0xa3 && freq > 500000) {
|
||||
ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
|
||||
}
|
||||
|
||||
@ -759,11 +755,11 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
|
||||
if (next->bios.ramcfg_10_02_04) {
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
if (nv_device(fb)->chipset != 0xa8)
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
if (device->chipset != 0xa8)
|
||||
r111100 |= 0x00000004;
|
||||
/* no break */
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
r111100 |= 0x08000000;
|
||||
break;
|
||||
default:
|
||||
@ -771,12 +767,12 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
} else {
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
r111100 |= 0x1a800000;
|
||||
unk714 |= 0x00000010;
|
||||
break;
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
if (nv_device(fb)->chipset == 0xa8) {
|
||||
case NVKM_RAM_TYPE_DDR3:
|
||||
if (device->chipset == 0xa8) {
|
||||
r111100 |= 0x08000000;
|
||||
} else {
|
||||
r111100 &= ~0x00000004;
|
||||
@ -784,7 +780,7 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
unk714 |= 0x00000010;
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
r111100 |= 0x30000000;
|
||||
unk714 |= 0x00000020;
|
||||
break;
|
||||
@ -820,13 +816,13 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
if (!next->bios.ramcfg_DLLoff)
|
||||
nvkm_sddr2_dll_reset(fuc);
|
||||
|
||||
if (ram->base.type == NV_MEM_TYPE_GDDR3) {
|
||||
if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
|
||||
ram_nsec(fuc, 31000);
|
||||
} else {
|
||||
ram_nsec(fuc, 14000);
|
||||
}
|
||||
|
||||
if (ram->base.type == NV_MEM_TYPE_DDR3) {
|
||||
if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
|
||||
ram_wr32(fuc, 0x100264, 0x1);
|
||||
ram_nsec(fuc, 2000);
|
||||
}
|
||||
@ -862,11 +858,11 @@ gt215_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_ram_prog(struct nvkm_fb *fb)
|
||||
gt215_ram_prog(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_device *device = nv_device(fb);
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct gt215_ram *ram = gt215_ram(base);
|
||||
struct gt215_ramfuc *fuc = &ram->fuc;
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
|
||||
|
||||
if (exec) {
|
||||
@ -887,70 +883,56 @@ gt215_ram_prog(struct nvkm_fb *fb)
|
||||
}
|
||||
|
||||
static void
|
||||
gt215_ram_tidy(struct nvkm_fb *fb)
|
||||
gt215_ram_tidy(struct nvkm_ram *base)
|
||||
{
|
||||
struct gt215_ram *ram = (void *)fb->ram;
|
||||
struct gt215_ramfuc *fuc = &ram->fuc;
|
||||
ram_exec(fuc, false);
|
||||
struct gt215_ram *ram = gt215_ram(base);
|
||||
ram_exec(&ram->fuc, false);
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_ram_init(struct nvkm_object *object)
|
||||
gt215_ram_init(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_fb *fb = (void *)object->parent;
|
||||
struct gt215_ram *ram = (void *)object;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_init(&ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
gt215_link_train_init(fb);
|
||||
struct gt215_ram *ram = gt215_ram(base);
|
||||
gt215_link_train_init(ram);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_ram_fini(struct nvkm_object *object, bool suspend)
|
||||
static void *
|
||||
gt215_ram_dtor(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_fb *fb = (void *)object->parent;
|
||||
|
||||
if (!suspend)
|
||||
gt215_link_train_fini(fb);
|
||||
|
||||
return 0;
|
||||
struct gt215_ram *ram = gt215_ram(base);
|
||||
gt215_link_train_fini(ram);
|
||||
return ram;
|
||||
}
|
||||
|
||||
static int
|
||||
gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 datasize,
|
||||
struct nvkm_object **pobject)
|
||||
static const struct nvkm_ram_func
|
||||
gt215_ram_func = {
|
||||
.dtor = gt215_ram_dtor,
|
||||
.init = gt215_ram_init,
|
||||
.get = nv50_ram_get,
|
||||
.put = nv50_ram_put,
|
||||
.calc = gt215_ram_calc,
|
||||
.prog = gt215_ram_prog,
|
||||
.tidy = gt215_ram_tidy,
|
||||
};
|
||||
|
||||
int
|
||||
gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_gpio *gpio = subdev->device->gpio;
|
||||
struct nvkm_gpio *gpio = fb->subdev.device->gpio;
|
||||
struct dcb_gpio_func func;
|
||||
struct gt215_ram *ram;
|
||||
int ret, i;
|
||||
u32 reg, shift;
|
||||
int ret, i;
|
||||
|
||||
ret = nv50_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pram = &ram->base;
|
||||
|
||||
ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
switch (ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NV_MEM_TYPE_DDR3:
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
ram->base.calc = gt215_ram_calc;
|
||||
ram->base.prog = gt215_ram_prog;
|
||||
ram->base.tidy = gt215_ram_tidy;
|
||||
break;
|
||||
default:
|
||||
nvkm_warn(subdev, "reclocking of this ram type unsupported\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
|
||||
ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
|
||||
ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
|
||||
@ -1008,13 +990,3 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
gt215_ram_oclass = {
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = gt215_ram_ctor,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = gt215_ram_init,
|
||||
.fini = gt215_ram_fini,
|
||||
},
|
||||
};
|
||||
|
@ -21,7 +21,8 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
|
||||
#include "ram.h"
|
||||
|
||||
struct mcp77_ram {
|
||||
struct nvkm_ram base;
|
||||
@ -29,56 +30,13 @@ struct mcp77_ram {
|
||||
};
|
||||
|
||||
static int
|
||||
mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 datasize,
|
||||
struct nvkm_object **pobject)
|
||||
mcp77_ram_init(struct nvkm_ram *base)
|
||||
{
|
||||
u32 rsvd_head = ( 256 * 1024); /* vga memory */
|
||||
u32 rsvd_tail = (1024 * 1024); /* vbios etc */
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct mcp77_ram *ram;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(fb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ram->base.type = NV_MEM_TYPE_STOLEN;
|
||||
ram->base.stolen = (u64)nvkm_rd32(device, 0x100e10) << 12;
|
||||
ram->base.size = (u64)nvkm_rd32(device, 0x100e14) << 12;
|
||||
|
||||
rsvd_tail += 0x1000;
|
||||
ram->poller_base = ram->base.size - rsvd_tail;
|
||||
|
||||
ret = nvkm_mm_init(&fb->vram, rsvd_head >> 12,
|
||||
(ram->base.size - (rsvd_head + rsvd_tail)) >> 12,
|
||||
1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ram->base.get = nv50_ram_get;
|
||||
ram->base.put = nv50_ram_put;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mcp77_ram_init(struct nvkm_object *object)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(object);
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct mcp77_ram *ram = (void *)object;
|
||||
int ret;
|
||||
u64 dniso, hostnb, flush;
|
||||
|
||||
ret = nvkm_ram_init(&ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
|
||||
hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
|
||||
flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
|
||||
struct mcp77_ram *ram = mcp77_ram(base);
|
||||
struct nvkm_device *device = ram->base.fb->subdev.device;
|
||||
u32 dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
|
||||
u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
|
||||
u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
|
||||
|
||||
/* Enable NISO poller for various clients and set their associated
|
||||
* read address, only for MCP77/78 and MCP79/7A. (fd#25701)
|
||||
@ -92,12 +50,38 @@ mcp77_ram_init(struct nvkm_object *object)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
mcp77_ram_oclass = {
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = mcp77_ram_ctor,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = mcp77_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
},
|
||||
static const struct nvkm_ram_func
|
||||
mcp77_ram_func = {
|
||||
.init = mcp77_ram_init,
|
||||
.get = nv50_ram_get,
|
||||
.put = nv50_ram_put,
|
||||
};
|
||||
|
||||
int
|
||||
mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 rsvd_head = ( 256 * 1024); /* vga memory */
|
||||
u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
|
||||
u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
|
||||
u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
|
||||
struct mcp77_ram *ram;
|
||||
int ret;
|
||||
|
||||
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pram = &ram->base;
|
||||
|
||||
ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
|
||||
size, 0, &ram->base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ram->poller_base = size - rsvd_tail;
|
||||
ram->base.stolen = base;
|
||||
nvkm_mm_fini(&ram->base.vram);
|
||||
|
||||
return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
|
||||
(size - rsvd_head - rsvd_tail) >>
|
||||
NVKM_RAM_MM_SHIFT, 1);
|
||||
}
|
||||
|
@ -21,60 +21,45 @@
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"
#include "regsnv04.h"

static int
nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
const struct nvkm_ram_func
nv04_ram_func = {
};

int
nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
struct nvkm_device *device = fb->subdev.device;
u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
int ret;

ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
u64 size;
enum nvkm_ram_type type;

if (boot0 & 0x00000100) {
ram->size = ((boot0 >> 12) & 0xf) * 2 + 2;
ram->size *= 1024 * 1024;
size = ((boot0 >> 12) & 0xf) * 2 + 2;
size *= 1024 * 1024;
} else {
switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
ram->size = 32 * 1024 * 1024;
size = 32 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
ram->size = 16 * 1024 * 1024;
size = 16 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
ram->size = 8 * 1024 * 1024;
size = 8 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
ram->size = 4 * 1024 * 1024;
size = 4 * 1024 * 1024;
break;
}
}

if ((boot0 & 0x00000038) <= 0x10)
ram->type = NV_MEM_TYPE_SGRAM;
type = NVKM_RAM_TYPE_SGRAM;
else
ram->type = NV_MEM_TYPE_SDRAM;
type = NVKM_RAM_TYPE_SDRAM;

return 0;
return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
}

struct nvkm_oclass
nv04_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_ram_create,
.dtor = _nvkm_ram_dtor,
.init = _nvkm_ram_init,
.fini = _nvkm_ram_fini,
}
};
@ -21,40 +21,20 @@
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"

static int
nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
int
nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
u32 cfg0 = nvkm_rd32(device, 0x100200);
int ret;

ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;
enum nvkm_ram_type type;

if (cfg0 & 0x00000001)
ram->type = NV_MEM_TYPE_DDR1;
type = NVKM_RAM_TYPE_DDR1;
else
ram->type = NV_MEM_TYPE_SDRAM;
type = NVKM_RAM_TYPE_SDRAM;

ram->size = nvkm_rd32(device, 0x10020c) & 0xff000000;
return 0;
return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
}

struct nvkm_oclass
nv10_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv10_ram_create,
.dtor = _nvkm_ram_dtor,
.init = _nvkm_ram_init,
.fini = _nvkm_ram_fini,
}
};
@ -21,18 +21,13 @@
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"

static int
nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
int
nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
struct pci_dev *bridge;
u32 mem, mib;
int ret;

bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
if (!bridge) {
@ -40,12 +35,7 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
return -ENODEV;
}

ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;

if (nv_device(fb)->chipset == 0x1a) {
if (fb->subdev.device->chipset == 0x1a) {
pci_read_config_dword(bridge, 0x7c, &mem);
mib = ((mem >> 6) & 31) + 1;
} else {
@ -53,18 +43,6 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
mib = ((mem >> 4) & 127) + 1;
}

ram->type = NV_MEM_TYPE_STOLEN;
ram->size = mib * 1024 * 1024;
return 0;
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
mib * 1024 * 1024, 0, pram);
}

struct nvkm_oclass
nv1a_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv1a_ram_create,
.dtor = _nvkm_ram_dtor,
.init = _nvkm_ram_init,
.fini = _nvkm_ram_fini,
}
};
@ -21,43 +21,29 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "priv.h"
|
||||
#include "ram.h"
|
||||
|
||||
static int
|
||||
nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
int
|
||||
nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nvkm_ram *ram;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 pbus1218 = nvkm_rd32(device, 0x001218);
|
||||
u32 pbus1218 = nvkm_rd32(device, 0x001218);
|
||||
u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
|
||||
u32 tags = nvkm_rd32(device, 0x100320);
|
||||
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
switch (pbus1218 & 0x00000300) {
|
||||
case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
|
||||
case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
|
||||
case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
|
||||
case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
|
||||
}
|
||||
|
||||
ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
switch (pbus1218 & 0x00000300) {
|
||||
case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
|
||||
case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
|
||||
case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
|
||||
case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
|
||||
}
|
||||
ram->size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
|
||||
ram->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
ram->tags = nvkm_rd32(device, 0x100320);
|
||||
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv20_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv20_ram_create,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = _nvkm_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,7 +21,7 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv40.h"
|
||||
#include "ramnv40.h"
|
||||
|
||||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/bit.h>
|
||||
@ -30,12 +30,12 @@
|
||||
#include <subdev/clk/pll.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
int
|
||||
nv40_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
static int
|
||||
nv40_ram_calc(struct nvkm_ram *base, u32 freq)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nv40_ram *ram = nv40_ram(base);
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_bios *bios = subdev->device->bios;
|
||||
struct nv40_ram *ram = (void *)fb->ram;
|
||||
struct nvbios_pll pll;
|
||||
int N1, M1, N2, M2;
|
||||
int log2P, ret;
|
||||
@ -46,8 +46,7 @@ nv40_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = nv04_pll_calc(nv_subdev(fb), &pll, freq,
|
||||
&N1, &M1, &N2, &M2, &log2P);
|
||||
ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -64,12 +63,13 @@ nv40_ram_calc(struct nvkm_fb *fb, u32 freq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nv40_ram_prog(struct nvkm_fb *fb)
|
||||
static int
|
||||
nv40_ram_prog(struct nvkm_ram *base)
|
||||
{
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
struct nv40_ram *ram = nv40_ram(base);
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_bios *bios = device->bios;
|
||||
struct nv40_ram *ram = (void *)fb->ram;
|
||||
struct bit_entry M;
|
||||
u32 crtc_mask = 0;
|
||||
u8 sr1[2];
|
||||
@ -152,7 +152,7 @@ nv40_ram_prog(struct nvkm_fb *fb)
|
||||
/* execute memory reset script from vbios */
|
||||
if (!bit_entry(bios, 'M', &M)) {
|
||||
struct nvbios_init init = {
|
||||
.subdev = nv_subdev(fb),
|
||||
.subdev = subdev,
|
||||
.bios = bios,
|
||||
.offset = nvbios_rd16(bios, M.offset + 0x00),
|
||||
.execute = 1,
|
||||
@ -181,51 +181,50 @@ nv40_ram_prog(struct nvkm_fb *fb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv40_ram_tidy(struct nvkm_fb *fb)
|
||||
static void
|
||||
nv40_ram_tidy(struct nvkm_ram *base)
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
static const struct nvkm_ram_func
|
||||
nv40_ram_func = {
|
||||
.calc = nv40_ram_calc,
|
||||
.prog = nv40_ram_prog,
|
||||
.tidy = nv40_ram_tidy,
|
||||
};
|
||||
|
||||
int
|
||||
nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
|
||||
u32 tags, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nv40_ram *ram;
|
||||
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pram = &ram->base;
|
||||
return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
|
||||
}
|
||||
|
||||
int
|
||||
nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 pbus1218 = nvkm_rd32(device, 0x001218);
|
||||
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
u32 tags = nvkm_rd32(device, 0x100320);
|
||||
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
switch (pbus1218 & 0x00000300) {
|
||||
case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
|
||||
case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
|
||||
case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
|
||||
case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
|
||||
}
|
||||
|
||||
ret = nv40_ram_new_(fb, type, size, tags, pram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
switch (pbus1218 & 0x00000300) {
|
||||
case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
|
||||
case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
|
||||
case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
|
||||
case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
|
||||
}
|
||||
|
||||
ram->base.size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
ram->base.parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
ram->base.tags = nvkm_rd32(device, 0x100320);
|
||||
ram->base.calc = nv40_ram_calc;
|
||||
ram->base.prog = nv40_ram_prog;
|
||||
ram->base.tidy = nv40_ram_tidy;
|
||||
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
struct nvkm_oclass
|
||||
nv40_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv40_ram_create,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = _nvkm_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
14
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
Normal file
@ -0,0 +1,14 @@
#ifndef __NV40_FB_RAM_H__
#define __NV40_FB_RAM_H__
#define nv40_ram(p) container_of((p), struct nv40_ram, base)
#include "ram.h"

struct nv40_ram {
struct nvkm_ram base;
u32 ctrl;
u32 coef;
};

int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
struct nvkm_ram **);
#endif
@ -21,47 +21,29 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv40.h"
|
||||
#include "ramnv40.h"
|
||||
|
||||
static int
|
||||
nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
int
|
||||
nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nv40_ram *ram;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
u32 tags = nvkm_rd32(device, 0x100320);
|
||||
u32 fb474 = nvkm_rd32(device, 0x100474);
|
||||
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (fb474 & 0x00000004)
|
||||
type = NVKM_RAM_TYPE_GDDR3;
|
||||
if (fb474 & 0x00000002)
|
||||
type = NVKM_RAM_TYPE_DDR2;
|
||||
if (fb474 & 0x00000001)
|
||||
type = NVKM_RAM_TYPE_DDR1;
|
||||
|
||||
ret = nv40_ram_new_(fb, type, size, tags, pram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (fb474 & 0x00000004)
|
||||
ram->base.type = NV_MEM_TYPE_GDDR3;
|
||||
if (fb474 & 0x00000002)
|
||||
ram->base.type = NV_MEM_TYPE_DDR2;
|
||||
if (fb474 & 0x00000001)
|
||||
ram->base.type = NV_MEM_TYPE_DDR1;
|
||||
|
||||
ram->base.size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
ram->base.parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
ram->base.tags = nvkm_rd32(device, 0x100320);
|
||||
ram->base.calc = nv40_ram_calc;
|
||||
ram->base.prog = nv40_ram_prog;
|
||||
ram->base.tidy = nv40_ram_tidy;
|
||||
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv41_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv41_ram_create,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = _nvkm_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,45 +21,22 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv40.h"
|
||||
#include "ramnv40.h"
|
||||
|
||||
static int
|
||||
nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
int
|
||||
nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nv40_ram *ram;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
u32 fb474 = nvkm_rd32(device, 0x100474);
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (ret)
|
||||
return ret;
|
||||
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
|
||||
|
||||
if (fb474 & 0x00000004)
|
||||
ram->base.type = NV_MEM_TYPE_GDDR3;
|
||||
type = NVKM_RAM_TYPE_GDDR3;
|
||||
if (fb474 & 0x00000002)
|
||||
ram->base.type = NV_MEM_TYPE_DDR2;
|
||||
type = NVKM_RAM_TYPE_DDR2;
|
||||
if (fb474 & 0x00000001)
|
||||
ram->base.type = NV_MEM_TYPE_DDR1;
|
||||
type = NVKM_RAM_TYPE_DDR1;
|
||||
|
||||
ram->base.size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
ram->base.calc = nv40_ram_calc;
|
||||
ram->base.prog = nv40_ram_prog;
|
||||
ram->base.tidy = nv40_ram_tidy;
|
||||
return 0;
|
||||
return nv40_ram_new_(fb, type, size, 0, pram);
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv44_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv44_ram_create,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = _nvkm_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,47 +21,29 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv40.h"
|
||||
#include "ramnv40.h"
|
||||
|
||||
static int
|
||||
nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
|
||||
struct nvkm_oclass *oclass, void *data, u32 size,
|
||||
struct nvkm_object **pobject)
|
||||
int
|
||||
nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
|
||||
{
|
||||
struct nvkm_fb *fb = nvkm_fb(parent);
|
||||
struct nv40_ram *ram;
|
||||
struct nvkm_device *device = fb->subdev.device;
|
||||
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
u32 tags = nvkm_rd32(device, 0x100320);
|
||||
u32 fb914 = nvkm_rd32(device, 0x100914);
|
||||
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
|
||||
int ret;
|
||||
|
||||
ret = nvkm_ram_create(parent, engine, oclass, &ram);
|
||||
*pobject = nv_object(ram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
switch (fb914 & 0x00000003) {
|
||||
case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
|
||||
case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
|
||||
case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
|
||||
case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
|
||||
case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
|
||||
case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
|
||||
case 0x00000003: break;
|
||||
}
|
||||
|
||||
ram->base.size = nvkm_rd32(device, 0x10020c) & 0xff000000;
|
||||
ram->base.parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
ram->base.tags = nvkm_rd32(device, 0x100320);
|
||||
ram->base.calc = nv40_ram_calc;
|
||||
ram->base.prog = nv40_ram_prog;
|
||||
ram->base.tidy = nv40_ram_tidy;
|
||||
ret = nv40_ram_new_(fb, type, size, tags, pram);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct nvkm_oclass
|
||||
nv49_ram_oclass = {
|
||||
.handle = 0,
|
||||
.ofuncs = &(struct nvkm_ofuncs) {
|
||||
.ctor = nv49_ram_create,
|
||||
.dtor = _nvkm_ram_dtor,
|
||||
.init = _nvkm_ram_init,
|
||||
.fini = _nvkm_ram_fini,
|
||||
}
|
||||
};
|
||||
|
@ -21,35 +21,13 @@
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"

static int
nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
int
nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_ram *ram;
struct nvkm_device *device = fb->subdev.device;
int ret;

ret = nvkm_ram_create(parent, engine, oclass, &ram);
*pobject = nv_object(ram);
if (ret)
return ret;

ram->size = nvkm_rd32(device, 0x10020c) & 0xff000000;
ram->type = NV_MEM_TYPE_STOLEN;
return 0;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
size, 0, pram);
}

struct nvkm_oclass
nv4e_ram_oclass = {
.handle = 0,
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv4e_ram_create,
.dtor = _nvkm_ram_dtor,
.init = _nvkm_ram_init,
.fini = _nvkm_ram_fini,
}
};
@ -21,8 +21,10 @@
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
#include "nv50.h"
|
||||
#define nv50_ram(p) container_of((p), struct nv50_ram, base)
|
||||
#include "ram.h"
|
||||
#include "ramseq.h"
|
||||
#include "nv50.h"
|
||||
|
||||
#include <core/option.h>
|
||||
#include <subdev/bios.h>
|
||||
@ -66,11 +68,10 @@ struct nv50_ram {
|
||||
|
||||
#define T(t) cfg->timing_10_##t
|
||||
static int
|
||||
nv50_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
|
||||
{
|
||||
struct nv50_ram *ram = (void *)fb->ram;
|
||||
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
|
||||
struct nvkm_subdev *subdev = &fb->subdev;
|
||||
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 cur2, cur4, cur7, cur8;
|
||||
u8 unkt3b;
|
||||
@ -81,16 +82,16 @@ nv50_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
cur8 = nvkm_rd32(device, 0x100240);
|
||||
|
||||
switch ((!T(CWL)) * ram->base.type) {
|
||||
case NV_MEM_TYPE_DDR2:
|
||||
case NVKM_RAM_TYPE_DDR2:
|
||||
T(CWL) = T(CL) - 1;
|
||||
break;
|
||||
case NV_MEM_TYPE_GDDR3:
|
||||
case NVKM_RAM_TYPE_GDDR3:
|
||||
T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
|
||||
break;
|
||||
}
|
||||
|
||||
/* XXX: N=1 is not proper statistics */
|
||||
if (nv_device(fb)->chipset == 0xa0) {
|
||||
if (device->chipset == 0xa0) {
|
||||
unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
|
||||
timing[6] = (0x2d + T(CL) - T(CWL) +
|
||||
ram->base.next->bios.rammap_00_16_40) << 16 |
|
||||
@ -127,10 +128,11 @@ nv50_ram_timing_calc(struct nvkm_fb *fb, u32 *timing)
|
||||
timing[8] = (cur8 & 0xffffff00);
|
||||
|
||||
/* XXX: P.version == 1 only has DDR2 and GDDR3? */
|
||||
if (fb->ram->type == NV_MEM_TYPE_DDR2) {
|
||||
if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
|
||||
timing[5] |= (T(CL) + 3) << 8;
|
||||
timing[8] |= (T(CL) - 4);
|
||||
} else if (fb->ram->type == NV_MEM_TYPE_GDDR3) {
|
||||
} else
|
||||
if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
|
||||
timing[5] |= (T(CL) + 2) << 8;
|
||||
timing[8] |= (T(CL) - 2);
|
||||
}
|
||||
@ -153,11 +155,11 @@ nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
|
||||
}
|
||||
|
||||
static int
nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
nv50_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct nv50_ram *ram = (void *)fb->ram;
	struct nv50_ram *ram = nv50_ram(base);
	struct nv50_ramseq *hwsq = &ram->hwsq;
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_perfE perfE;
	struct nvbios_pll mpll;
@ -177,7 +179,7 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
	i = 0;
	do {
		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
				     &size, &perfE);
				     &size, &perfE);
		if (!data || (ver < 0x25 || ver >= 0x40) ||
		    (size < 2)) {
			nvkm_error(subdev, "invalid/missing perftab entry\n");
@ -188,7 +190,7 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(nv_subdev(fb));
	strap = nvbios_ramcfg_index(subdev);
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
@ -213,9 +215,9 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
		}
	}

	nv50_ram_timing_calc(fb, timing);
	nv50_ram_timing_calc(ram, timing);

	ret = ram_init(hwsq, nv_subdev(fb));
	ret = ram_init(hwsq, subdev);
	if (ret)
		return ret;

@ -225,7 +227,7 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);

	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR3:
	case NVKM_RAM_TYPE_GDDR3:
		ret = nvkm_gddr3_calc(&ram->base);
		break;
	default:
@ -257,7 +259,7 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
	mpll.vco2.max_freq = 0;
	if (ret >= 0) {
		ret = nv04_pll_calc(nv_subdev(fb), &mpll, freq,
		ret = nv04_pll_calc(subdev, &mpll, freq,
				    &N1, &M1, &N2, &M2, &P);
		if (ret <= 0)
			ret = -EINVAL;
@ -284,7 +286,7 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
		  next->bios.rammap_00_16_40 << 14);
	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
	if (nv_device(fb)->chipset >= 0x96)
	if (subdev->device->chipset >= 0x96)
		ram_wr32(hwsq, 0x100da0, r100da0);
	ram_nsec(hwsq, 64000); /*XXX*/
	ram_nsec(hwsq, 32000); /*XXX*/
@ -298,11 +300,11 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
	ram_nsec(hwsq, 12000);

	switch (ram->base.type) {
	case NV_MEM_TYPE_DDR2:
	case NVKM_RAM_TYPE_DDR2:
		ram_nuke(hwsq, mr[0]); /* force update */
		ram_mask(hwsq, mr[0], 0x000, 0x000);
		break;
	case NV_MEM_TYPE_GDDR3:
	case NVKM_RAM_TYPE_GDDR3:
		ram_nuke(hwsq, mr[1]); /* force update */
		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
		ram_nuke(hwsq, mr[0]); /* force update */
@ -382,26 +384,23 @@ nv50_ram_calc(struct nvkm_fb *fb, u32 freq)
}

static int
nv50_ram_prog(struct nvkm_fb *fb)
nv50_ram_prog(struct nvkm_ram *base)
{
	struct nvkm_device *device = nv_device(fb);
	struct nv50_ram *ram = (void *)fb->ram;
	struct nv50_ramseq *hwsq = &ram->hwsq;

	ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	struct nv50_ram *ram = nv50_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

static void
nv50_ram_tidy(struct nvkm_fb *fb)
nv50_ram_tidy(struct nvkm_ram *base)
{
	struct nv50_ram *ram = (void *)fb->ram;
	struct nv50_ramseq *hwsq = &ram->hwsq;
	ram_exec(hwsq, false);
	struct nv50_ram *ram = nv50_ram(base);
	ram_exec(&ram->hwsq, false);
}

void
__nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem *mem)
__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
{
	struct nvkm_mm_node *this;

@ -409,14 +408,14 @@ __nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem *mem)
		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);

		list_del(&this->rl_entry);
		nvkm_mm_free(&fb->vram, &this);
		nvkm_mm_free(&ram->vram, &this);
	}

	nvkm_mm_free(&fb->tags, &mem->tag);
	nvkm_mm_free(&ram->tags, &mem->tag);
}

void
nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
{
	struct nvkm_mem *mem = *pmem;

@ -424,19 +423,19 @@ nv50_ram_put(struct nvkm_fb *fb, struct nvkm_mem **pmem)
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&fb->subdev.mutex);
	__nv50_ram_put(fb, mem);
	mutex_unlock(&fb->subdev.mutex);
	mutex_lock(&ram->fb->subdev.mutex);
	__nv50_ram_put(ram, mem);
	mutex_unlock(&ram->fb->subdev.mutex);

	kfree(mem);
}

int
nv50_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
	     u32 memtype, struct nvkm_mem **pmem)
{
	struct nvkm_mm *heap = &fb->vram;
	struct nvkm_mm *tags = &fb->tags;
	struct nvkm_mm *heap = &ram->vram;
	struct nvkm_mm *tags = &ram->tags;
	struct nvkm_mm_node *r;
	struct nvkm_mem *mem;
	int comp = (memtype & 0x300) >> 8;
@ -444,17 +443,17 @@ nv50_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
	int back = (memtype & 0x800);
	int min, max, ret;

	max = (size >> 12);
	min = ncmin ? (ncmin >> 12) : max;
	align >>= 12;
	max = (size >> NVKM_RAM_MM_SHIFT);
	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
	align >>= NVKM_RAM_MM_SHIFT;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mutex_lock(&fb->subdev.mutex);
	mutex_lock(&ram->fb->subdev.mutex);
	if (comp) {
		if (align == 16) {
		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
			int n = (max >> 4) * comp;

			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@ -477,26 +476,35 @@ nv50_ram_get(struct nvkm_fb *fb, u64 size, u32 align, u32 ncmin,
		else
			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
		if (ret) {
			mutex_unlock(&fb->subdev.mutex);
			fb->ram->put(fb, &mem);
			mutex_unlock(&ram->fb->subdev.mutex);
			ram->func->put(ram, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		max -= r->length;
	} while (max);
	mutex_unlock(&fb->subdev.mutex);
	mutex_unlock(&ram->fb->subdev.mutex);

	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
	*pmem = mem;
	return 0;
}

static const struct nvkm_ram_func
nv50_ram_func = {
	.get = nv50_ram_get,
	.put = nv50_ram_put,
	.calc = nv50_ram_calc,
	.prog = nv50_ram_prog,
	.tidy = nv50_ram_tidy,
};

static u32
nv50_fb_vram_rblock(struct nvkm_fb *fb, struct nvkm_ram *ram)
nv50_fb_vram_rblock(struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nvkm_subdev *subdev = &ram->fb->subdev;
	struct nvkm_device *device = subdev->device;
	int colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
@ -532,83 +540,63 @@ nv50_fb_vram_rblock(struct nvkm_fb *fb, struct nvkm_ram *ram)
}

int
nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, int length, void **pobject)
nv50_ram_ctor(const struct nvkm_ram_func *func,
	      struct nvkm_fb *fb, struct nvkm_ram *ram)
{
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	struct nvkm_fb *fb = nvkm_fb(parent);
	struct nvkm_device *device = fb->subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_ram *ram;
	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
	u64 size = nvkm_rd32(device, 0x10020c);
	u32 tags = nvkm_rd32(device, 0x100320);
	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
	int ret;

	ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
	ram = *pobject;
	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
	case 0: type = NVKM_RAM_TYPE_DDR1; break;
	case 1:
		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
			type = NVKM_RAM_TYPE_DDR3;
		else
			type = NVKM_RAM_TYPE_DDR2;
		break;
	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
	default:
		break;
	}

	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);

	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
	if (ret)
		return ret;

	ram->size = nvkm_rd32(device, 0x10020c);
	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);

	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
	ram->parts = hweight8(ram->part_mask);

	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
	case 1:
		if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
			ram->type = NV_MEM_TYPE_DDR3;
		else
			ram->type = NV_MEM_TYPE_DDR2;
		break;
	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
	default:
		break;
	}

	ret = nvkm_mm_init(&fb->vram, rsvd_head, (ram->size >> 12) -
			   (rsvd_head + rsvd_tail),
			   nv50_fb_vram_rblock(fb, ram) >> 12);
	if (ret)
		return ret;

	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
	ram->tags = nvkm_rd32(device, 0x100320);
	ram->get = nv50_ram_get;
	ram->put = nv50_ram_put;
	return 0;
	nvkm_mm_fini(&ram->vram);

	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}

static int
nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 datasize,
	      struct nvkm_object **pobject)
int
nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nvkm_fb *fb = nvkm_fb(parent);
	struct nvkm_subdev *subdev = &fb->subdev;
	struct nv50_ram *ram;
	int ret, i;

	ret = nv50_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
	if (ret)
		return ret;

	switch (ram->base.type) {
	case NV_MEM_TYPE_GDDR3:
		ram->base.calc = nv50_ram_calc;
		ram->base.prog = nv50_ram_prog;
		ram->base.tidy = nv50_ram_tidy;
		break;
	case NV_MEM_TYPE_DDR2:
	default:
		nvkm_warn(subdev, "reclocking of this ram type unsupported\n");
		return 0;
	}

	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
@ -648,13 +636,3 @@ nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,

	return 0;
}

struct nvkm_oclass
nv50_ram_oclass = {
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_ram_ctor,
		.dtor = _nvkm_ram_dtor,
		.init = _nvkm_ram_init,
		.fini = _nvkm_ram_fini,
	}
};

@ -84,8 +84,8 @@ static void
nv50_instobj_dtor(struct nvkm_object *object)
{
	struct nv50_instobj *node = (void *)object;
	struct nvkm_fb *fb = nvkm_fb(object);
	fb->ram->put(fb, &node->mem);
	struct nvkm_ram *ram = nvkm_fb(object)->ram;
	ram->func->put(ram, &node->mem);
	nvkm_instobj_destroy(&node->base);
}

@ -94,7 +94,7 @@ nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_fb *fb = nvkm_fb(parent);
	struct nvkm_ram *ram = nvkm_fb(parent)->ram;
	struct nvkm_instobj_args *args = data;
	struct nv50_instobj *node;
	int ret;
@ -107,7 +107,8 @@ nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	if (ret)
		return ret;

	ret = fb->ram->get(fb, args->size, args->align, 0, 0x800, &node->mem);
	ret = ram->func->get(ram, args->size, args->align, 0, 0x800,
			     &node->mem);
	if (ret)
		return ret;

@ -145,12 +145,12 @@ gf100_ltc_init(struct nvkm_object *object)
void
gf100_ltc_dtor(struct nvkm_object *object)
{
	struct nvkm_fb *fb = nvkm_fb(object);
	struct nvkm_ltc_priv *ltc = (void *)object;
	struct nvkm_ram *ram = ltc->base.subdev.device->fb->ram;

	nvkm_mm_fini(&ltc->tags);
	if (fb->ram)
		nvkm_mm_free(&fb->vram, &ltc->tag_ram);
	if (ram)
		nvkm_mm_free(&ram->vram, &ltc->tag_ram);

	nvkm_ltc_destroy(ltc);
}
@ -158,19 +158,20 @@ gf100_ltc_dtor(struct nvkm_object *object)
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
 */
int
gf100_ltc_init_tag_ram(struct nvkm_fb *fb, struct nvkm_ltc_priv *ltc)
gf100_ltc_init_tag_ram(struct nvkm_ltc_priv *ltc)
{
	struct nvkm_ram *ram = ltc->base.subdev.device->fb->ram;
	u32 tag_size, tag_margin, tag_align;
	int ret;

	/* No VRAM, no tags for now. */
	if (!fb->ram) {
	if (!ram) {
		ltc->num_tags = 0;
		goto mm_init;
	}

	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
	ltc->num_tags = (fb->ram->size >> 17) / 4;
	ltc->num_tags = (ram->size >> 17) / 4;
	if (ltc->num_tags > (1 << 17))
		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
@ -190,7 +191,7 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *fb, struct nvkm_ltc_priv *ltc)
	tag_size += tag_align;
	tag_size = (tag_size + 0xfff) >> 12; /* round up */

	ret = nvkm_mm_tail(&fb->vram, 1, 1, tag_size, tag_size, 1,
	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
			   &ltc->tag_ram);
	if (ret) {
		ltc->num_tags = 0;
@ -214,7 +215,6 @@ gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_fb *fb = device->fb;
	struct nvkm_ltc_priv *ltc;
	u32 parts, mask;
	int ret, i;
@ -232,7 +232,7 @@ gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	}
	ltc->lts_nr = nvkm_rd32(device, 0x17e8dc) >> 28;

	ret = gf100_ltc_init_tag_ram(fb, ltc);
	ret = gf100_ltc_init_tag_ram(ltc);
	if (ret)
		return ret;

@ -124,7 +124,6 @@ gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_fb *fb = device->fb;
	struct nvkm_ltc_priv *ltc;
	u32 parts, mask;
	int ret, i;
@ -142,7 +141,7 @@ gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	}
	ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;

	ret = gf100_ltc_init_tag_ram(fb, ltc);
	ret = gf100_ltc_init_tag_ram(ltc);
	if (ret)
		return ret;

@ -45,7 +45,7 @@ int gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
		   struct nvkm_oclass *, void *, u32,
		   struct nvkm_object **);
void gf100_ltc_dtor(struct nvkm_object *);
int gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
int gf100_ltc_init_tag_ram(struct nvkm_ltc_priv *);
int gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);