/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>

#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE (  4 * 1024)
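
/* With 4 KiB pages, the 512 MiB GART needs (NV44_GART_SIZE / NV44_GART_PAGE)
 * = 131072 PTEs, which at 4 bytes apiece is exactly the 512 KiB page table
 * allocated in nv44_mmu_ctor() below.
 */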

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
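
/* NV44 GART PTEs are 27-bit page frame numbers packed four to a 16-byte
 * group: 4 x 27 = 108 bits spread across four 32-bit words, and every
 * group is written with bit 30 of its last word set (presumably a
 * valid/present marker for the group; the hardware meaning isn't
 * documented here).  Because PTEs straddle word boundaries, a partial
 * update must read-modify-write the whole group, which is what
 * nv44_vm_fill() does, substituting the dummy page ("null") for entries
 * without a backing page.
 */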

static void
nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
             dma_addr_t *list, u32 pte, u32 cnt)
{
        u32 base = (pte << 2) & ~0x0000000f;
        u32 tmp[4];

        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);

        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (null >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
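
/* Map a scatterlist into the GART: any misaligned head and tail of the
 * range go through the read-modify-write path in nv44_vm_fill(), while
 * fully-aligned groups of four PTEs are packed and written directly.
 */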

static void
nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        struct nv04_mmu *mmu = (void *)vma->vm->mmu;
        u32 tmp[4];
        int i;

        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_vm_fill(pgt, mmu->null, list, pte, part);
                pte  += part;
                list += part;
                cnt  -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
                nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
                nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
                cnt -= 4;
        }

        if (cnt)
                nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
}
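
/* Unmap a range: misaligned edges are redirected at the dummy page via
 * nv44_vm_fill(), and whole groups of four PTEs are simply zeroed.
 */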

static void
nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
        struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);

        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
                pte += part;
                cnt -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte++ * 4, 0x00000000);
                nv_wo32(pgt, pte++ * 4, 0x00000000);
                nv_wo32(pgt, pte++ * 4, 0x00000000);
                nv_wo32(pgt, pte++ * 4, 0x00000000);
                cnt -= 4;
        }

        if (cnt)
                nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
}
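
/* Flush the GART TLB.  The register semantics below are inferred from
 * the values written rather than from documentation: 0x100814 appears
 * to take the last valid address, 0x100808 bit 5 triggers the flush,
 * and bit 0 signals completion.
 */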

static void
nv44_vm_flush(struct nvkm_vm *vm)
{
        struct nv04_mmu *mmu = (void *)vm->mmu;
        nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
        nv_wr32(mmu, 0x100808, 0x00000020);
        if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
                nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
        nv_wr32(mmu, 0x100808, 0x00000000);
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/
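
/* The NV44 on-chip GART is only useful on PCIe boards; AGP boards (or
 * boards with the "NvPCIE" config option disabled) fall back to the
 * nv04 MMU class instead.
 */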

static int
nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
              struct nvkm_oclass *oclass, void *data, u32 size,
              struct nvkm_object **pobject)
{
        struct nvkm_device *device = nv_device(parent);
        struct nv04_mmu *mmu;
        int ret;

        if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
            !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
                return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
                                        data, size, pobject);
        }

        ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
                              "mmu", &mmu);
        *pobject = nv_object(mmu);
        if (ret)
                return ret;

        mmu->base.create = nv04_vm_create;
        mmu->base.limit = NV44_GART_SIZE;
        mmu->base.dma_bits = 39;
        mmu->base.pgt_bits = 32 - 12;
        mmu->base.spg_shift = 12;
        mmu->base.lpg_shift = 12;
        mmu->base.map_sg = nv44_vm_map_sg;
        mmu->base.unmap = nv44_vm_unmap;
        mmu->base.flush = nv44_vm_flush;

        /* dummy page that unmapped GART entries are pointed at */
        mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
        if (!mmu->nullp) {
                nv_warn(mmu, "unable to allocate dummy pages\n");
                mmu->null = 0;
        }

        ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096,
                             &mmu->vm);
        if (ret)
                return ret;

        /* single flat page table covering the entire GART, 4 bytes per PTE */
        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
                              (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
                              512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
                              &mmu->vm->pgt[0].obj[0]);
        mmu->vm->pgt[0].refcount[0] = 1;
        if (ret)
                return ret;

        return 0;
}
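
/* Point the hardware at the page table and dummy page, then enable the
 * GART.  As with the flush path, the register roles here are inferred
 * from the values written (0x100818 = dummy page, 0x100804 = GART size,
 * 0x100800 = page table address) rather than from documentation.
 */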

static int
nv44_mmu_init(struct nvkm_object *object)
{
        struct nv04_mmu *mmu = (void *)object;
        struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
        u32 addr;
        int ret;

        ret = nvkm_mmu_init(&mmu->base);
        if (ret)
                return ret;

        /* calculate vram address of this PRAMIN block, object must be
         * allocated on 512KiB alignment, and not exceed a total size
         * of 512KiB for this to work correctly
         */
        addr  = nv_rd32(mmu, 0x10020c);
        addr -= ((gart->addr >> 19) + 1) << 19;

        nv_wr32(mmu, 0x100850, 0x80000000);
        nv_wr32(mmu, 0x100818, mmu->null);
        nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
        nv_wr32(mmu, 0x100850, 0x00008000);
        nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
        nv_wr32(mmu, 0x100820, 0x00000000);
        nv_wr32(mmu, 0x10082c, 0x00000001);
        nv_wr32(mmu, 0x100800, addr | 0x00000010);
        return 0;
}

struct nvkm_oclass
nv44_mmu_oclass = {
        .handle = NV_SUBDEV(MMU, 0x44),
        .ofuncs = &(struct nvkm_ofuncs) {
                .ctor = nv44_mmu_ctor,
                .dtor = nv04_mmu_dtor,
                .init = nv44_mmu_init,
                .fini = _nvkm_mmu_fini,
        },
};