Merge branch 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few more things for 4.12:
- ttm and amdgpu support for non-contiguous vram CPU mappings
- lots of bug fixes and cleanups for vega10
- misc bug fixes and code cleanups

[airlied: fix do_div error on 32-bit arm, not sure it's 100% correct]
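
For context on that note: do_div() divides its 64-bit first argument in place, returns the 32-bit remainder, and expects a divisor that fits in 32 bits, which avoids the 64-by-64 modulo that 32-bit arm kernels cannot link against. A minimal sketch of that convention, with hypothetical names rather than the exact change to amdgpu_ttm_io_mem_pfn() below:

#include <linux/types.h>
#include <asm/div64.h>

/* Sketch only: split a 64-bit byte offset into a page index plus an
 * in-page remainder without emitting a 64-by-64 division.
 */
static u64 split_offset(u64 byte_offset, u32 page_size, u32 *in_page)
{
	*in_page = do_div(byte_offset, page_size); /* byte_offset becomes the quotient */
	return byte_offset;
}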

* 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux: (58 commits)
  drm/amdgpu: use uintptr_t instead of unsigned long to store pointer
  drm/amdgpu: Avoid using signed integer to store pointer value
  drm/amdgpu:invoke new implemented AI MB func
  drm/amdgpu/vega10:timeout set to equal with VI
  drm/amdgpu:implement the reset MB func for vega10
  drm/amdgpu:fix typo for mxgpu_ai
  drm/amdgpu:no need to involv HDP in KIQ
  drm/amdgpu:add PSP block only load_type=PSP (v2)
  drm/amdgpu/smu9: update to latest driver interface
  drm/amd/amdgpu: cleanup gfx_v9_0_gpu_init()
  drm/amd/amdgpu: cleanup gfx_v9_0_rlc_reset()
  drm/amd/amdgpu: cleanup gfx_v9_0_rlc_start()
  drm/amd/amdgpu: simplify gfx_v9_0_cp_gfx_enable()
  drm/amd/amdgpu: cleanup gfx_v9_0_kiq_init_register()
  drm/amd/amdgpu: Drop gfx_v9_0_print_status()
  drm/amd/amdgpu: cleanup gfx_v9_0_set_gfx_eop_interrupt_state()
  drm/amd/amdgpu: cleanup gfx_v9_0_set_priv_reg_fault_state()
  drm/amd/amdgpu: cleanup gfx_v9_0_set_priv_inst_fault_state()
  drm/amd/amdgpu: cleanup gfx_v9_0_init_queue()
  drm/amdgpu: Move function amdgpu_has_atpx near other similar functions
  ...
Dave Airlie 2017-04-07 05:41:42 +10:00
commit 0168778115
57 changed files with 903 additions and 886 deletions

@ -32,7 +32,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
@ -122,14 +122,6 @@ extern int amdgpu_param_buf_per_se;
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 2
#define AMDGPU_MMHUB 0
#define AMDGPU_GFXHUB 1
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@ -312,12 +304,9 @@ struct amdgpu_gart_funcs {
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
};
/* provided by the mc block */
struct amdgpu_mc_funcs {
/* adjust mc addr in fb for APU case */
u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
/* provided by the ih block */
@ -379,7 +368,10 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va_mapping {
struct list_head list;
struct interval_tree_node it;
struct rb_node rb;
uint64_t start;
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
uint64_t flags;
};
@ -579,8 +571,6 @@ struct amdgpu_vmhub {
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
uint32_t (*get_invalidate_req)(unsigned int vm_id);
uint32_t (*get_vm_protection_bits)(void);
};
/*
@ -618,7 +608,6 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
const struct amdgpu_mc_funcs *mc_funcs;
};
/*
@ -1712,6 +1701,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD(reg, field, val) \
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
/*
* BIOS helpers.
*/
@ -1887,12 +1882,14 @@ void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/*

@ -754,6 +754,35 @@ union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
/*
* Return vram width from integrated system info table, if available,
* or 0 if not.
*/
int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
union igp_info *igp_info;
u8 frev, crev;
/* get any igp specific overrides */
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 8:
case 9:
return igp_info->info_8.ucUMAChannelNumber * 64;
default:
return 0;
}
}
return 0;
}
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id)

@ -148,6 +148,8 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev);
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);

@ -237,7 +237,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;

@ -161,7 +161,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@ -181,7 +181,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@ -192,7 +192,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
(m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
@ -1339,7 +1339,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
fences_user = (void __user *)(unsigned long)(wait->in.fences);
fences_user = (void __user *)(uintptr_t)(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@ -1388,8 +1388,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
continue;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
if (mapping->start > addr ||
addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
@ -1397,8 +1397,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
if (mapping->start > addr ||
addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;

@ -1042,14 +1042,6 @@ static bool amdgpu_check_pot_argument(int arg)
static void amdgpu_get_block_size(struct amdgpu_device *adev)
{
/* from AI, asic starts to support multiple level VMPT */
if (adev->asic_type >= CHIP_VEGA10) {
if (amdgpu_vm_block_size != 9)
dev_warn(adev->dev,
"Multi-VMPT limits block size to one page!\n");
amdgpu_vm_block_size = 9;
return;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
@ -1079,6 +1071,36 @@ static void amdgpu_get_block_size(struct amdgpu_device *adev)
}
}
static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
amdgpu_vm_size);
goto def_value;
}
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
goto def_value;
}
/*
* Max GPUVM size for Cayman, SI, CI VI are 40 bits.
*/
if (amdgpu_vm_size > 1024) {
dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
amdgpu_vm_size);
goto def_value;
}
return;
def_value:
amdgpu_vm_size = 8;
dev_info(adev->dev, "set default VM size %dGB\n", amdgpu_vm_size);
}
/**
* amdgpu_check_arguments - validate module params
*
@ -1108,26 +1130,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
}
if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
amdgpu_vm_size);
amdgpu_vm_size = 8;
}
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
amdgpu_vm_size = 8;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
if (amdgpu_vm_size > 1024) {
dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
amdgpu_vm_size);
amdgpu_vm_size = 8;
}
amdgpu_check_vm_size(adev);
amdgpu_get_block_size(adev);
@ -2249,9 +2252,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_resume(adev);
if (r)
if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
return r;
}
amdgpu_fence_driver_resume(adev);
if (resume) {

@ -717,7 +717,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
void __user *out = (void __user *)(long)args->value;
void __user *out = (void __user *)(uintptr_t)args->value;
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;

@ -316,9 +316,10 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
return -EINVAL;
if (!adev->irq.client[client_id].sources) {
adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
sizeof(struct amdgpu_irq_src),
GFP_KERNEL);
adev->irq.client[client_id].sources =
kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
sizeof(struct amdgpu_irq_src *),
GFP_KERNEL);
if (!adev->irq.client[client_id].sources)
return -ENOMEM;
}

@ -36,12 +36,6 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
#else
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
@ -243,7 +237,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(long)info->return_pointer;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;

@ -31,6 +31,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>

@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
unsigned lpfn = 0;
/* This forces a reallocation if the flag wasn't set before */
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
places[c].lpfn = lpfn;
places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
c++;
}
@ -928,8 +927,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
/* TODO: figure out how to map scattered VRAM to the CPU */
if ((offset + size) <= adev->mc.visible_vram_size &&
(abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@ -937,7 +935,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {

@ -130,7 +130,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
};
}
amdgpu_bo_free_kernel(&cmd_buf_bo,
&cmd_buf_mc_addr,

@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map,
TP_fast_assign(
__entry->bo = bo_va ? bo_va->bo : NULL;
__entry->start = mapping->it.start;
__entry->last = mapping->it.last;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
TP_fast_assign(
__entry->bo = bo_va->bo;
__entry->start = mapping->it.start;
__entry->last = mapping->it.last;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
),
TP_fast_assign(
__entry->soffset = mapping->it.start;
__entry->eoffset = mapping->it.last + 1;
__entry->soffset = mapping->start;
__entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",

@ -529,40 +529,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
if (mem->start == AMDGPU_BO_INVALID_OFFSET)
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
return -EINVAL;
mem->bus.base = adev->mc.aper_base;
mem->bus.is_iomem = true;
#ifdef __alpha__
/*
* Alpha: use bus.addr to hold the ioremap() return,
* so we can modify bus.base below.
*/
if (mem->placement & TTM_PL_FLAG_WC)
mem->bus.addr =
ioremap_wc(mem->bus.base + mem->bus.offset,
mem->bus.size);
else
mem->bus.addr =
ioremap_nocache(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
/*
* Alpha: Use just the bus offset plus
* the hose/domain memory base for bus.base.
* It then can be used to build PTEs for VRAM
* access, as done in ttm_bo_vm_fault().
*/
mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
adev->ddev->hose->dense_mem_base;
#endif
break;
default:
return -EINVAL;
@ -574,6 +546,17 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct drm_mm_node *mm = bo->mem.mm_node;
uint64_t size = mm->size;
uint64_t offset = page_offset;
page_offset = do_div(offset, size);
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
}
/*
* TTM backend functions.
*/
@ -1089,6 +1072,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)

@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
start = amdgpu_bo_gpu_offset(bo);
end = (mapping->it.last + 1 - mapping->it.start);
end = (mapping->last + 1 - mapping->start);
end = end * AMDGPU_GPU_PAGE_SIZE + start;
addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,

@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
}
if ((addr + (uint64_t)size) >
((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);

@ -122,9 +122,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_rreg(ring, reg);
amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
@ -150,9 +148,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_wreg(ring, reg, v);
amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);

@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@ -51,6 +52,15 @@
* SI supports 16.
*/
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
START, LAST, static, amdgpu_vm_it)
#undef START
#undef LAST
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
@ -90,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == 0)
/* For the root directory */
return adev->vm_manager.max_pfn >>
(amdgpu_vm_block_size * adev->vm_manager.num_level);
(adev->vm_manager.block_size *
adev->vm_manager.num_level);
else if (level == adev->vm_manager.num_level)
/* For the page tables on the leaves */
return AMDGPU_VM_PTE_COUNT;
return AMDGPU_VM_PTE_COUNT(adev);
else
/* Everything in between */
return 1 << amdgpu_vm_block_size;
return 1 << adev->vm_manager.block_size;
}
/**
@ -261,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned level)
{
unsigned shift = (adev->vm_manager.num_level - level) *
amdgpu_vm_block_size;
adev->vm_manager.block_size;
unsigned pt_idx, from, to;
int r;
@ -365,11 +376,19 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vm_id *id)
/**
* amdgpu_vm_had_gpu_reset - check if reset occured since last use
*
* @adev: amdgpu_device pointer
* @id: VMID structure
*
* Check if GPU reset occured since last use of the VMID.
*/
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vm_id *id)
{
return id->current_gpu_reset_count !=
atomic_read(&adev->gpu_reset_counter) ? true : false;
atomic_read(&adev->gpu_reset_counter);
}
/**
@ -455,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
if (amdgpu_vm_is_gpu_reset(adev, id))
if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
if (atomic64_read(&id->owner) != vm->client_id)
@ -483,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
@ -504,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
dma_fence_put(id->first);
id->first = dma_fence_get(fence);
dma_fence_put(id->last_flush);
id->last_flush = NULL;
@ -557,8 +572,8 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
u64 addr = mc_addr;
if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);
if (adev->gart.gart_funcs->adjust_mc_addr)
addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
return addr;
}
@ -583,60 +598,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
id->gws_size != job->gws_size ||
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush ||
amdgpu_vm_ring_has_compute_vm_bug(ring);
unsigned patch_offset = 0;
int r;
if (job->vm_needs_flush || gds_switch_needed ||
amdgpu_vm_is_gpu_reset(adev, id) ||
amdgpu_vm_ring_has_compute_vm_bug(ring)) {
unsigned patch_offset = 0;
if (amdgpu_vm_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
}
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (!vm_flush_needed && !gds_switch_needed)
return 0;
if (ring->funcs->emit_pipeline_sync &&
(job->vm_needs_flush || gds_switch_needed ||
amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
struct dma_fence *fence;
u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
if (ring->funcs->emit_pipeline_sync)
amdgpu_ring_emit_pipeline_sync(ring);
trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
struct dma_fence *fence;
r = amdgpu_fence_emit(ring, &fence);
if (r)
return r;
trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
mutex_lock(&adev->vm_manager.lock);
dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
r = amdgpu_fence_emit(ring, &fence);
if (r)
return r;
if (gds_switch_needed) {
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
id->gws_base = job->gws_base;
id->gws_size = job->gws_size;
id->oa_base = job->oa_base;
id->oa_size = job->oa_size;
amdgpu_ring_emit_gds_switch(ring, job->vm_id,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
job->oa_base, job->oa_size);
}
mutex_lock(&adev->vm_manager.lock);
dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
if (ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
if (gds_switch_needed) {
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
id->gws_base = job->gws_base;
id->gws_size = job->gws_size;
id->oa_base = job->oa_base;
id->oa_size = job->oa_size;
amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
job->gds_size, job->gws_base,
job->gws_size, job->oa_base,
job->oa_size);
}
/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
if (ring->funcs->emit_switch_buffer) {
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
if (ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
if (ring->funcs->emit_switch_buffer) {
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
return 0;
}
@ -960,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
unsigned idx, level = p->adev->vm_manager.num_level;
while (entry->entries) {
idx = addr >> (amdgpu_vm_block_size * level--);
idx = addr >> (p->adev->vm_manager.block_size * level--);
idx %= amdgpu_bo_size(entry->bo) / 8;
entry = &entry->entries[idx];
}
@ -987,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
struct amdgpu_device *adev = params->adev;
const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
@ -1011,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
@ -1039,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
@ -1186,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* reserve space for one command every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
*/
ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
/* padding, etc. */
ndw = 64;
@ -1301,7 +1319,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
uint64_t pfn, src = 0, start = mapping->it.start;
uint64_t pfn, src = 0, start = mapping->start;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@ -1353,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
addr += pfn << PAGE_SHIFT;
last = min((uint64_t)mapping->it.last, start + max_entries - 1);
last = min((uint64_t)mapping->last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@ -1368,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
start = last + 1;
} while (unlikely(start != mapping->it.last + 1));
} while (unlikely(start != mapping->last + 1));
return 0;
}
@ -1518,7 +1536,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
if (fence)
dma_fence_wait(fence, false);
amdgpu_vm_prt_put(cb->adev);
amdgpu_vm_prt_put(adev);
} else {
cb->adev = adev;
if (!fence || dma_fence_add_callback(fence, &cb->cb,
@ -1724,9 +1742,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t saddr, uint64_t offset,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_vm *vm = bo_va->vm;
struct interval_tree_node *it;
uint64_t eaddr;
/* validate the parameters */
@ -1743,14 +1760,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
tmp->it.start, tmp->it.last + 1);
"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
tmp->start, tmp->last + 1);
return -EINVAL;
}
@ -1759,13 +1774,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -ENOMEM;
INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr;
mapping->it.last = eaddr;
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@ -1823,13 +1838,13 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
mapping->it.start = saddr;
mapping->it.last = eaddr;
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@ -1860,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
if (mapping->start == saddr)
break;
}
@ -1868,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
valid = false;
list_for_each_entry(mapping, &bo_va->invalids, list) {
if (mapping->it.start == saddr)
if (mapping->start == saddr)
break;
}
@ -1877,7 +1892,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
}
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@ -1905,7 +1920,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size)
{
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
struct interval_tree_node *it;
LIST_HEAD(removed);
uint64_t eaddr;
@ -1927,43 +1941,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
INIT_LIST_HEAD(&after->list);
/* Now gather all removed mappings */
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
while (it) {
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
it = interval_tree_iter_next(it, saddr, eaddr);
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
while (tmp) {
/* Remember mapping split at the start */
if (tmp->it.start < saddr) {
before->it.start = tmp->it.start;
before->it.last = saddr - 1;
if (tmp->start < saddr) {
before->start = tmp->start;
before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
list_add(&before->list, &tmp->list);
}
/* Remember mapping split at the end */
if (tmp->it.last > eaddr) {
after->it.start = eaddr + 1;
after->it.last = tmp->it.last;
if (tmp->last > eaddr) {
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
after->offset += after->it.start - tmp->it.start;
after->offset += after->start - tmp->start;
after->flags = tmp->flags;
list_add(&after->list, &tmp->list);
}
list_del(&tmp->list);
list_add(&tmp->list, &removed);
tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
}
/* And free them up */
list_for_each_entry_safe(tmp, next, &removed, list) {
interval_tree_remove(&tmp->it, &vm->va);
amdgpu_vm_it_remove(tmp, &vm->va);
list_del(&tmp->list);
if (tmp->it.start < saddr)
tmp->it.start = saddr;
if (tmp->it.last > eaddr)
tmp->it.last = eaddr;
if (tmp->start < saddr)
tmp->start = saddr;
if (tmp->last > eaddr)
tmp->last = eaddr;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
@ -1971,7 +1984,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
interval_tree_insert(&before->it, &vm->va);
amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@ -1980,7 +1993,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
interval_tree_insert(&after->it, &vm->va);
amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@ -2014,13 +2027,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
amdgpu_vm_it_remove(mapping, &vm->va);
amdgpu_vm_free_mapping(adev, vm, mapping,
bo_va->last_pt_update);
}
@ -2062,7 +2075,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
@ -2162,9 +2175,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
@ -2227,7 +2240,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);

@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
@ -76,6 +76,14 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 2
#define AMDGPU_GFXHUB 0
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
@ -123,7 +131,6 @@ struct amdgpu_vm {
struct amdgpu_vm_id {
struct list_head list;
struct dma_fence *first;
struct amdgpu_sync active;
struct dma_fence *last_flush;
atomic64_t owner;
@ -155,6 +162,8 @@ struct amdgpu_vm_manager {
uint64_t max_pfn;
uint32_t num_level;
uint64_t vm_size;
uint32_t block_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */

@ -93,7 +93,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
@ -106,8 +105,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
place->lpfn || amdgpu_vram_page_split == -1) {
if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
@ -124,12 +123,14 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
mem->start = 0;
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
unsigned long start;
if (pages == pages_per_node)
alignment = pages_per_node;
@ -141,11 +142,19 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
/* Calculate a virtual BO start address to easily check if
* everything is CPU accessible.
*/
start = nodes[i].start + nodes[i].size;
if (start > mem->num_pages)
start -= mem->num_pages;
else
start = 0;
mem->start = max(mem->start, start);
pages_left -= pages;
}
spin_unlock(&mgr->lock);
mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;

@ -4565,6 +4565,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].ready = false;
adev->gfx.kiq.ring.ready = false;
}
udelay(50);
}
@ -4721,14 +4722,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
if (ring->use_doorbell)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
else
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN,
ring->use_doorbell ? 1 : 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
@ -4816,13 +4813,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
uint32_t tmp;
int j;
/* disable wptr polling */
tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
@ -4834,10 +4828,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
if (RREG32(mmCP_HQD_ACTIVE) & 1) {
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
@ -4894,11 +4888,8 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
/* activate the queue */
WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
if (ring->use_doorbell) {
tmp = RREG32(mmCP_PQ_STATUS);
tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
WREG32(mmCP_PQ_STATUS, tmp);
}
if (ring->use_doorbell)
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@ -5471,19 +5462,18 @@ static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
{
int i;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
u32 tmp;
tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
DEQUEUE_REQ, 2);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_pre_soft_reset(void *handle)
@ -5589,11 +5579,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
WREG32(mmCP_HQD_PQ_RPTR, 0);
WREG32(mmCP_HQD_PQ_WPTR, 0);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_post_soft_reset(void *handle)
@ -6986,40 +6978,24 @@ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ);
if (ring->me == 1)
target = mmCP_ME1_PIPE0_INT_CNTL;
else
target = mmCP_ME2_PIPE0_INT_CNTL;
target += ring->pipe;
switch (type) {
case AMDGPU_CP_KIQ_IRQ_DRIVER0:
if (state == AMDGPU_IRQ_STATE_DISABLE) {
tmp = RREG32(mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
WREG32(mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
WREG32(target, tmp);
} else {
tmp = RREG32(mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
WREG32(mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
WREG32(target, tmp);
}
WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
if (ring->me == 1)
WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
ring->pipe,
GENERIC2_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
else
WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
ring->pipe,
GENERIC2_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
break;
default:
BUG(); /* kiq only support GENERIC2_INT now */
@ -7159,8 +7135,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,

@ -1288,9 +1288,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL));
tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp);
WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
@ -1395,13 +1393,9 @@ void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
@ -1410,10 +1404,8 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU))
@ -1497,14 +1489,10 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
int i;
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
if (enable) {
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
} else {
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
}
@ -2020,13 +2008,10 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
uint32_t tmp;
int j;
/* disable wptr polling */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
mqd->cp_hqd_eop_base_addr_lo);
@ -2118,11 +2103,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
mqd->cp_hqd_active);
if (ring->use_doorbell) {
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
}
if (ring->use_doorbell)
WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@ -2366,177 +2348,6 @@ static int gfx_v9_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static void gfx_v9_0_print_status(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dev_info(adev->dev, "GFX 9.x registers\n");
dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)));
dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)));
dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)));
dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)));
dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)));
dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)));
dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT)));
dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)));
dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)));
dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)));
dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)));
dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)));
dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)));
dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT)));
dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)));
dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)));
for (i = 0; i < 32; i++) {
dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4));
}
for (i = 0; i < 16; i++) {
dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4));
}
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
dev_info(adev->dev, " se: %d\n", i);
gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG)));
dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1)));
}
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)));
dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS)));
dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1)));
dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX)));
dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL)));
dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)));
dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG)));
dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)));
dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3)));
dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL)));
dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1)));
dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE)));
dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES)));
dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL)));
dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS)));
dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION)));
dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE)));
dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE)));
dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE)));
dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE)));
dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)));
dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT)));
dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID)));
dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER)));
dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY)));
dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID)));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR)));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR)));
dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI)));
dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE)));
dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI)));
dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL)));
dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR)));
dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK)));
dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)));
dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)));
dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT)));
dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX)));
dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK)));
dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS)));
dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL)));
dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6)));
dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12)));
dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3)));
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < 16; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
dev_info(adev->dev, " VM %d:\n", i);
dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)));
dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES)));
}
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v9_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
@ -2569,8 +2380,7 @@ static int gfx_v9_0_soft_reset(void *handle)
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
if (grbm_soft_reset ) {
gfx_v9_0_print_status((void *)adev);
if (grbm_soft_reset) {
/* stop the rlc */
gfx_v9_0_rlc_stop(adev);
@ -2596,7 +2406,6 @@ static int gfx_v9_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
gfx_v9_0_print_status((void *)adev);
}
return 0;
}
@ -3148,6 +2957,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@ -3157,7 +2967,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
uint32_t req = hub->get_invalidate_req(vm_id);
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32
@ -3376,21 +3185,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
u32 cp_int_cntl;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl =
REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@ -3446,20 +3246,12 @@ static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 cp_int_cntl;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@ -3473,21 +3265,12 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 cp_int_cntl;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
break;
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
default:
break;
}
@ -3759,8 +3542,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@ -3975,9 +3756,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
ring->pipe,
ring->queue, 0);
/* disable wptr polling */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
/* write the EOP addr */
BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
@ -4121,11 +3900,8 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
amdgpu_bo_kunmap(ring->mqd_obj);
amdgpu_bo_unreserve(ring->mqd_obj);
if (use_doorbell) {
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
}
if (use_doorbell)
WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
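The gfx_v9_0 changes above repeatedly collapse an open-coded read/modify/write of CP_INT_CNTL_RING0 (and similar registers) into the WREG32_FIELD15() helper. A minimal stand-alone sketch of the field-update pattern that macro encapsulates; the mask/shift constants here are illustrative stand-ins, not the real register definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the generated REG_FIELD_MASK/REG_FIELD_SHIFT values. */
#define TIME_STAMP_INT_ENABLE_MASK  0x04000000u
#define TIME_STAMP_INT_ENABLE_SHIFT 26

/* Read/modify/write a single field, as the SOC15 field helpers do. */
static uint32_t set_field(uint32_t reg_val, uint32_t mask, unsigned int shift,
                          uint32_t field_val)
{
        return (reg_val & ~mask) | ((field_val << shift) & mask);
}

int main(void)
{
        uint32_t cp_int_cntl = 0x12345678;

        cp_int_cntl = set_field(cp_int_cntl, TIME_STAMP_INT_ENABLE_MASK,
                                TIME_STAMP_INT_ENABLE_SHIFT, 1);
        printf("0x%08x\n", (unsigned int)cp_int_cntl);
        return 0;
}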

View File

@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@ -299,36 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
{
u32 req = 0;
/* invalidate using legacy mode on vm_id*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vm_id);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
{
return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
}
static int gfxhub_v1_0_early_init(void *handle)
{
return 0;
@ -361,9 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
return 0;
}

View File

@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
((adev->vm_manager.block_size - 9)
<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
@ -848,7 +849,12 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
adev->vm_manager.vm_size = amdgpu_vm_size;
adev->vm_manager.block_size = amdgpu_vm_block_size;
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
adev->vm_manager.vm_size, adev->vm_manager.block_size);
adev->mc.mc_mask = 0xffffffffffULL;
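For reference, the max_pfn arithmetic above works because vm_size is expressed in GB and pages are 4 KB: vm_size GB = vm_size * 2^30 bytes, and 2^30 / 2^12 = 2^18 pages per GB, hence max_pfn = vm_size << 18 (for example, 64 GB -> 64 * 262144 = 16777216 pages).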

View File

@ -37,6 +37,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_atombios.h"
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
@ -325,48 +327,51 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
u32 tmp;
int chansize, numchan;
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->mc.vram_width) {
u32 tmp;
int chansize, numchan;
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
chansize = 64;
} else {
chansize = 32;
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
chansize = 64;
} else {
chansize = 32;
}
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->mc.vram_width = numchan * chansize;
}
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@ -639,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
@ -998,7 +1003,12 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
adev->vm_manager.vm_size = amdgpu_vm_size;
adev->vm_manager.block_size = amdgpu_vm_block_size;
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* Set the internal MC address mask
* This is the max address of the GPU's

View File

@ -38,6 +38,8 @@
#include "vid.h"
#include "vi.h"
#include "amdgpu_atombios.h"
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@ -487,48 +489,51 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
u32 tmp;
int chansize, numchan;
adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->mc.vram_width) {
u32 tmp;
int chansize, numchan;
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
chansize = 64;
} else {
chansize = 32;
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
chansize = 64;
} else {
chansize = 32;
}
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->mc.vram_width = numchan * chansize;
}
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@ -848,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v8_0_set_fault_enable_default(adev, false);
@ -1082,7 +1087,12 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
adev->vm_manager.vm_size = amdgpu_vm_size;
adev->vm_manager.block_size = amdgpu_vm_block_size;
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* Set the internal MC address mask
* This is the max address of the GPU's

View File

@ -75,11 +75,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits, i;
bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
bits = hub->get_vm_protection_bits();
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@ -89,7 +96,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@ -100,7 +106,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
bits = hub->get_vm_protection_bits();
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@ -110,7 +115,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@ -129,8 +133,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB];
struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
uint32_t status = 0;
u64 addr;
@ -138,13 +141,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (!amdgpu_sriov_vf(adev)) {
if (entry->vm_id_src) {
status = RREG32(mmhub->vm_l2_pro_fault_status);
WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
} else {
status = RREG32(gfxhub->vm_l2_pro_fault_status);
WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
}
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
@ -175,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
u32 req = 0;
/* invalidate using legacy mode on vm_id*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vm_id);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
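The hunks above drop the per-vmhub get_invalidate_req/get_vm_protection_bits callbacks and keep a single copy of the invalidate-request builder in the GMC, reachable through gart_funcs. A stand-alone sketch of that indirection, with simplified types and hypothetical names rather than the real driver structs:

#include <stdint.h>
#include <stdio.h>

struct gart_ops {
        uint32_t (*get_invalidate_req)(unsigned int vm_id);
};

/* Single helper shared by every hub, like gmc_v9_0_get_invalidate_req(). */
static uint32_t demo_get_invalidate_req(unsigned int vm_id)
{
        return 1u << vm_id;             /* per-VMID invalidate bit only */
}

static const struct gart_ops demo_gart_ops = {
        .get_invalidate_req = demo_get_invalidate_req,
};

int main(void)
{
        /* One request value is computed up front and reused for all hubs. */
        uint32_t req = demo_gart_ops.get_invalidate_req(3);
        unsigned int hub;

        for (hub = 0; hub < 2; hub++)
                printf("hub %u: req = 0x%08x\n", hub, (unsigned int)req);
        return 0;
}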
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@ -204,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
u32 tmp = hub->get_invalidate_req(vmid);
u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
@ -337,10 +354,17 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
.get_invalidate_req = gmc_v9_0_get_invalidate_req,
};
static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
@ -349,26 +373,11 @@ static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}
static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
};
static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
{
adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
}
static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_set_gart_funcs(adev);
gmc_v9_0_set_mc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
return 0;
@ -543,11 +552,23 @@ static int gmc_v9_0_sw_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
adev->vm_manager.vm_size = amdgpu_vm_size;
adev->vm_manager.block_size = amdgpu_vm_block_size;
} else {
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
}
DRM_INFO("vm size is %llu GB, block size is %d-bit\n",
adev->vm_manager.vm_size, adev->vm_manager.block_size);
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
&adev->mc.vm_fault);
@ -557,14 +578,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
/* Because of four level VMPTs, vm size is at least 512GB.
* The maximum size is 256TB (48bit).
*/
if (amdgpu_vm_size < 512) {
DRM_WARN("VM size is at least 512GB!\n");
amdgpu_vm_size = 512;
}
adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's

View File

@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9);
adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@ -317,36 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
{
u32 req = 0;
/* invalidate using legacy mode on vm_id*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vm_id);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
{
return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
}
static int mmhub_v1_0_early_init(void *handle)
{
return 0;
@ -379,9 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;
return 0;
}

View File

@ -28,6 +28,7 @@
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
return r;
}
static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
if (req == IDH_REQ_GPU_INIT_ACCESS ||
req == IDH_REQ_GPU_FINI_ACCESS ||
req == IDH_REQ_GPU_RESET_ACCESS) {
r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r)
return r;
}
@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return 0;
}
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("get ack intr and do nothing.\n");
return 0;
}
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
return 0;
}
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
/* wait until RCV_MSG become 3 */
if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
pr_err("failed to recieve FLR_CMPL\n");
return;
}
/* Trigger recovery due to world switch failure */
amdgpu_sriov_gpu_reset(adev, false);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
return 0;
}
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
int r;
/* see what event we get */
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
/* only handle FLR_NOTIFY now */
if (!r)
schedule_work(&adev->virt.flr_work);
return 0;
}
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
.set = xgpu_ai_set_mailbox_ack_irq,
.process = xgpu_ai_mailbox_ack_irq,
};
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
.set = xgpu_ai_set_mailbox_rcv_irq,
.process = xgpu_ai_mailbox_rcv_irq,
};
void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
adev->virt.ack_irq.num_types = 1;
adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
adev->virt.rcv_irq.num_types = 1;
adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
return 0;
}
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
return 0;
}
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
};
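The mailbox handlers above poll the RCV_MSG field until the expected event arrives or AI_MAILBOX_TIMEDOUT expires. A minimal user-space sketch of that poll-with-timeout shape; read_msg() is a hypothetical stand-in for the register read, and the interval/timeout values are only examples:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAILBOX_TIMEOUT_MS 5000
#define DEMO_POLL_INTERVAL_MS   10

/* Stand-in for reading the RCV_MSG field of the mailbox control register. */
static int read_msg(void)
{
        static int calls;
        return ++calls >= 5 ? 3 : 0;    /* pretend event 3 arrives eventually */
}

static bool poll_msg(int expected)
{
        int waited;

        for (waited = 0; waited < DEMO_MAILBOX_TIMEOUT_MS;
             waited += DEMO_POLL_INTERVAL_MS) {
                if (read_msg() == expected)
                        return true;
                /* the driver would msleep(DEMO_POLL_INTERVAL_MS) here */
        }
        return false;
}

int main(void)
{
        printf("event received: %s\n", poll_msg(3) ? "yes" : "no (timeout)");
        return 0;
}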

View File

@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
#define AI_MAILBOX_TIMEDOUT 150000
#define AI_MAILBOX_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@ -44,4 +44,9 @@ enum idh_event {
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
#endif

View File

@ -491,7 +491,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
ucode_size = ucode->ucode_size;
ucode_mem = (uint32_t *)ucode->kaddr;
while (!ucode_size) {
while (ucode_size) {
fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
if (*ucode_mem != fw_sram_reg_val)
@ -508,14 +508,10 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
uint32_t reg, reg_val;
uint32_t reg;
reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000;
WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val);
reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
return true;
return false;
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}

View File

@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));

View File

@ -106,6 +106,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
else
BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@ -125,6 +127,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
else
BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@ -493,7 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@ -558,6 +563,7 @@ static int soc15_common_early_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
xgpu_ai_mailbox_set_irq_funcs(adev);
}
/*
@ -610,8 +616,23 @@ static int soc15_common_early_init(void *handle)
return 0;
}
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
return 0;
}
static int soc15_common_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
return 0;
}
@ -642,6 +663,8 @@ static int soc15_common_hw_fini(void *handle)
/* disable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
return 0;
}
@ -855,7 +878,7 @@ static int soc15_common_set_powergating_state(void *handle,
const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
.late_init = NULL,
.late_init = soc15_common_late_init,
.sw_init = soc15_common_sw_init,
.sw_fini = soc15_common_sw_fini,
.hw_init = soc15_common_hw_init,

View File

@ -135,12 +135,9 @@ static int uvd_v4_2_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_uvd_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_uvd_sw_fini(adev);
}
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
@ -230,11 +227,7 @@ static int uvd_v4_2_suspend(void *handle)
if (r)
return r;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v4_2_resume(void *handle)
@ -246,11 +239,7 @@ static int uvd_v4_2_resume(void *handle)
if (r)
return r;
r = uvd_v4_2_hw_init(adev);
if (r)
return r;
return r;
return uvd_v4_2_hw_init(adev);
}
/**

View File

@ -131,11 +131,7 @@ static int uvd_v5_0_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_uvd_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_uvd_sw_fini(adev);
}
/**
@ -228,11 +224,7 @@ static int uvd_v5_0_suspend(void *handle)
return r;
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v5_0_resume(void *handle)
@ -244,11 +236,7 @@ static int uvd_v5_0_resume(void *handle)
if (r)
return r;
r = uvd_v5_0_hw_init(adev);
if (r)
return r;
return r;
return uvd_v5_0_hw_init(adev);
}
/**

View File

@ -134,11 +134,7 @@ static int uvd_v6_0_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_uvd_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_uvd_sw_fini(adev);
}
/**
@ -230,11 +226,8 @@ static int uvd_v6_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
return r;
}
@ -250,11 +243,7 @@ static int uvd_v6_0_resume(void *handle)
if (r)
return r;
}
r = uvd_v6_0_hw_init(adev);
if (r)
return r;
return r;
return uvd_v6_0_hw_init(adev);
}
/**

View File

@ -438,11 +438,7 @@ static int uvd_v7_0_sw_fini(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
r = amdgpu_uvd_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_uvd_sw_fini(adev);
}
/**
@ -547,11 +543,8 @@ static int uvd_v7_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
return r;
}
@ -567,11 +560,7 @@ static int uvd_v7_0_resume(void *handle)
if (r)
return r;
}
r = uvd_v7_0_hw_init(adev);
if (r)
return r;
return r;
return uvd_v7_0_hw_init(adev);
}
/**
@ -1045,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
unsigned eng = ring->idx;
unsigned i;
@ -1055,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
uint32_t req = hub->get_invalidate_req(vm_id);
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
@ -1091,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@ -1100,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,

View File

@ -451,11 +451,7 @@ static int vce_v2_0_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_vce_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v2_0_hw_init(void *handle)
@ -495,11 +491,7 @@ static int vce_v2_0_suspend(void *handle)
if (r)
return r;
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v2_0_resume(void *handle)
@ -511,11 +503,7 @@ static int vce_v2_0_resume(void *handle)
if (r)
return r;
r = vce_v2_0_hw_init(adev);
if (r)
return r;
return r;
return vce_v2_0_hw_init(adev);
}
static int vce_v2_0_soft_reset(void *handle)

View File

@ -417,11 +417,7 @@ static int vce_v3_0_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_vce_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v3_0_hw_init(void *handle)
@ -471,11 +467,7 @@ static int vce_v3_0_suspend(void *handle)
if (r)
return r;
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v3_0_resume(void *handle)
@ -487,11 +479,7 @@ static int vce_v3_0_resume(void *handle)
if (r)
return r;
r = vce_v3_0_hw_init(adev);
if (r)
return r;
return r;
return vce_v3_0_hw_init(adev);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)

View File

@ -527,11 +527,7 @@ static int vce_v4_0_sw_fini(void *handle)
if (r)
return r;
r = amdgpu_vce_sw_fini(adev);
if (r)
return r;
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
@ -584,11 +580,7 @@ static int vce_v4_0_suspend(void *handle)
if (r)
return r;
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
@ -600,11 +592,7 @@ static int vce_v4_0_resume(void *handle)
if (r)
return r;
r = vce_v4_0_hw_init(adev);
if (r)
return r;
return r;
return vce_v4_0_hw_init(adev);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@ -985,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@ -994,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,

View File

@ -362,7 +362,89 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_SET_RESOURCES 0xA0
/* 1. header
* 2. CONTROL
* 3. QUEUE_MASK_LO [31:0]
* 4. QUEUE_MASK_HI [31:0]
* 5. GWS_MASK_LO [31:0]
* 6. GWS_MASK_HI [31:0]
* 7. OAC_MASK [15:0]
* 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
*/
# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
#define PACKET3_MAP_QUEUES 0xA2
/* 1. header
* 2. CONTROL
* 3. CONTROL2
* 4. MQD_ADDR_LO [31:0]
* 5. MQD_ADDR_HI [31:0]
* 6. WPTR_ADDR_LO [31:0]
* 7. WPTR_ADDR_HI [31:0]
*/
/* CONTROL */
# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
/* CONTROL2 */
# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26)
# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29)
# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31)
#define PACKET3_UNMAP_QUEUES 0xA3
/* 1. header
* 2. CONTROL
* 3. CONTROL2
* 4. CONTROL3
* 5. CONTROL4
* 6. CONTROL5
*/
/* CONTROL */
# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
/* 0 - PREEMPT_QUEUES
* 1 - RESET_QUEUES
* 2 - DISABLE_PROCESS_QUEUES
* 3 - PREEMPT_QUEUES_NO_UNMAP
*/
# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
/* CONTROL2a */
# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
/* CONTROL2b */
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
/* CONTROL3a */
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
/* CONTROL3b */
# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
/* CONTROL4 */
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
/* CONTROL5 */
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
#define PACKET3_QUERY_STATUS 0xA4
/* 1. header
* 2. CONTROL
* 3. CONTROL2
* 4. ADDR_LO [31:0]
* 5. ADDR_HI [31:0]
* 6. DATA_LO [31:0]
* 7. DATA_HI [31:0]
*/
/* CONTROL */
# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
/* CONTROL2a */
# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
/* CONTROL2b */
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001

View File

@ -493,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
{
enum amd_pm_state_type ps;
if (input == NULL)
return -EINVAL;
if (input == NULL) {
ret = -EINVAL;
break;
}
ps = *(unsigned long *)input;
data.requested_ui_label = power_state_convert(ps);
@ -539,15 +541,19 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
pm_type = POWER_STATE_TYPE_BATTERY;
break;
case PP_StateUILabel_Balanced:
pm_type = POWER_STATE_TYPE_BALANCED;
break;
case PP_StateUILabel_Performance:
pm_type = POWER_STATE_TYPE_PERFORMANCE;
break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
else
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
mutex_unlock(&pp_handle->pp_lock);
@ -894,7 +900,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
mutex_lock(&pp_handle->pp_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
mutex_lock(&pp_handle->pp_lock);
mutex_unlock(&pp_handle->pp_lock);
return ret;
}

View File

@ -30,7 +30,9 @@
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
#define SMU9_DRIVER_IF_VERSION 0xa
#define SMU9_DRIVER_IF_VERSION 0xB
#define PPTABLE_V10_SMU_VERSION 1
#define NUM_GFXCLK_DPM_LEVELS 8
#define NUM_UVD_DPM_LEVELS 8
@ -87,6 +89,11 @@ typedef struct {
int32_t a0;
int32_t a1;
int32_t a2;
uint8_t a0_shift;
uint8_t a1_shift;
uint8_t a2_shift;
uint8_t padding;
} GbVdroopTable_t;
typedef struct {
@ -293,7 +300,9 @@ typedef struct {
uint16_t Platform_sigma;
uint16_t PSM_Age_CompFactor;
uint32_t Reserved[20];
uint32_t DpmLevelPowerDelta;
uint32_t Reserved[19];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
@ -350,8 +359,8 @@ typedef struct {
typedef struct {
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */
uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */
float avgPsmVoltage[30];
float minPsmVoltage[30];
uint32_t MmHubPadding[7]; /* SMU internal use */
} AvfsDebugTable_t;
@ -414,5 +423,45 @@ typedef struct {
#define UCLK_SWITCH_SLOW 0
#define UCLK_SWITCH_FAST 1
/* GFX DIDT Configuration */
#define SQ_Enable_MASK 0x1
#define SQ_IR_MASK 0x2
#define SQ_PCC_MASK 0x4
#define SQ_EDC_MASK 0x8
#define TCP_Enable_MASK 0x100
#define TCP_IR_MASK 0x200
#define TCP_PCC_MASK 0x400
#define TCP_EDC_MASK 0x800
#define TD_Enable_MASK 0x10000
#define TD_IR_MASK 0x20000
#define TD_PCC_MASK 0x40000
#define TD_EDC_MASK 0x80000
#define DB_Enable_MASK 0x1000000
#define DB_IR_MASK 0x2000000
#define DB_PCC_MASK 0x4000000
#define DB_EDC_MASK 0x8000000
#define SQ_Enable_SHIFT 0
#define SQ_IR_SHIFT 1
#define SQ_PCC_SHIFT 2
#define SQ_EDC_SHIFT 3
#define TCP_Enable_SHIFT 8
#define TCP_IR_SHIFT 9
#define TCP_PCC_SHIFT 10
#define TCP_EDC_SHIFT 11
#define TD_Enable_SHIFT 16
#define TD_IR_SHIFT 17
#define TD_PCC_SHIFT 18
#define TD_EDC_SHIFT 19
#define DB_Enable_SHIFT 24
#define DB_IR_SHIFT 25
#define DB_PCC_SHIFT 26
#define DB_EDC_SHIFT 27
#endif
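The DIDT masks and shifts above pair up in the usual way: set a field with value << X_SHIFT, read it back with (cfg & X_MASK) >> X_SHIFT. A small stand-alone check using two of the values defined above:

#include <stdint.h>
#include <stdio.h>

#define SQ_EDC_MASK  0x8
#define SQ_EDC_SHIFT 3
#define TD_PCC_MASK  0x40000
#define TD_PCC_SHIFT 18

int main(void)
{
        uint32_t didt_cfg = 0;

        /* enable SQ EDC and TD PCC */
        didt_cfg |= 1u << SQ_EDC_SHIFT;
        didt_cfg |= 1u << TD_PCC_SHIFT;

        printf("SQ_EDC=%u TD_PCC=%u cfg=0x%08x\n",
               (unsigned int)((didt_cfg & SQ_EDC_MASK) >> SQ_EDC_SHIFT),
               (unsigned int)((didt_cfg & TD_PCC_MASK) >> TD_PCC_SHIFT),
               (unsigned int)didt_cfg);        /* SQ_EDC=1 TD_PCC=1 cfg=0x00040008 */
        return 0;
}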

View File

@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)

View File

@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)

View File

@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)

View File

@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)

View File

@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
struct nvkm_vma *

View File

@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};

View File

@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)

View File

@ -1020,37 +1020,44 @@ out_unlock:
return ret;
}
static bool ttm_bo_places_compat(const struct ttm_place *places,
unsigned num_placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags)
{
unsigned i;
for (i = 0; i < num_placement; i++) {
const struct ttm_place *heap = &places[i];
if (mem->mm_node && (mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
return false;
}
bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags)
{
int i;
if (ttm_bo_places_compat(placement->placement, placement->num_placement,
mem, new_flags))
return true;
for (i = 0; i < placement->num_placement; i++) {
const struct ttm_place *heap = &placement->placement[i];
if (mem->mm_node &&
(mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
for (i = 0; i < placement->num_busy_placement; i++) {
const struct ttm_place *heap = &placement->busy_placement[i];
if (mem->mm_node &&
(mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
if ((placement->busy_placement != placement->placement ||
placement->num_busy_placement > placement->num_placement) &&
ttm_bo_places_compat(placement->busy_placement,
placement->num_busy_placement,
mem, new_flags))
return true;
return false;
}
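One behavioural detail worth noting in the refactored helper above: a placement that requests TTM_PL_FLAG_CONTIGUOUS is now only considered compatible if the existing memory is already contiguous. A stripped-down sketch of just that flag test; the flag value matches the ttm_placement header change further down:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)

static bool contiguous_compat(uint32_t wanted_flags, uint32_t mem_flags)
{
        /* compatible unless CONTIGUOUS is requested but not already set */
        return !(wanted_flags & TTM_PL_FLAG_CONTIGUOUS) ||
               (mem_flags & TTM_PL_FLAG_CONTIGUOUS);
}

int main(void)
{
        printf("%d %d %d\n",
               contiguous_compat(0, 0),                                 /* 1 */
               contiguous_compat(TTM_PL_FLAG_CONTIGUOUS, 0),            /* 0 */
               contiguous_compat(TTM_PL_FLAG_CONTIGUOUS,
                                 TTM_PL_FLAG_CONTIGUOUS));              /* 1 */
        return 0;
}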

View File

@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
pfn = bdev->driver->io_mem_pfn(bo, page_offset);
else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ page_offset;
}
EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
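As a worked example of the default helper above: with bus.base = 0xE0000000, bus.offset = 0x100000 and 4 KB pages (PAGE_SHIFT = 12), (0xE0000000 + 0x100000) >> 12 = 0xE0100, so page_offset 5 resolves to PFN 0xE0105. Drivers whose apertures are not linearly mapped can install their own io_mem_pfn callback instead of this default.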
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{

View File

@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};

View File

@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};

View File

@ -710,6 +710,17 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
/**
* ttm_bo_default_io_mem_pfn - get a pfn for a page offset
*
* @bo: the BO we need to look up the pfn for
* @page_offset: offset inside the BO to look up.
*
* Calculate the PFN for iomem based mappings during page fault
*/
unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*

View File

@ -462,6 +462,15 @@ struct ttm_bo_driver {
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
/**
* Return the pfn for a given page_offset inside the BO.
*
* @bo: the BO to look up the pfn for
* @page_offset: the offset to look up
*/
unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
unsigned long page_offset);
};
/**

View File

@ -63,6 +63,7 @@
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)