Merge tag 'amd-drm-next-5.14-2021-05-21' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.14-2021-05-21:

amdgpu:
- RAS fixes
- SR-IOV fixes
- More BO management cleanups
- Aldebaran fixes
- Display fixes
- Support for new GPU, Beige Goby
- Backlight fixes

amdkfd:
- RAS fixes
- DMA mapping fixes
- HMM SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210521045743.4047-1-alexander.deucher@amd.com
commit 9a91e5e0af
@@ -73,7 +73,8 @@ amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
+	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
+	beige_goby_reg_init.o
 
 # add DF block
 amdgpu-y += \
@@ -670,10 +670,10 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 	return 0;
 }
 
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
+				      enum TLB_FLUSH_TYPE flush_type)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	const uint32_t flush_type = 0;
 	bool all_hub = false;
 
 	if (adev->family == AMDGPU_FAMILY_AI)
@@ -36,13 +36,26 @@
 
 extern uint64_t amdgpu_amdkfd_total_mem_size;
 
+enum TLB_FLUSH_TYPE {
+	TLB_FLUSH_LEGACY = 0,
+	TLB_FLUSH_LIGHTWEIGHT,
+	TLB_FLUSH_HEAVYWEIGHT
+};
+
 struct amdgpu_device;
 
-struct kfd_bo_va_list {
-	struct list_head bo_list;
-	struct amdgpu_bo_va *bo_va;
-	void *kgd_dev;
+enum kfd_mem_attachment_type {
+	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
+	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
+	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
+};
+
+struct kfd_mem_attachment {
+	struct list_head list;
+	enum kfd_mem_attachment_type type;
 	bool is_mapped;
+	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_device *adev;
 	uint64_t va;
 	uint64_t pte_flags;
 };
@@ -50,7 +63,8 @@ struct kfd_bo_va_list {
 struct kgd_mem {
 	struct mutex lock;
 	struct amdgpu_bo *bo;
-	struct list_head bo_va_list;
+	struct dma_buf *dmabuf;
+	struct list_head attachments;
 	/* protected by amdkfd_process_info.lock */
 	struct ttm_validate_buffer validate_list;
 	struct ttm_validate_buffer resv_list;
@@ -135,7 +149,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
 int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
+				      enum TLB_FLUSH_TYPE flush_type);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
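For orientation, a minimal sketch (not part of this patch) of how a KFD-side caller could pick a flush type with the new parameter; the kgd handle and the example function are assumed, only the enum and prototype above come from the patch:

/* Illustrative only: request a heavyweight flush after unmapping memory.
 * 'kgd' is assumed to be a valid kgd_dev handle from device init. */
static int example_flush_after_unmap(struct kgd_dev *kgd, uint16_t pasid)
{
	/* TLB_FLUSH_LEGACY keeps the old behavior; heavyweight also
	 * invalidates cached page-table entries. */
	return amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid,
						 TLB_FLUSH_HEAVYWEIGHT);
}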
@@ -72,16 +72,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 	return (struct amdgpu_device *)kgd;
 }
 
-static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
 		struct kgd_mem *mem)
 {
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+	list_for_each_entry(entry, &mem->attachments, list)
 		if (entry->bo_va->base.vm == avm)
-			return false;
+			return true;
 
-	return true;
+	return false;
 }
 
 /* Set memory usage limits. Current, limits are
@@ -433,7 +433,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 			mapping_flags |= coherent ?
 				AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
 		else
-			mapping_flags |= AMDGPU_VM_MTYPE_UC;
+			mapping_flags |= coherent ?
+				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 	} else {
 		mapping_flags |= coherent ?
 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
@@ -452,7 +453,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 			if (adev->gmc.xgmi.connected_to_cpu)
 				snoop = true;
 		} else {
-			mapping_flags |= AMDGPU_VM_MTYPE_UC;
+			mapping_flags |= coherent ?
+				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 			if (amdgpu_xgmi_same_hive(adev, bo_adev))
 				snoop = true;
 		}
@@ -473,87 +475,318 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 	return pte_flags;
 }
 
+static int
+kfd_mem_dmamap_userptr(struct kgd_mem *mem,
+		       struct kfd_mem_attachment *attachment)
+{
+	enum dma_data_direction direction =
+		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+	struct amdgpu_device *adev = attachment->adev;
+	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
+	struct ttm_tt *ttm = bo->tbo.ttm;
+	int ret;
+
+	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
+	if (unlikely(!ttm->sg))
+		return -ENOMEM;
+
+	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
+		return -EINVAL;
+
+	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
+	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
+					ttm->num_pages, 0,
+					(u64)ttm->num_pages << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (unlikely(ret))
+		goto free_sg;
+
+	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+	if (unlikely(ret))
+		goto release_sg;
+
+	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
+				       ttm->num_pages);
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (ret)
+		goto unmap_sg;
+
+	return 0;
+
+unmap_sg:
+	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+release_sg:
+	pr_err("DMA map userptr failed: %d\n", ret);
+	sg_free_table(ttm->sg);
+free_sg:
+	kfree(ttm->sg);
+	ttm->sg = NULL;
+	return ret;
+}
+
+static int
+kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static int
+kfd_mem_dmamap_attachment(struct kgd_mem *mem,
+			  struct kfd_mem_attachment *attachment)
+{
+	switch (attachment->type) {
+	case KFD_MEM_ATT_SHARED:
+		return 0;
+	case KFD_MEM_ATT_USERPTR:
+		return kfd_mem_dmamap_userptr(mem, attachment);
+	case KFD_MEM_ATT_DMABUF:
+		return kfd_mem_dmamap_dmabuf(attachment);
+	default:
+		WARN_ON_ONCE(1);
+	}
+	return -EINVAL;
+}
+
+static void
+kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
+			 struct kfd_mem_attachment *attachment)
+{
+	enum dma_data_direction direction =
+		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	struct ttm_operation_ctx ctx = {.interruptible = false};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+	struct amdgpu_device *adev = attachment->adev;
+	struct ttm_tt *ttm = bo->tbo.ttm;
+
+	if (unlikely(!ttm->sg))
+		return;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+	sg_free_table(ttm->sg);
+	ttm->sg = NULL;
+}
+
+static void
+kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+	struct ttm_operation_ctx ctx = {.interruptible = true};
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+static void
+kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *attachment)
+{
+	switch (attachment->type) {
+	case KFD_MEM_ATT_SHARED:
+		break;
+	case KFD_MEM_ATT_USERPTR:
+		kfd_mem_dmaunmap_userptr(mem, attachment);
+		break;
+	case KFD_MEM_ATT_DMABUF:
+		kfd_mem_dmaunmap_dmabuf(attachment);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+
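For orientation, a minimal sketch (not part of the patch) of how these dmamap/dmaunmap dispatchers pair up over an attachment's lifetime; the example function is assumed, and locking and error paths are elided:

/* Illustrative lifecycle of one DMA-mapped attachment; 'mem' and 'entry'
 * are assumed to come from kfd_mem_attach() below. */
static int example_map_cycle(struct kgd_mem *mem,
			     struct kfd_mem_attachment *entry)
{
	int ret;

	ret = kfd_mem_dmamap_attachment(mem, entry);	/* map for this GPU */
	if (ret)
		return ret;
	/* ... the GPU accesses the BO through entry->bo_va ... */
	kfd_mem_dmaunmap_attachment(mem, entry);	/* unmap on release */
	return 0;
}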
+static int
+kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
+		       struct amdgpu_bo **bo)
+{
+	unsigned long bo_size = mem->bo->tbo.base.size;
+	struct drm_gem_object *gobj;
+	int ret;
+
+	ret = amdgpu_bo_reserve(mem->bo, false);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_gem_object_create(adev, bo_size, 1,
+				       AMDGPU_GEM_DOMAIN_CPU,
+				       0, ttm_bo_type_sg,
+				       mem->bo->tbo.base.resv,
+				       &gobj);
+	if (ret)
+		return ret;
+
+	amdgpu_bo_unreserve(mem->bo);
+
+	*bo = gem_to_amdgpu_bo(gobj);
+	(*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+	return 0;
+}
+
+static int
+kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
+		      struct amdgpu_bo **bo)
+{
+	struct drm_gem_object *gobj;
+
+	if (!mem->dmabuf) {
+		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
+			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+				DRM_RDWR : 0);
+		if (IS_ERR(mem->dmabuf)) {
+			mem->dmabuf = NULL;
+			return PTR_ERR(mem->dmabuf);
+		}
+	}
+
+	gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
+	if (IS_ERR(gobj))
+		return PTR_ERR(gobj);
+
+	/* Import takes an extra reference on the dmabuf. Drop it now to
+	 * avoid leaking it. We only need the one reference in
+	 * kgd_mem->dmabuf.
+	 */
+	dma_buf_put(mem->dmabuf);
+
+	*bo = gem_to_amdgpu_bo(gobj);
+	(*bo)->parent = amdgpu_bo_ref(mem->bo);
+
+	return 0;
+}
+
-/* add_bo_to_vm - Add a BO to a VM
+/* kfd_mem_attach - Add a BO to a VM
  *
  * Everything that needs to bo done only once when a BO is first added
  * to a VM. It can later be mapped and unmapped many times without
  * repeating these steps.
  *
+ * 0. Create BO for DMA mapping, if needed
  * 1. Allocate and initialize BO VA entry data structure
  * 2. Add BO to the VM
  * 3. Determine ASIC-specific PTE flags
  * 4. Alloc page tables and directories if needed
  * 4a.  Validate new page tables and directories
  */
-static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
-		struct amdgpu_vm *vm, bool is_aql,
-		struct kfd_bo_va_list **p_bo_va_entry)
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+		struct amdgpu_vm *vm, bool is_aql)
 {
-	int ret;
-	struct kfd_bo_va_list *bo_va_entry;
-	struct amdgpu_bo *bo = mem->bo;
+	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+	unsigned long bo_size = mem->bo->tbo.base.size;
 	uint64_t va = mem->va;
-	struct list_head *list_bo_va = &mem->bo_va_list;
-	unsigned long bo_size = bo->tbo.base.size;
+	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
+	struct amdgpu_bo *bo[2] = {NULL, NULL};
+	int i, ret;
 
 	if (!va) {
 		pr_err("Invalid VA when adding BO to VM\n");
 		return -EINVAL;
 	}
 
-	if (is_aql)
-		va += bo_size;
-
-	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
-	if (!bo_va_entry)
-		return -ENOMEM;
-
-	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
-			va + bo_size, vm);
-
-	/* Add BO to VM internal data structures*/
-	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-	if (!bo_va_entry->bo_va) {
-		ret = -EINVAL;
-		pr_err("Failed to add BO object to VM. ret == %d\n",
-				ret);
-		goto err_vmadd;
-	}
-
-	bo_va_entry->va = va;
-	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
-	bo_va_entry->kgd_dev = (void *)adev;
-	list_add(&bo_va_entry->bo_list, list_bo_va);
-
-	if (p_bo_va_entry)
-		*p_bo_va_entry = bo_va_entry;
+	for (i = 0; i <= is_aql; i++) {
+		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
+		if (unlikely(!attachment[i])) {
+			ret = -ENOMEM;
+			goto unwind;
+		}
+
+		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
+			 va + bo_size, vm);
+
+		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
+					amdgpu_xgmi_same_hive(adev, bo_adev))) {
+			/* Mappings on the local GPU and VRAM mappings in the
+			 * local hive share the original BO
+			 */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = mem->bo;
+			drm_gem_object_get(&bo[i]->tbo.base);
+		} else if (i > 0) {
+			/* Multiple mappings on the same GPU share the BO */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = bo[0];
+			drm_gem_object_get(&bo[i]->tbo.base);
+		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+			/* Create an SG BO to DMA-map userptrs on other GPUs */
+			attachment[i]->type = KFD_MEM_ATT_USERPTR;
+			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
+			if (ret)
+				goto unwind;
+		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
+			   mem->bo->tbo.type != ttm_bo_type_sg) {
+			/* GTT BOs use DMA-mapping ability of dynamic-attach
+			 * DMA bufs. TODO: The same should work for VRAM on
+			 * large-BAR GPUs.
+			 */
+			attachment[i]->type = KFD_MEM_ATT_DMABUF;
+			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
+			if (ret)
+				goto unwind;
+		} else {
+			/* FIXME: Need to DMA-map other BO types:
+			 * large-BAR VRAM, doorbells, MMIO remap
+			 */
+			attachment[i]->type = KFD_MEM_ATT_SHARED;
+			bo[i] = mem->bo;
+			drm_gem_object_get(&bo[i]->tbo.base);
+		}
+
+		/* Add BO to VM internal data structures */
+		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+		if (unlikely(!attachment[i]->bo_va)) {
+			ret = -ENOMEM;
+			pr_err("Failed to add BO object to VM. ret == %d\n",
+			       ret);
+			goto unwind;
+		}
+
+		attachment[i]->va = va;
+		attachment[i]->pte_flags = get_pte_flags(adev, mem);
+		attachment[i]->adev = adev;
+		list_add(&attachment[i]->list, &mem->attachments);
+
+		va += bo_size;
+	}
 
 	/* Allocate validate page tables if needed */
 	ret = vm_validate_pt_pd_bos(vm);
 	if (ret) {
 		pr_err("validate_pt_pd_bos() failed\n");
-		goto err_alloc_pts;
+		goto unwind;
 	}
 
 	return 0;
 
-err_alloc_pts:
-	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
-	list_del(&bo_va_entry->bo_list);
-err_vmadd:
-	kfree(bo_va_entry);
+unwind:
+	for (; i >= 0; i--) {
+		if (!attachment[i])
+			continue;
+		if (attachment[i]->bo_va) {
+			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+			list_del(&attachment[i]->list);
+		}
+		if (bo[i])
+			drm_gem_object_put(&bo[i]->tbo.base);
+		kfree(attachment[i]);
+	}
 	return ret;
 }
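As a usage illustration (the example wrapper is assumed, not from the patch), the mapping path later in this file attaches on first use exactly like this; an AQL queue BO gets two attachments because is_aql runs the loop twice:

/* Sketch: first-time attach of a BO to a GPUVM before mapping. */
static int example_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
			  struct amdgpu_vm *avm)
{
	if (!kfd_mem_is_attached(avm, mem))
		return kfd_mem_attach(adev, mem, avm, mem->aql_queue);
	return 0;
}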
 
-static void remove_bo_from_vm(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry, unsigned long size)
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 {
-	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
-			entry->va,
-			entry->va + size, entry);
-	amdgpu_vm_bo_rmv(adev, entry->bo_va);
-	list_del(&entry->bo_list);
-	kfree(entry);
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+	pr_debug("\t remove VA 0x%llx in entry %p\n",
+		 attachment->va, attachment);
+	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+	drm_gem_object_put(&bo->tbo.base);
+	list_del(&attachment->list);
+	kfree(attachment);
 }
 
 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
@@ -728,7 +961,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 				struct bo_vm_reservation_context *ctx)
 {
 	struct amdgpu_bo *bo = mem->bo;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	unsigned int i;
 	int ret;
 
@@ -740,7 +973,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	INIT_LIST_HEAD(&ctx->list);
 	INIT_LIST_HEAD(&ctx->duplicates);
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+	list_for_each_entry(entry, &mem->attachments, list) {
 		if ((vm && vm != entry->bo_va->base.vm) ||
 			(entry->is_mapped != map_type
 			&& map_type != BO_VM_ALL))
@@ -762,7 +995,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
 	i = 0;
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+	list_for_each_entry(entry, &mem->attachments, list) {
 		if ((vm && vm != entry->bo_va->base.vm) ||
 			(entry->is_mapped != map_type
 			&& map_type != BO_VM_ALL))
@@ -816,11 +1049,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
 	return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
-				struct kfd_bo_va_list *entry,
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+				struct kfd_mem_attachment *entry,
 				struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 
 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -829,15 +1063,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-	return 0;
+	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry,
-		struct amdgpu_sync *sync)
+static int update_gpuvm_pte(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *entry,
+			    struct amdgpu_sync *sync)
 {
-	int ret;
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
+	int ret;
+
+	ret = kfd_mem_dmamap_attachment(mem, entry);
+	if (ret)
+		return ret;
 
 	/* Update the page tables */
 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -849,14 +1088,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
-		bool no_update_pte)
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+			   struct kfd_mem_attachment *entry,
+			   struct amdgpu_sync *sync,
+			   bool no_update_pte)
 {
 	int ret;
 
 	/* Set virtual address for the allocation */
-	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
 			       amdgpu_bo_size(entry->bo_va->base.bo),
 			       entry->pte_flags);
 	if (ret) {
@@ -868,7 +1108,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(adev, entry, sync);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -877,7 +1117,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	return 0;
 
 update_gpuvm_pte_failed:
-	unmap_bo_from_gpuvm(adev, entry, sync);
+	unmap_bo_from_gpuvm(mem, entry, sync);
 	return ret;
 }
 
@@ -1194,7 +1434,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		ret = -ENOMEM;
 		goto err;
 	}
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	mutex_init(&(*mem)->lock);
 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
 
@@ -1283,7 +1523,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
-	struct kfd_bo_va_list *entry, *tmp;
+	struct kfd_mem_attachment *entry, *tmp;
 	struct bo_vm_reservation_context ctx;
 	struct ttm_validate_buffer *bo_list_entry;
 	unsigned int mapped_to_gpu_memory;
@@ -1326,13 +1566,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
-	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
-		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
-				entry, bo_size);
-
 	ret = unreserve_bo_and_vms(&ctx, false, false);
 
+	/* Remove from VM internal data structures */
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+		kfd_mem_detach(entry);
+
 	/* Free the sync object */
 	amdgpu_sync_free(&mem->sync);
 
@@ -1357,6 +1596,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 
 	/* Free the BO*/
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
+	if (mem->dmabuf)
+		dma_buf_put(mem->dmabuf);
 	drm_gem_object_put(&mem->bo->tbo.base);
 	mutex_destroy(&mem->lock);
 	kfree(mem);
@@ -1372,10 +1613,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	int ret;
 	struct amdgpu_bo *bo;
 	uint32_t domain;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
-	struct kfd_bo_va_list *bo_va_entry = NULL;
-	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
 	unsigned long bo_size;
 	bool is_invalid_userptr = false;
 
@@ -1411,6 +1650,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			mem->va + bo_size * (1 + mem->aql_queue),
 			avm, domain_string(domain));
 
+	if (!kfd_mem_is_attached(avm, mem)) {
+		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
+		if (ret)
+			goto out;
+	}
+
 	ret = reserve_bo_and_vm(mem, avm, &ctx);
 	if (unlikely(ret))
 		goto out;
@@ -1424,22 +1669,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
-	if (check_if_add_bo_to_vm(avm, mem)) {
-		ret = add_bo_to_vm(adev, mem, avm, false,
-				&bo_va_entry);
-		if (ret)
-			goto add_bo_to_vm_failed;
-		if (mem->aql_queue) {
-			ret = add_bo_to_vm(adev, mem, avm,
-					true, &bo_va_entry_aql);
-			if (ret)
-				goto add_bo_to_vm_failed_aql;
-		}
-	} else {
-		ret = vm_validate_pt_pd_bos(avm);
-		if (unlikely(ret))
-			goto add_bo_to_vm_failed;
-	}
+	ret = vm_validate_pt_pd_bos(avm);
+	if (unlikely(ret))
+		goto out_unreserve;
 
 	if (mem->mapped_to_gpu_memory == 0 &&
 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
@@ -1450,34 +1682,34 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
 		if (ret) {
 			pr_debug("Validate failed\n");
-			goto map_bo_to_gpuvm_failed;
+			goto out_unreserve;
 		}
 	}
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-		if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
-			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
-					entry->va, entry->va + bo_size,
-					entry);
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm != avm || entry->is_mapped)
+			continue;
 
-			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
-					is_invalid_userptr);
-			if (ret) {
-				pr_err("Failed to map bo to gpuvm\n");
-				goto map_bo_to_gpuvm_failed;
-			}
+		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
+			 entry->va, entry->va + bo_size, entry);
 
-			ret = vm_update_pds(avm, ctx.sync);
-			if (ret) {
-				pr_err("Failed to update page directories\n");
-				goto map_bo_to_gpuvm_failed;
-			}
+		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
+				      is_invalid_userptr);
+		if (ret) {
+			pr_err("Failed to map bo to gpuvm\n");
+			goto out_unreserve;
+		}
 
-			entry->is_mapped = true;
-			mem->mapped_to_gpu_memory++;
-			pr_debug("\t INC mapping count %d\n",
-					mem->mapped_to_gpu_memory);
-		}
+		ret = vm_update_pds(avm, ctx.sync);
+		if (ret) {
+			pr_err("Failed to update page directories\n");
+			goto out_unreserve;
+		}
+
+		entry->is_mapped = true;
+		mem->mapped_to_gpu_memory++;
+		pr_debug("\t INC mapping count %d\n",
+			 mem->mapped_to_gpu_memory);
 	}
 
 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
@@ -1488,13 +1720,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 
 	goto out;
 
-map_bo_to_gpuvm_failed:
-	if (bo_va_entry_aql)
-		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
-add_bo_to_vm_failed_aql:
-	if (bo_va_entry)
-		remove_bo_from_vm(adev, bo_va_entry, bo_size);
-add_bo_to_vm_failed:
+out_unreserve:
 	unreserve_bo_and_vms(&ctx, false, false);
 out:
 	mutex_unlock(&mem->process_info->lock);
@@ -1505,11 +1731,10 @@ out:
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
-	struct kfd_bo_va_list *entry;
+	struct kfd_mem_attachment *entry;
 	struct bo_vm_reservation_context ctx;
 	int ret;
 
@@ -1533,26 +1758,19 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		mem->va + bo_size * (1 + mem->aql_queue),
 		avm);
 
-	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-		if (entry->bo_va->base.vm == avm && entry->is_mapped) {
-			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
-				entry->va,
-				entry->va + bo_size,
-				entry);
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+			continue;
 
-			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-			if (ret == 0) {
-				entry->is_mapped = false;
-			} else {
-				pr_err("failed to unmap VA 0x%llx\n",
-					mem->va);
-				goto unreserve_out;
-			}
+		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
+			 entry->va, entry->va + bo_size, entry);
 
-			mem->mapped_to_gpu_memory--;
-			pr_debug("\t DEC mapping count %d\n",
-				mem->mapped_to_gpu_memory);
-		}
+		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+		entry->is_mapped = false;
+
+		mem->mapped_to_gpu_memory--;
+		pr_debug("\t DEC mapping count %d\n",
+			 mem->mapped_to_gpu_memory);
 	}
 
 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
@@ -1701,7 +1919,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 	if (mmap_offset)
 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
 
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	mutex_init(&(*mem)->lock);
 
 	(*mem)->alloc_flags =
@@ -1898,7 +2116,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	list_for_each_entry_safe(mem, tmp_mem,
 				 &process_info->userptr_inval_list,
 				 validate_list.head) {
-		struct kfd_bo_va_list *bo_va_entry;
+		struct kfd_mem_attachment *attachment;
 
 		bo = mem->bo;
 
@@ -1921,13 +2139,12 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		 * VM faults if the GPU tries to access the invalid
 		 * memory.
 		 */
-		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
-			if (!bo_va_entry->is_mapped)
+		list_for_each_entry(attachment, &mem->attachments, list) {
+			if (!attachment->is_mapped)
 				continue;
 
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					bo_va_entry->kgd_dev,
-					bo_va_entry, &sync);
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync);
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
@@ -2108,7 +2325,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
 		struct amdgpu_bo *bo = mem->bo;
 		uint32_t domain = mem->domain;
-		struct kfd_bo_va_list *bo_va_entry;
+		struct kfd_mem_attachment *attachment;
 
 		total_size += amdgpu_bo_size(bo);
 
@@ -2128,12 +2345,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
 			goto validate_map_fail;
 		}
-		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
-				    bo_list) {
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					      bo_va_entry->kgd_dev,
-					      bo_va_entry,
-					      &sync_obj);
+		list_for_each_entry(attachment, &mem->attachments, list) {
+			if (!attachment->is_mapped)
+				continue;
+
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
 				pr_debug("Memory eviction: update PTE failed. Try again\n");
 				goto validate_map_fail;
@@ -2208,7 +2425,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 		return -ENOMEM;
 
 	mutex_init(&(*mem)->lock);
-	INIT_LIST_HEAD(&(*mem)->bo_va_list);
+	INIT_LIST_HEAD(&(*mem)->attachments);
 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
 	(*mem)->process_info = process_info;
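Taken together, the per-GPU flow in this file now reads as follows — a hedged sketch only (the wrapper function is assumed; locking and error handling are elided):

/* Sketch of a KFD BO's per-GPU lifetime after this refactor. */
static int example_bo_lifetime(struct kgd_dev *kgd, struct kgd_mem *mem,
			       void *drm_priv)
{
	int ret;

	/* First map attaches (kfd_mem_attach), then maps + DMA-maps. */
	ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
	if (ret)
		return ret;
	/* Unmap also DMA-unmaps; attachments are destroyed on free. */
	return amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, drm_priv);
}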
@@ -1828,6 +1828,9 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 	if (adev->is_atom_fw) {
 		amdgpu_atomfirmware_scratch_regs_init(adev);
 		amdgpu_atomfirmware_allocate_fb_scratch(adev);
+		/* cached firmware_flags for further usage */
+		adev->mode_info.firmware_flags =
+			amdgpu_atomfirmware_query_firmware_capability(adev);
 	} else {
 		amdgpu_atombios_scratch_regs_init(adev);
 		amdgpu_atombios_allocate_fb_scratch(adev);
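With firmware_flags cached once at ATOM init, each capability helper reduces to a bit test against the cached word. A hedged sketch of what a further helper following this pattern would look like — ATOM_FIRMWARE_CAP_EXAMPLE is a placeholder name, not a real vbios flag:

/* Hypothetical helper in the style introduced by this patch; the
 * capability bit is a placeholder, not a real firmware flag. */
bool amdgpu_atomfirmware_example_supported(struct amdgpu_device *adev)
{
	u32 fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_EXAMPLE) ? true : false;
}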
@@ -29,23 +29,59 @@
 #include "atombios.h"
 #include "soc15_hw_ip.h"
 
-bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
+union firmware_info {
+	struct atom_firmware_info_v3_1 v31;
+	struct atom_firmware_info_v3_2 v32;
+	struct atom_firmware_info_v3_3 v33;
+	struct atom_firmware_info_v3_4 v34;
+};
+
+/*
+ * Helper function to query firmware capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return firmware_capability in firmwareinfo table on success or 0 if not
+ */
+uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
 {
-	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-						firmwareinfo);
-	uint16_t data_offset;
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	u16 data_offset, size;
+	union firmware_info *firmware_info;
+	u8 frev, crev;
+	u32 fw_cap = 0;
 
-	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
-					NULL, NULL, &data_offset)) {
-		struct atom_firmware_info_v3_1 *firmware_info =
-			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
-							   data_offset);
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+			firmwareinfo);
 
-		if (le32_to_cpu(firmware_info->firmware_capability) &
-		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
-			return true;
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+				index, &size, &frev, &crev, &data_offset)) {
+		/* support firmware_info 3.1 + */
+		if ((frev == 3 && crev >=1) || (frev > 3)) {
+			firmware_info = (union firmware_info *)
+				(mode_info->atom_context->bios + data_offset);
+			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
+		}
 	}
-	return false;
+
+	return fw_cap;
 }
 
+/*
+ * Helper function to query gpu virtualizaiton capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if gpu virtualization is supported or false if not
+ */
+bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
+{
+	u32 fw_cap;
+
+	fw_cap = adev->mode_info.firmware_flags;
+
+	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
+}
+
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
@@ -400,41 +436,36 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
 	return ecc_default_enabled;
 }
 
-union firmware_info {
-	struct atom_firmware_info_v3_1 v31;
-	struct atom_firmware_info_v3_2 v32;
-	struct atom_firmware_info_v3_3 v33;
-	struct atom_firmware_info_v3_4 v34;
-};
-
 /*
  * Helper function to query sram ecc capablity
  *
  * @adev: amdgpu_device pointer
  *
  * Return true if vbios supports sram ecc or false if not
  */
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_info *mode_info = &adev->mode_info;
-	int index;
-	u16 data_offset, size;
-	union firmware_info *firmware_info;
-	u8 frev, crev;
-	bool sram_ecc_supported = false;
+	u32 fw_cap;
 
-	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-			firmwareinfo);
+	fw_cap = adev->mode_info.firmware_flags;
 
-	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
-				index, &size, &frev, &crev, &data_offset)) {
-		/* support firmware_info 3.1 + */
-		if ((frev == 3 && crev >=1) || (frev > 3)) {
-			firmware_info = (union firmware_info *)
-				(mode_info->atom_context->bios + data_offset);
-			sram_ecc_supported =
-				(le32_to_cpu(firmware_info->v31.firmware_capability) &
-				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
-		}
-	}
+	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+}
 
-	return sram_ecc_supported;
+/*
+ * Helper function to query dynamic boot config capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports dynamic boot config or false if not
+ */
+bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
+{
+	u32 fw_cap;
+
+	fw_cap = adev->mode_info.firmware_flags;
+
+	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
 union smu_info {
@@ -466,10 +497,6 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
 		adev->pm.current_sclk = adev->clock.default_sclk;
 		adev->pm.current_mclk = adev->clock.default_mclk;
 
-		/* not technically a clock, but... */
-		adev->mode_info.firmware_flags =
-			le32_to_cpu(firmware_info->v31.firmware_capability);
-
 		ret = 0;
 	}
 
@@ -519,6 +546,21 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
 		ret = 0;
 	}
 
+	/* if asic is Navi+, the rlc reference clock is used for system clock
+	 * from vbios gfx_info table */
+	if (adev->asic_type >= CHIP_NAVI10) {
+		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						    gfx_info);
+		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+					  &frev, &crev, &data_offset)) {
+			struct atom_gfx_info_v2_2 *gfx_info = (struct atom_gfx_info_v2_2*)
+				(mode_info->atom_context->bios + data_offset);
+			if ((frev == 2) && (crev >= 2))
+				spll->reference_freq = le32_to_cpu(gfx_info->rlc_gpu_timer_refclk);
+			ret = 0;
+		}
+	}
+
 	return ret;
 }
 
@@ -584,67 +626,19 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
 }
 
 /*
- * Check if VBIOS supports GDDR6 training data save/restore
+ * Helper function to query two stage mem training capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if two stage mem training is supported or false if not
  */
-static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
 {
-	uint16_t data_offset;
-	int index;
+	u32 fw_cap;
 
-	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-					    firmwareinfo);
-	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
-					  NULL, NULL, &data_offset)) {
-		struct atom_firmware_info_v3_1 *firmware_info =
-			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
-							   data_offset);
+	fw_cap = adev->mode_info.firmware_flags;
 
-		DRM_DEBUG("atom firmware capability:0x%08x.\n",
-			  le32_to_cpu(firmware_info->firmware_capability));
-
-		if (le32_to_cpu(firmware_info->firmware_capability) &
-		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
-			return true;
-	}
-
-	return false;
-}
-
-int amdgpu_mem_train_support(struct amdgpu_device *adev)
-{
-	int ret;
-	uint32_t major, minor, revision, hw_v;
-
-	if (gddr6_mem_train_vbios_support(adev)) {
-		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
-		hw_v = HW_REV(major, minor, revision);
-		/*
-		 * treat 0 revision as a special case since register for MP0 and MMHUB is missing
-		 * for some Navi10 A0, preventing driver from discovering the hwip information since
-		 * none of the functions will be initialized, it should not cause any problems
-		 */
-		switch (hw_v) {
-		case HW_REV(11, 0, 0):
-		case HW_REV(11, 0, 5):
-		case HW_REV(11, 0, 7):
-		case HW_REV(11, 0, 11):
-		case HW_REV(11, 0, 12):
-			ret = 1;
-			break;
-		default:
-			DRM_ERROR("memory training vbios supports but psp hw(%08x)"
-				  " doesn't support!\n", hw_v);
-			ret = -1;
-			break;
-		}
-	} else {
-		ret = 0;
-		hw_v = -1;
-	}
-
-	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
-	return ret;
+	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
 }
 
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
@@ -26,7 +26,8 @@
 
 #define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
 
-bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
+uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev);
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
@@ -35,7 +36,8 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
-int amdgpu_mem_train_support(struct amdgpu_device *adev);
 
 #endif
@@ -119,6 +119,7 @@ const char *amdgpu_asic_name[] = {
 	"NAVY_FLOUNDER",
 	"VANGOGH",
 	"DIMGREY_CAVEFISH",
+	"BEIGE_GOBY",
 	"LAST",
 };
 
@@ -1820,6 +1821,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 	default:
 		return 0;
 	case CHIP_VEGA10:
@@ -2033,6 +2035,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
 		if (adev->asic_type == CHIP_VANGOGH)
 			adev->family = AMDGPU_FAMILY_VGH;
@@ -3034,7 +3037,7 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		if (adev->is_atom_fw) {
-			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 		} else {
 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
@@ -3097,6 +3100,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
 #endif
 		return amdgpu_dc != 0;
@@ -4476,7 +4480,6 @@ out:
 		r = amdgpu_ib_ring_tests(tmp_adev);
 		if (r) {
 			dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
-			r = amdgpu_device_ip_suspend(tmp_adev);
 			need_full_reset = true;
 			r = -EAGAIN;
 			goto end;
@@ -288,10 +288,13 @@ out:
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+	int i;
 
 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
 	if (rfb->base.obj[0]) {
+		for (i = 0; i < rfb->base.format->num_planes; i++)
+			drm_gem_object_put(rfb->base.obj[0]);
 		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
 		rfb->base.obj[0] = NULL;
 		drm_framebuffer_unregister_private(&rfb->base);
@@ -607,7 +607,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
 	struct ras_ih_if ih_info = {
 		.cb = amdgpu_gfx_process_ras_data_cb,
 	};
-	struct ras_query_if info = { 0 };
 
 	if (!adev->gfx.ras_if) {
 		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -625,12 +624,8 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
 		goto free;
 
 	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
-		if (adev->gmc.xgmi.connected_to_cpu) {
-			info.head = *adev->gfx.ras_if;
-			amdgpu_ras_query_error_status(adev, &info);
-		} else {
+		if (!amdgpu_persistent_edc_harvesting_supported(adev))
 			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
-		}
 
 		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
 		if (r)
@@ -344,7 +344,7 @@ struct amdgpu_mode_info {
 	/* pointer to fbdev info structure */
 	struct amdgpu_fbdev *rfbdev;
 	/* firmware flags */
-	u16 firmware_flags;
+	u32 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
 	u8 bl_level; /* saved backlight level */
@@ -30,6 +30,8 @@
 
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
+#include "amdgpu_res_cursor.h"
+
 #ifdef CONFIG_MMU_NOTIFIER
 #include <linux/mmu_notifier.h>
 #endif
@@ -215,18 +217,19 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
-	struct drm_mm_node *node = bo->tbo.mem.mm_node;
-	unsigned long pages_left;
+	struct amdgpu_res_cursor cursor;
 
 	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
 		return false;
 
-	for (pages_left = bo->tbo.mem.num_pages; pages_left;
-	     pages_left -= node->size, node++)
-		if (node->start < fpfn)
+	amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+	while (cursor.remaining) {
+		if (cursor.start < adev->gmc.visible_vram_size)
 			return true;
 
+		amdgpu_res_next(&cursor, cursor.size);
+	}
+
 	return false;
 }
 
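The res-cursor API generalizes this chunk walk over any TTM resource. A minimal sketch of the same pattern used for a different purpose — the example function is assumed, only amdgpu_res_first/amdgpu_res_next come from the driver:

/* Illustrative: walk a BO's VRAM chunks and sum the bytes that fall
 * inside the CPU-visible aperture. */
static u64 example_visible_bytes(struct amdgpu_device *adev,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_res_cursor cursor;
	u64 visible = 0;

	amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			visible += min(cursor.size,
				       adev->gmc.visible_vram_size - cursor.start);
		amdgpu_res_next(&cursor, cursor.size);
	}
	return visible;
}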
@@ -38,6 +38,7 @@
 
 #include "amdgpu_ras.h"
 #include "amdgpu_securedisplay.h"
+#include "amdgpu_atomfirmware.h"
 
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -104,6 +105,7 @@ static int psp_early_init(void *handle)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		psp_v11_0_set_psp_funcs(psp);
 		psp->autoload_supported = true;
 		break;
@@ -538,7 +540,7 @@ static int psp_boot_config_set(struct amdgpu_device *adev)
 	struct psp_context *psp = &adev->psp;
 	struct psp_gfx_cmd_resp *cmd = psp->cmd;
 
-	if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev))
 		return 0;
 
 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
@@ -1931,9 +1933,10 @@ static int psp_hw_start(struct psp_context *psp)
 		return ret;
 	}
 
-	ret = psp_boot_config_set(adev);
-	if (ret) {
-		DRM_WARN("PSP set boot config@\n");
+	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
+		ret = psp_boot_config_set(adev);
+		if (ret)
+			dev_warn(adev->dev, "PSP set boot config failed\n");
 	}
 
 	ret = psp_tmr_init(psp);
@@ -2194,7 +2194,7 @@ release_con:
 	return r;
 }
 
-static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
 {
 	if (adev->gmc.xgmi.connected_to_cpu)
 		return 1;
@@ -625,4 +625,7 @@ void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev);
 
 void amdgpu_release_ras_context(struct amdgpu_device *adev);
+
+int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev);
+
 #endif
@@ -86,6 +86,8 @@ void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedispla
 	(*cmd)->cmd_id = command_id;
 }
 
+#if defined(CONFIG_DEBUG_FS)
+
 static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
 		size_t size, loff_t *pos)
 {
@@ -162,6 +164,8 @@ static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
 	.llseek = default_llseek
 };
 
+#endif
+
 void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
@@ -231,7 +231,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	*addr += mm_cur->start & ~PAGE_MASK;
 
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	num_bytes = num_pages * 8;
+	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
@@ -576,10 +576,10 @@ out:
  *
  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
  */
-static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+				     struct ttm_resource *mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-	struct drm_mm_node *mm_node = mem->mm_node;
 	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 
 	switch (mem->mem_type) {
@@ -593,12 +593,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc
 		/* check if it's visible */
 		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
 			return -EINVAL;
-		/* Only physically contiguous buffers apply. In a contiguous
-		 * buffer, size of the first mm_node would match the number of
-		 * pages in ttm_resource.
-		 */
+
 		if (adev->mman.aper_base_kaddr &&
-		    (mm_node->size == mem->num_pages))
+		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
 			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 					mem->bus.offset;
 
@@ -910,7 +907,23 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
 		}
+	} else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+		if (!ttm->sg) {
+			struct dma_buf_attachment *attach;
+			struct sg_table *sgt;
+
+			attach = gtt->gobj->import_attach;
+			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+			if (IS_ERR(sgt))
+				return PTR_ERR(sgt);
+
+			ttm->sg = sgt;
+		}
+
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 	}
+
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -1035,8 +1048,15 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 	int r;
 
 	/* if the pages have userptr pinning then clear that first */
-	if (gtt->userptr)
+	if (gtt->userptr) {
 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+	} else if (ttm->sg && gtt->gobj->import_attach) {
+		struct dma_buf_attachment *attach;
+
+		attach = gtt->gobj->import_attach;
+		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+		ttm->sg = NULL;
+	}
 
 	if (!gtt->bound)
 		return;
@@ -1123,23 +1143,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
 		return 0;
 	}
 
-	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
-		if (!ttm->sg) {
-			struct dma_buf_attachment *attach;
-			struct sg_table *sgt;
-
-			attach = gtt->gobj->import_attach;
-			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-			if (IS_ERR(sgt))
-				return PTR_ERR(sgt);
-
-			ttm->sg = sgt;
-		}
-
-		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
-					       ttm->num_pages);
+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return 0;
-	}
 
 	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
 }
@@ -1159,16 +1164,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
 	if (gtt && gtt->userptr) {
 		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
 		kfree(ttm->sg);
 		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
 		return;
 	}
 
-	if (ttm->sg && gtt->gobj->import_attach) {
-		struct dma_buf_attachment *attach;
-
-		attach = gtt->gobj->import_attach;
-		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
-		ttm->sg = NULL;
-		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
-		return;
-	}
-
@@ -1581,11 +1578,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
 	bool mem_train_support = false;
 
 	if (!amdgpu_sriov_vf(adev)) {
-		ret = amdgpu_mem_train_support(adev);
-		if (ret == 1)
+		if (amdgpu_atomfirmware_mem_training_supported(adev))
 			mem_train_support = true;
-		else if (ret == -1)
-			return -EINVAL;
 		else
 			DRM_DEBUG("memory training does not support!\n");
 	}
@@ -37,11 +37,6 @@
 
 #define AMDGPU_POISON	0xd0bed0be
 
-struct amdgpu_vram_reservation {
-	struct list_head node;
-	struct drm_mm_node mm_node;
-};
-
 struct amdgpu_vram_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
@@ -403,6 +403,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_ALDEBARAN:
+	case CHIP_BEIGE_GOBY:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
@@ -48,6 +48,7 @@
 #define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
 #define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
+#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -63,6 +64,7 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
 
 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
@@ -151,6 +153,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 			adev->vcn.indirect_sram = true;
 		break;
+	case CHIP_BEIGE_GOBY:
+		fw_name = FIRMWARE_BEIGE_GOBY;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
+		break;
 	default:
 		return -EINVAL;
 	}
@ -432,6 +432,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
uint32_t checksum;
uint32_t checkval;

uint32_t i;
uint32_t tmp;

if (adev->virt.fw_reserve.p_pf2vf == NULL)
return -EINVAL;

@ -472,6 +475,29 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->virt.reg_access =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

adev->virt.decode_max_dimension_pixels = 0;
adev->virt.decode_max_frame_pixels = 0;
adev->virt.encode_max_dimension_pixels = 0;
adev->virt.encode_max_frame_pixels = 0;
adev->virt.is_mm_bw_enabled = false;
for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
}
if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
adev->virt.is_mm_bw_enabled = true;

adev->unique_id =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
break;
default:
DRM_ERROR("invalid pf2vf version\n");

@ -744,3 +770,35 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad

return mode;
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
uint32_t i;

if (!adev->virt.is_mm_bw_enabled)
return;

if (encode) {
for (i = 0; i < encode_array_size; i++) {
encode[i].max_width = adev->virt.encode_max_dimension_pixels;
encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
if (encode[i].max_width > 0)
encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
else
encode[i].max_height = 0;
}
}

if (decode) {
for (i = 0; i < decode_array_size; i++) {
decode[i].max_width = adev->virt.decode_max_dimension_pixels;
decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
if (decode[i].max_width > 0)
decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
else
decode[i].max_height = 0;
}
}
}
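Aside: the clamp loop above turns the host's width and pixels-per-frame budgets into a per-codec max_height. A tiny sketch of that arithmetic (the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Model of the clamp in amdgpu_virt_update_sriov_video_codec: the host
 * reports a width budget and a pixels-per-frame budget; height is whatever
 * the frame budget leaves at that width, or 0 when no width is allowed. */
static uint32_t clamp_height(uint32_t max_width, uint32_t max_pixels_per_frame)
{
    return max_width ? max_pixels_per_frame / max_width : 0;
}

int main(void)
{
    /* e.g. a 4096-wide budget against an 8192x4352 frame budget */
    printf("%u\n", (unsigned)clamp_height(4096, 8192u * 4352u)); /* 8704 */
    return 0;
}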
@ -233,8 +233,17 @@ struct amdgpu_virt {
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;

/* multimedia bandwidth config */
bool is_mm_bw_enabled;
uint32_t decode_max_dimension_pixels;
uint32_t decode_max_frame_pixels;
uint32_t encode_max_dimension_pixels;
uint32_t encode_max_frame_pixels;
};

struct amdgpu_video_codec_info;

#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

@ -307,4 +316,8 @@ int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
#endif
@ -38,6 +38,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**

@ -1583,6 +1584,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
params->table_freed = true;
}

} else if (frag >= shift) {

@ -1607,9 +1609,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @last: last mapped entry
* @flags: flags for the entries
* @offset: offset into nodes and pages_addr
* @nodes: array of drm_mm_nodes with the MC addresses
* @res: ttm_resource to map
* @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
* @table_freed: return true if page table is freed
*
* Fill in the page table entries between @start and @last.
*

@ -1622,13 +1625,14 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset,
struct drm_mm_node *nodes,
struct ttm_resource *res,
dma_addr_t *pages_addr,
struct dma_fence **fence)
struct dma_fence **fence,
bool *table_freed)
{
struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
uint64_t pfn;
int r;

memset(&params, 0, sizeof(params));

@ -1646,14 +1650,6 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
else
sync_mode = AMDGPU_SYNC_EXPLICIT;

pfn = offset >> PAGE_SHIFT;
if (nodes) {
while (pfn >= nodes->size) {
pfn -= nodes->size;
++nodes;
}
}

amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;

@ -1672,23 +1668,17 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_unlock;

do {
amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE,
&cursor);
while (cursor.remaining) {
uint64_t tmp, num_entries, addr;

num_entries = last - start + 1;
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
num_entries = min((nodes->size - pfn) *
AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries);
} else {
addr = 0;
}

num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
if (pages_addr) {
bool contiguous = true;

if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
uint64_t pfn = cursor.start >> PAGE_SHIFT;
uint64_t count;

contiguous = pages_addr[pfn + 1] ==

@ -1708,16 +1698,18 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
}

if (!contiguous) {
addr = pfn << PAGE_SHIFT;
addr = cursor.start;
params.pages_addr = pages_addr;
} else {
addr = pages_addr[pfn];
addr = pages_addr[cursor.start >> PAGE_SHIFT];
params.pages_addr = NULL;
}

} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
addr = bo_adev->vm_manager.vram_base_offset +
cursor.start;
} else {
addr = 0;
}

tmp = start + num_entries;

@ -1725,17 +1717,15 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_unlock;

pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
}
amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
start = tmp;

} while (unlikely(start != last + 1));
};

r = vm->update_funcs->commit(&params, fence);

if (table_freed)
*table_freed = params.table_freed;

error_unlock:
amdgpu_vm_eviction_unlock(vm);
return r;

@ -1805,7 +1795,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
struct drm_mm_node *nodes;
struct dma_fence **last_update;
struct dma_resv *resv;
uint64_t flags;

@ -1814,7 +1803,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,

if (clear || !bo) {
mem = NULL;
nodes = NULL;
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;

@ -1829,7 +1817,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bo = gem_to_amdgpu_bo(gobj);
}
mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT)
pages_addr = bo->tbo.ttm->dma_address;
}

@ -1878,8 +1865,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
resv, mapping->start,
mapping->last, update_flags,
mapping->offset, nodes,
pages_addr, last_update);
mapping->offset, mem,
pages_addr, last_update, NULL);
if (r)
return r;
}

@ -2090,7 +2077,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
resv, mapping->start,
mapping->last, init_pte_value,
0, NULL, NULL, &f);
0, NULL, NULL, &f, NULL);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);

@ -3428,7 +3415,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
}

r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
addr, flags, value, NULL, NULL,
addr, flags, value, NULL, NULL, NULL,
NULL);
if (r)
goto error_unlock;
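Aside: the rewrite above replaces the hand-rolled drm_mm_node walking (the pfn/nodes bookkeeping) with the amdgpu_res_cursor helpers. A self-contained sketch of that cursor pattern over an array of extents; all names here are illustrative, not the kernel API:

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, size; };

struct cursor {
    const struct extent *ext;
    uint64_t start;      /* address within the current extent */
    uint64_t size;       /* bytes left in the current extent */
    uint64_t remaining;  /* bytes left in the whole request */
};

static void cursor_first(struct cursor *c, const struct extent *e,
                         uint64_t offset, uint64_t bytes)
{
    while (offset >= e->size) {     /* skip whole extents, like amdgpu_res_first */
        offset -= e->size;
        e++;
    }
    c->ext = e;
    c->start = e->start + offset;
    c->size = e->size - offset;
    c->remaining = bytes;
    if (c->size > bytes)
        c->size = bytes;
}

static void cursor_next(struct cursor *c, uint64_t bytes)
{
    c->remaining -= bytes;
    c->size -= bytes;
    c->start += bytes;
    if (!c->size && c->remaining) { /* advance to the next extent */
        c->ext++;
        c->start = c->ext->start;
        c->size = c->ext->size;
        if (c->size > c->remaining)
            c->size = c->remaining;
    }
}

int main(void)
{
    const struct extent ext[] = { { 0x1000, 0x2000 }, { 0x8000, 0x4000 } };
    struct cursor c;

    cursor_first(&c, ext, 0x1000, 0x3000);
    while (c.remaining) {
        printf("chunk at 0x%llx, 0x%llx bytes\n",
               (unsigned long long)c.start, (unsigned long long)c.size);
        cursor_next(&c, c.size);
    }
    return 0;
}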
@ -231,6 +231,11 @@ struct amdgpu_vm_update_params {
* @num_dw_left: number of dw left for the IB
*/
unsigned int num_dw_left;

/**
* @table_freed: return true if page table is freed when updating
*/
bool table_freed;
};

struct amdgpu_vm_update_funcs {

@ -402,9 +407,9 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset,
struct drm_mm_node *nodes,
struct ttm_resource *res,
dma_addr_t *pages_addr,
struct dma_fence **fence);
struct dma_fence **fence, bool *free_table);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
@ -29,6 +29,11 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"

struct amdgpu_vram_reservation {
struct list_head node;
struct drm_mm_node mm_node;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{

@ -446,10 +451,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
}
spin_unlock(&mgr->lock);

if (i == 1)
mem->placement |= TTM_PL_FLAG_CONTIGUOUS;

atomic64_add(vis_usage, &mgr->vis_usage);

mem->mm_node = nodes;

return 0;

error:
@ -56,6 +56,8 @@

#define AMD_SRIOV_MSG_RESERVE_UCODE 24

#define AMD_SRIOV_MSG_RESERVE_VCN_INST 4

enum amd_sriov_ucode_engine_id {
AMD_SRIOV_UCODE_ID_VCE = 0,
AMD_SRIOV_UCODE_ID_UVD,

@ -98,10 +100,10 @@ union amd_sriov_msg_feature_flags {

union amd_sriov_reg_access_flags {
struct {
uint32_t vf_reg_psp_access_ih : 1;
uint32_t vf_reg_rlc_access_mmhub : 1;
uint32_t vf_reg_rlc_access_gc : 1;
uint32_t reserved : 29;
uint32_t vf_reg_access_ih : 1;
uint32_t vf_reg_access_mmhub : 1;
uint32_t vf_reg_access_gc : 1;
uint32_t reserved : 29;
} flags;
uint32_t all;
};

@ -114,6 +116,37 @@ union amd_sriov_msg_os_info {
uint32_t all;
};

struct amd_sriov_msg_uuid_info {
union {
struct {
uint32_t did : 16;
uint32_t fcn : 8;
uint32_t asic_7 : 8;
};
uint32_t time_low;
};

struct {
uint32_t time_mid : 16;
uint32_t time_high : 12;
uint32_t version : 4;
};

struct {
struct {
uint8_t clk_seq_hi : 6;
uint8_t variant : 2;
};
union {
uint8_t clk_seq_low;
uint8_t asic_6;
};
uint16_t asic_4;
};

uint32_t asic_0;
};

struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in byte */
uint32_t size;

@ -160,10 +193,19 @@ struct amd_sriov_msg_pf2vf_info {
/* identification in ROCm SMI */
uint64_t uuid;
uint32_t fcn_idx;
/* flags which indicate the register access method VF should use */
/* flags to indicate which register access method VF should use */
union amd_sriov_reg_access_flags reg_access_flags;
/* MM BW management */
struct {
uint32_t decode_max_dimension_pixels;
uint32_t decode_max_frame_pixels;
uint32_t encode_max_dimension_pixels;
uint32_t encode_max_frame_pixels;
} mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
/* UUID info */
struct amd_sriov_msg_uuid_info uuid_info;
/* reserved */
uint32_t reserved[256-27];
uint32_t reserved[256 - 47];
};

struct amd_sriov_msg_vf2pf_info_header {
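Aside: the reserved[] change is pure size bookkeeping. Assuming the pf2vf message stays padded to a fixed 256 dwords, mm_bw_management adds 4 dwords x 4 VCN instances = 16 dwords and uuid_info adds another 4, so the pad shrinks by 20 dwords, from 256 - 27 to 256 - 47. A sketch that checks the arithmetic:

#include <assert.h>
#include <stdint.h>

#define RESERVE_VCN_INST 4

struct mm_bw {
    uint32_t decode_max_dimension_pixels;
    uint32_t decode_max_frame_pixels;
    uint32_t encode_max_dimension_pixels;
    uint32_t encode_max_frame_pixels;
};

int main(void)
{
    uint32_t added = sizeof(struct mm_bw) / 4 * RESERVE_VCN_INST /* 16 dwords */
                   + 4;                                          /* uuid_info */
    assert(27 + added == 47); /* old pad + new members == new pad */
    return 0;
}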
@ -74,6 +74,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
54  drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c  Normal file
@ -0,0 +1,54 @@
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "nv.h"

#include "soc15_common.h"
#include "soc15_hw_ip.h"
#include "beige_goby_ip_offset.h"

int beige_goby_reg_base_init(struct amdgpu_device *adev)
{
/* HW has more IP blocks, only initialize the block needed by driver */
uint32_t i;
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
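Aside: tables like the one initialized above are consumed by the SOC15 register helpers, which add a segment-relative offset to a per-IP, per-instance base. A rough model of that lookup; the segment layout and base values here are invented for the sketch:

#include <stdint.h>
#include <stdio.h>

#define MAX_SEGMENT 5

struct ip_base { uint32_t segment[MAX_SEGMENT]; };

/* register address = instance base for the segment + register offset */
static uint32_t reg_addr(const uint32_t *base, uint32_t seg, uint32_t offset)
{
    return base[seg] + offset;
}

int main(void)
{
    struct ip_base gc = { { 0x1260, 0xa000, 0x2402c000, 0, 0 } }; /* made up */

    /* e.g. a GC register in segment 1 at dword offset 0x10 */
    printf("0x%x\n", (unsigned)reg_addr(gc.segment, 1, 0x10));
    return 0;
}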
@ -277,13 +277,14 @@ static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
u32 tmp;

tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
if (adev->asic_type == CHIP_ALDEBARAN)
if (adev->asic_type == CHIP_ALDEBARAN) {
tmp = RREG32_SOC15(DF, 0, mmDF_GCM_AON0_DramMegaBaseAddress0);
tmp &=
ALDEBARAN_DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
else
} else {
tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
}
tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

return tmp;
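Aside: both branches above end in the same mask-then-shift field extraction. A minimal model with a hypothetical mask placement:

#include <stdint.h>
#include <stdio.h>

#define INTLV_NUM_CHAN_MASK  0x00000780u /* hypothetical bit placement */
#define INTLV_NUM_CHAN_SHIFT 7

/* isolate the field bits, then move them down to bit 0 */
static uint32_t get_field(uint32_t reg)
{
    return (reg & INTLV_NUM_CHAN_MASK) >> INTLV_NUM_CHAN_SHIFT;
}

int main(void)
{
    printf("%u\n", (unsigned)get_field(0x00000300)); /* -> 6 */
    return 0;
}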
@ -232,6 +232,13 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec2.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_rlc.bin");

MODULE_FIRMWARE("amdgpu/beige_goby_ce.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_pfp.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_me.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_mec.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_mec2.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),

@ -1395,9 +1402,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),

@ -1415,12 +1423,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};

static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)

@ -3404,6 +3413,41 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)
};

static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xb0000ff0, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff000000, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};

#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \

@ -3624,6 +3668,11 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_3_4,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
break;
case CHIP_BEIGE_GOBY:
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_5,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
break;
default:
break;
}

@ -3809,6 +3858,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfx.cp_fw_write_wait = true;
break;
default:

@ -3924,6 +3974,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
chip_name = "dimgrey_cavefish";
break;
case CHIP_BEIGE_GOBY:
chip_name = "beige_goby";
break;
default:
BUG();
}

@ -4492,6 +4545,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;

@ -4616,6 +4670,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;

@ -6123,6 +6178,7 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

@ -6258,6 +6314,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
break;
default:

@ -6270,6 +6327,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));

@ -6366,6 +6424,7 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);

@ -7080,6 +7139,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);

@ -7114,6 +7174,9 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
{
uint32_t data;

if (amdgpu_sriov_vf(adev))
return;

/* initialize cam_index to 0
* index will auto-inc after each data writting */
WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);

@ -7123,6 +7186,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
GRBM_CAM_DATA__CAM_ADDR__SHIFT) |

@ -7432,6 +7496,7 @@ static int gfx_v10_0_soft_reset(void *handle)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET,

@ -7541,6 +7606,7 @@ static int gfx_v10_0_early_init(void *handle)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
break;
default:

@ -7597,6 +7663,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);

/* wait for RLC_SAFE_MODE */

@ -7631,6 +7698,7 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
break;
default:

@ -7998,6 +8066,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
amdgpu_gfx_off_ctrl(adev, enable);
break;
case CHIP_VANGOGH:

@ -8026,6 +8095,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;

@ -9136,6 +9206,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case CHIP_NAVI12:
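Aside: golden-settings tables like golden_settings_gc_10_3_5 are applied as read-modify-write updates, with and_mask selecting the bits that or_mask replaces. A compilable sketch of that behaviour with register I/O stubbed by an array; this mirrors what soc15_program_register_sequence is expected to do, not a copy of it:

#include <stdint.h>
#include <stdio.h>

struct golden { uint32_t reg, and_mask, or_mask; };

static uint32_t regs[16]; /* stand-in register file */

static void program_sequence(const struct golden *g, unsigned n)
{
    for (unsigned i = 0; i < n; i++) {
        uint32_t tmp = regs[g[i].reg];
        tmp &= ~g[i].and_mask;              /* clear the selected bits */
        tmp |= g[i].or_mask & g[i].and_mask; /* set their new values */
        regs[g[i].reg] = tmp;
    }
}

int main(void)
{
    const struct golden table[] = {
        { 3, 0xffffffff, 0x00c00000 }, /* full-mask entry, like mmUTCL1_CTRL */
        { 4, 0x0000ffff, 0x0000003f }, /* partial mask leaves the top half */
    };

    regs[4] = 0xabcd1234;
    program_sequence(table, 2);
    printf("0x%08x 0x%08x\n", (unsigned)regs[3], (unsigned)regs[4]);
    return 0;
}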
@ -4947,7 +4947,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
amdgpu_gfx_rlc_enter_safe_mode(adev);

/* Enable 3D CGCG/CGLS */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */

@ -4959,8 +4959,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);

data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
else
data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;

if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
@ -288,7 +288,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
if (i < adev->usec_timeout)
return;

DRM_ERROR("Timeout waiting for VM flush ACK!\n");
DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**

@ -681,6 +681,7 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
break;
default:

@ -796,6 +797,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
default:
adev->gmc.gart_size = 512ULL << 20;
break;

@ -863,6 +865,7 @@ static int gmc_v10_0_sw_init(void *handle)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,

@ -978,6 +981,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
break;
default:
break;
@ -1268,13 +1268,15 @@ static int gmc_v9_0_late_init(void *handle)
}
}

if (adev->mmhub.ras_funcs &&
adev->mmhub.ras_funcs->reset_ras_error_count)
adev->mmhub.ras_funcs->reset_ras_error_count(adev);
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
if (adev->mmhub.ras_funcs &&
adev->mmhub.ras_funcs->reset_ras_error_count)
adev->mmhub.ras_funcs->reset_ras_error_count(adev);

if (adev->hdp.ras_funcs &&
adev->hdp.ras_funcs->reset_ras_error_count)
adev->hdp.ras_funcs->reset_ras_error_count(adev);
if (adev->hdp.ras_funcs &&
adev->hdp.ras_funcs->reset_ras_error_count)
adev->hdp.ras_funcs->reset_ras_error_count(adev);
}

r = amdgpu_gmc_ras_late_init(adev);
if (r)
@ -198,8 +198,6 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

ring->sched.ready = false;
}

return 0;

@ -166,8 +166,6 @@ static int jpeg_v3_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

ring->sched.ready = false;

return 0;
}
@ -93,6 +93,30 @@ static const char *mmhub_client_ids_sienna_cichlid[][2] = {
[15][1] = "OSS",
};

static const char *mmhub_client_ids_beige_goby[][2] = {
[3][0] = "DCEDMC",
[4][0] = "DCEVGA",
[5][0] = "MP0",
[6][0] = "MP1",
[8][0] = "VMC",
[9][0] = "VCNU0",
[11][0] = "VCN0",
[14][0] = "HDP",
[15][0] = "OSS",
[0][1] = "DBGU0",
[1][1] = "DBGU1",
[2][1] = "DCEDWB",
[3][1] = "DCEDMC",
[4][1] = "DCEVGA",
[5][1] = "MP0",
[6][1] = "MP1",
[7][1] = "XDP",
[9][1] = "VCNU0",
[11][1] = "VCN0",
[14][1] = "HDP",
[15][1] = "OSS",
};

static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{

@ -139,6 +163,9 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
case CHIP_DIMGREY_CAVEFISH:
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
case CHIP_BEIGE_GOBY:
mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
break;
default:
mmhub_cid = NULL;
break;

@ -544,6 +571,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;

@ -578,6 +606,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
if (def1 != data1)

@ -601,6 +630,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
break;
default:

@ -618,6 +648,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
break;
default:

@ -640,6 +671,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
mmhub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,

@ -663,6 +695,7 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
@ -151,7 +151,15 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
WREG32(ih_regs->ih_rb_cntl, tmp);

if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
}
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}

if (enable) {
ih->enabled = true;

@ -261,7 +269,15 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
WREG32(ih_regs->ih_rb_cntl, tmp);

if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
}
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}

if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */

@ -311,6 +327,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
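Aside: the two hunks above introduce the same fallback: an SR-IOV VF whose IH_RB_CNTL is host-protected routes the write through PSP, everyone else writes MMIO directly. A compilable model with both paths stubbed; all names are illustrative:

#include <stdint.h>
#include <stdio.h>

static int psp_program(uint32_t reg_id, uint32_t val)
{
    printf("PSP programs reg id %u = 0x%x\n", (unsigned)reg_id, (unsigned)val);
    return 0; /* nonzero would stand in for a timeout */
}

static void mmio_write(uint32_t reg, uint32_t val)
{
    printf("direct write reg 0x%x = 0x%x\n", (unsigned)reg, (unsigned)val);
}

static int write_rb_cntl(int is_vf, int indirect, uint32_t reg,
                         uint32_t reg_id, uint32_t val)
{
    if (is_vf && indirect) {
        if (psp_program(reg_id, val))
            return -1; /* -ETIMEDOUT in the kernel version */
    } else {
        mmio_write(reg, val);
    }
    return 0;
}

int main(void)
{
    write_rb_cntl(1, 1, 0x1f0, 7, 0xc0ffee); /* protected VF path */
    write_rb_cntl(0, 0, 0x1f0, 7, 0xc0ffee); /* bare-metal path */
    return 0;
}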
@ -218,11 +218,114 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
.codec_array = sc_video_codecs_decode_array,
};

/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
.max_height = 2304,
.max_pixels_per_frame = 4096 * 2304,
.max_level = 0,
},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 52,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 4,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 186,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
.max_width = 4096,
.max_height = 4096,
.max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
.max_width = 8192,
.max_height = 4352,
.max_pixels_per_frame = 8192 * 4352,
.max_level = 0,
},
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
.codec_array = sriov_sc_video_codecs_encode_array,
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
.codec_array = sriov_sc_video_codecs_decode_array,
};

static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
else
*codecs = &sriov_sc_video_codecs_decode;
} else {
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
}
return 0;
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_VANGOGH:

@ -534,6 +637,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
return AMD_RESET_METHOD_MODE1;
default:
if (amdgpu_dpm_is_baco_supported(adev))

@ -675,6 +779,9 @@ legacy_init:
case CHIP_DIMGREY_CAVEFISH:
dimgrey_cavefish_reg_base_init(adev);
break;
case CHIP_BEIGE_GOBY:
beige_goby_reg_base_init(adev);
break;
default:
return -EINVAL;
}

@ -764,9 +871,15 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
} else {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
is_support_sw_smu(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

@ -845,6 +958,28 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
break;
case CHIP_BEIGE_GOBY:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
is_support_sw_smu(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
is_support_sw_smu(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break;
default:
return -EINVAL;
}

@ -1152,6 +1287,23 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_BEIGE_GOBY:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_ATHUB |
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x46;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;

@ -1174,8 +1326,12 @@ static int nv_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
amdgpu_virt_update_sriov_video_codec(adev,
sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
}

return 0;
}

@ -1271,6 +1427,7 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
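Aside: after this change a Sienna Cichlid VF reports the host-clamped codec tables (further narrowed at late init from the pf2vf budgets) while bare metal keeps the native ones. A minimal model of that selection; types and strings are stand-ins:

#include <stdio.h>

struct codecs { const char *name; };

static const struct codecs native_decode = { "native decode caps" };
static struct codecs sriov_decode = { "host-clamped decode caps" };

static const struct codecs *query_decode(int is_sriov_vf)
{
    return is_sriov_vf ? &sriov_decode : &native_decode;
}

int main(void)
{
    printf("%s\n", query_decode(1)->name);
    printf("%s\n", query_decode(0)->name);
    return 0;
}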
@ -36,4 +36,5 @@ int navi12_reg_base_init(struct amdgpu_device *adev);
int sienna_cichlid_reg_base_init(struct amdgpu_device *adev);
void vangogh_reg_base_init(struct amdgpu_device *adev);
int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
int beige_goby_reg_base_init(struct amdgpu_device *adev);
#endif
@ -63,6 +63,8 @@ MODULE_FIRMWARE("amdgpu/vangogh_asd.bin");
MODULE_FIRMWARE("amdgpu/vangogh_toc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sos.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_ta.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_sos.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");

/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024

@ -115,6 +117,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_DIMGREY_CAVEFISH:
chip_name = "dimgrey_cavefish";
break;
case CHIP_BEIGE_GOBY:
chip_name = "beige_goby";
break;
default:
BUG();
}

@ -200,6 +205,14 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
case CHIP_BEIGE_GOBY:
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
err = psp_init_ta_microcode(psp, chip_name);
if (err)
return err;
break;
case CHIP_VANGOGH:
err = psp_init_asd_microcode(psp, chip_name);
if (err)
@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {

static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
|
||||
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin");
|
||||
MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin");
|
||||
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
|
||||
MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
|
||||
|
||||
@ -92,6 +93,7 @@ static void sdma_v5_2_init_golden_registers(struct amdgpu_device *adev)
|
||||
case CHIP_NAVY_FLOUNDER:
|
||||
case CHIP_VANGOGH:
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@ -163,6 +165,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
chip_name = "dimgrey_cavefish";
|
||||
break;
|
||||
case CHIP_BEIGE_GOBY:
|
||||
chip_name = "beige_goby";
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
@ -497,11 +502,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
|
||||
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
|
||||
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
|
||||
}
|
||||
|
||||
sdma0->sched.ready = false;
|
||||
sdma1->sched.ready = false;
|
||||
sdma2->sched.ready = false;
|
||||
sdma3->sched.ready = false;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1228,6 +1228,7 @@ static int sdma_v5_2_early_init(void *handle)
|
||||
adev->sdma.num_instances = 2;
|
||||
break;
|
||||
case CHIP_VANGOGH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
adev->sdma.num_instances = 1;
|
||||
break;
|
||||
default:
|
||||
@ -1628,6 +1629,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
|
||||
case CHIP_NAVY_FLOUNDER:
|
||||
case CHIP_VANGOGH:
|
||||
case CHIP_DIMGREY_CAVEFISH:
|
||||
case CHIP_BEIGE_GOBY:
|
||||
sdma_v5_2_update_medium_grain_clock_gating(adev,
|
||||
state == AMD_CG_STATE_GATE);
|
||||
sdma_v5_2_update_medium_grain_light_sleep(adev,
|
||||
|
@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &rv_video_codecs_decode;
return 0;
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
case CHIP_RENOIR:
if (encode)
*codecs = &vega_video_codecs_encode;

@ -1393,7 +1394,6 @@ static int soc15_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |

@ -1413,7 +1413,6 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
|
||||
adev->vcn.harvest_config = 0;
|
||||
adev->vcn.num_enc_rings = 1;
|
||||
|
||||
if (adev->asic_type == CHIP_BEIGE_GOBY) {
|
||||
adev->vcn.num_vcn_inst = 1;
|
||||
adev->vcn.num_enc_rings = 0;
|
||||
}
|
||||
|
||||
} else {
|
||||
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
|
||||
u32 harvest;
|
||||
@ -110,7 +115,10 @@ static int vcn_v3_0_early_init(void *handle)
|
||||
} else
|
||||
adev->vcn.num_vcn_inst = 1;
|
||||
|
||||
adev->vcn.num_enc_rings = 2;
|
||||
if (adev->asic_type == CHIP_BEIGE_GOBY)
|
||||
adev->vcn.num_enc_rings = 0;
|
||||
else
|
||||
adev->vcn.num_enc_rings = 2;
|
||||
}
|
||||
|
||||
vcn_v3_0_set_dec_ring_funcs(adev);
|
||||
@ -373,7 +381,7 @@ static int vcn_v3_0_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring;
|
||||
int i, j;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
|
||||
if (adev->vcn.harvest_config & (1 << i))
|
||||
@ -388,12 +396,6 @@ static int vcn_v3_0_hw_fini(void *handle)
|
||||
vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
|
||||
}
|
||||
}
|
||||
ring->sched.ready = false;
|
||||
|
||||
for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
|
||||
ring = &adev->vcn.inst[i].ring_enc[j];
|
||||
ring->sched.ready = false;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1261,23 +1263,25 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
|
||||
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
|
||||
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[i].ring_enc[0];
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
if (adev->asic_type != CHIP_BEIGE_GOBY) {
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[i].ring_enc[0];
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[i].ring_enc[1];
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[i].ring_enc[1];
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1657,31 +1661,33 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
|
||||
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
|
||||
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
|
||||
|
||||
/* Restore */
|
||||
fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
|
||||
ring->wptr = 0;
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
if (adev->asic_type != CHIP_BEIGE_GOBY) {
|
||||
/* Restore */
|
||||
fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
|
||||
ring->wptr = 0;
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
|
||||
ring->wptr = 0;
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
|
||||
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
|
||||
ring->wptr = 0;
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
|
||||
|
||||
/* restore wptr/rptr with pointers saved in FW shared memory*/
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
|
||||
/* restore wptr/rptr with pointers saved in FW shared memory*/
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
|
||||
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
|
||||
}
|
||||
|
||||
/* Unstall DPG */
|
||||
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
|
||||
@ -2138,7 +2144,8 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
|
||||
adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
|
||||
adev->vcn.inst[i].ring_enc[j].me = i;
|
||||
}
|
||||
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
|
||||
if (adev->vcn.num_enc_rings > 0)
|
||||
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -689,6 +689,63 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
},
};

static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
{
/* TCP L1 Cache per CU */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 1,
},
{
/* Scalar L1 Instruction Cache per SQC */
.cache_size = 32,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* Scalar L1 Data Cache per SQC */
.cache_size = 16,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 2,
},
{
/* GL1 Data Cache per SA */
.cache_size = 128,
.cache_level = 1,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L2 Data Cache per GPU (Total Tex Cache) */
.cache_size = 1024,
.cache_level = 2,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
{
/* L3 Data Cache per GPU */
.cache_size = 16*1024,
.cache_level = 3,
.flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE),
.num_cu_shared = 8,
},
};

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
@ -1322,6 +1379,10 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = vangogh_cache_info;
num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
break;
case CHIP_BEIGE_GOBY:
pcache_info = beige_goby_cache_info;
num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
break;
default:
return -EINVAL;
}

@ -82,6 +82,7 @@ static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
[CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd,
[CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd,
[CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
[CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2
@ -558,6 +559,24 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info beige_goby_device_info = {
.asic_family = CHIP_BEIGE_GOBY,
.asic_name = "beige_goby",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
.ih_ring_entry_size = 8 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_v9,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
.num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
};

/* For each entry, [0] is regular and [1] is virtualisation device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {
@ -586,6 +605,7 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
[CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info},
[CHIP_VANGOGH] = {&vangogh_device_info, NULL},
[CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
[CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,

@ -1936,6 +1936,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:

@ -1050,3 +1050,44 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}

void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
{
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_event *ev;
uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;

if (!p)
return; /* Presumably process exited. */

memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.gpu_id = dev->id;
hw_exception_data.memory_lost = 1;
hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;

memset(&memory_exception_data, 0, sizeof(memory_exception_data));
memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
memory_exception_data.gpu_id = dev->id;
memory_exception_data.failure.imprecise = true;

mutex_lock(&p->event_mutex);
idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
ev->hw_exception_data = hw_exception_data;
set_event(ev);
}

if (ev->type == KFD_EVENT_TYPE_MEMORY) {
ev->memory_exception_data = memory_exception_data;
set_event(ev);
}
}
mutex_unlock(&p->event_mutex);

/* user application will handle SIGBUS signal */
send_sig(SIGBUS, p->lead_thread, 0);

kfd_unref_process(p);
}

@ -424,6 +424,7 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
kfd_init_apertures_v9(pdd, id);
break;
default:

@ -230,7 +230,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
kfd_signal_hw_exception_event(pasid);
kfd_signal_poison_consumed_event(dev, pasid);
amdgpu_amdkfd_gpu_reset(dev->kgd);
return;
}

@ -249,6 +249,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_ALDEBARAN:

@ -1144,6 +1144,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

@ -1853,7 +1853,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd)
pdd->qpd.vmid);
} else {
amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
pdd->process->pasid);
pdd->process->pasid, TLB_FLUSH_LEGACY);
}
}

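Editor's note: the hunks above thread the new TLB_FLUSH_TYPE argument through amdgpu_amdkfd_flush_gpu_tlb_pasid(). A minimal sketch of a call site, assuming only the enum and entry point introduced by this patch; the helper name and its policy are illustrative, not part of the patch:

/* Sketch: choose a flush type the way the KFD paths in this patch do;
 * the SVM unmap path flushes heavyweight, the legacy/map paths flush
 * legacy. kfd_flush_tlb_sketch() itself is hypothetical. */
static void kfd_flush_tlb_sketch(struct kgd_dev *kgd, uint16_t pasid,
                                 bool after_unmap)
{
        enum TLB_FLUSH_TYPE type = after_unmap ?
                        TLB_FLUSH_HEAVYWEIGHT : TLB_FLUSH_LEGACY;

        amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid, type);
}
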
@ -1022,7 +1022,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
@ -1039,7 +1040,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange)
if (adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
@ -1084,7 +1086,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,

return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
start, last, init_pte_value, 0,
NULL, NULL, fence);
NULL, NULL, fence, NULL);
}

static int
@ -1125,7 +1127,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
break;
}
amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
p->pasid);
p->pasid, TLB_FLUSH_HEAVYWEIGHT);
}

return r;
@ -1137,6 +1139,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
struct amdgpu_bo_va bo_va;
bool table_freed = false;
uint64_t pte_flags;
int r = 0;

@ -1157,9 +1160,9 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
prange->mapping.start,
prange->mapping.last, pte_flags,
prange->mapping.offset,
prange->ttm_res ?
prange->ttm_res->mm_node : NULL,
dma_addr, &vm->last_update);
prange->ttm_res,
dma_addr, &vm->last_update,
&table_freed);
if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out;
@ -1175,6 +1178,13 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (fence)
*fence = dma_fence_get(vm->last_update);

if (table_freed) {
struct kfd_process *p;

p = container_of(prange->svms, struct kfd_process, svms);
amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
p->pasid, TLB_FLUSH_LEGACY);
}
out:
prange->mapping.bo_va = NULL;
return r;
@ -1231,9 +1241,6 @@ static int svm_range_map_to_gpus(struct svm_range *prange,
break;
}
}

amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
p->pasid);
}

return r;

@ -1398,6 +1398,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);

@ -106,6 +106,8 @@ MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@ -1400,6 +1402,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
return 0;
case CHIP_NAVI12:
@ -1515,6 +1518,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN302;
fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
case CHIP_BEIGE_GOBY:
dmub_asic = DMUB_ASIC_DCN303;
fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;

default:
/* ASIC doesn't support DMUB. */
@ -1987,9 +1994,6 @@ static int dm_suspend(void *handle)
return ret;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
amdgpu_dm_crtc_secure_display_suspend(adev);
#endif
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

@ -2314,10 +2318,6 @@ static int dm_resume(void *handle)

dm->cached_state = NULL;

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
amdgpu_dm_crtc_secure_display_resume(adev);
#endif

amdgpu_dm_irq_resume_late(adev);

amdgpu_dm_smu_write_watermarks_table(adev);
@ -3467,26 +3467,28 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
u32 user_brightness)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
u32 brightness;
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
bool rc;
int i;

amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;

for (i = 0; i < dm->num_of_edps; i++)
for (i = 0; i < dm->num_of_edps; i++) {
dm->brightness[i] = user_brightness;
brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
link[i] = (struct dc_link *)dm->backlight_link[i];
}

brightness = convert_brightness_from_user(&caps, bd->props.brightness);
// Change brightness based on AUX property
/* Change brightness based on AUX property */
if (caps.aux_support) {
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc) {
DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
@ -3495,7 +3497,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
}
} else {
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
if (!rc) {
DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
break;
@ -3506,9 +3508,17 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);

amdgpu_dm_backlight_set_level(dm, bd->props.brightness);

return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
{
struct amdgpu_dm_backlight_caps caps;

amdgpu_dm_update_backlight_caps(dm);
@ -3521,17 +3531,24 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)

rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
if (!rc)
return bd->props.brightness;
return dm->brightness[0];
return convert_brightness_to_user(&caps, avg);
} else {
int ret = dc_link_get_backlight_level(dm->backlight_link[0]);

if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
return dm->brightness[0];
return convert_brightness_to_user(&caps, ret);
}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);

return amdgpu_dm_backlight_get_level(dm);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
@ -3543,8 +3560,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
char bl_name[16];
struct backlight_properties props = { 0 };
int i;

amdgpu_dm_update_backlight_caps(dm);
for (i = 0; i < dm->num_of_edps; i++)
dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;

props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
@ -3825,6 +3845,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
@ -4004,6 +4025,11 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
case CHIP_BEIGE_GOBY:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
@ -4229,6 +4255,7 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
adev->asic_type == CHIP_NAVY_FLOUNDER ||
adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
adev->asic_type == CHIP_BEIGE_GOBY ||
adev->asic_type == CHIP_VANGOGH)
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
@ -8983,6 +9010,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#ifdef CONFIG_DEBUG_FS
bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

@ -8999,21 +9032,26 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc))
configure_crc = false;
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
acrtc->dm_irq_params.crc_window.update_win = true;
acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
crc_rd_wrk->crtc = crtc;
spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
}

if (configure_crc)
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src);
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
#endif
}
}
@ -9034,6 +9072,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
if (dm->backlight_dev)
amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
#endif
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done

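Editor's note: the backlight rework above splits update_status/get_brightness into amdgpu_dm_backlight_set_level() and amdgpu_dm_backlight_get_level(), caching the last user level per eDP panel in dm->brightness[] so dm_resume() and amdgpu_dm_atomic_commit_tail() can restore it. A condensed sketch of that flow, using only functions and fields shown in this patch; the wrapper itself is illustrative:

/* Sketch: cache the user level, convert it per panel, then program each
 * eDP link. dm->brightness[0] is what the restore path replays later. */
static void dm_set_all_backlights_sketch(struct amdgpu_display_manager *dm,
                                         u32 user_brightness)
{
        struct amdgpu_dm_backlight_caps caps = dm->backlight_caps;
        int i;

        for (i = 0; i < dm->num_of_edps; i++) {
                u32 level;

                dm->brightness[i] = user_brightness;
                level = convert_brightness_from_user(&caps, dm->brightness[i]);
                dc_link_set_backlight_level(dm->backlight_link[i], level, 0);
        }
}
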
@ -436,6 +436,13 @@ struct amdgpu_display_manager {
*/
struct list_head da_list;
struct completion dmub_aux_transfer_done;

/**
* @brightness:
*
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
};

enum dsc_clock_force_state {

@ -176,7 +176,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,

/* Configuration will be deferred to stream enable. */
if (!stream_state)
return 0;
return -EINVAL;

mutex_lock(&adev->dm.dc_lock);

@ -525,67 +525,6 @@ cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}

void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev)
{
struct drm_crtc *crtc;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
struct crc_window_parm cur_crc_window;
struct amdgpu_crtc *acrtc = NULL;

drm_for_each_crtc(crtc, &adev->ddev) {
acrtc = to_amdgpu_crtc(crtc);

spin_lock_irq(&adev_to_drm(adev)->event_lock);
cur_crc_src = acrtc->dm_irq_params.crc_src;
cur_crc_window = acrtc->dm_irq_params.crc_window;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);

if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
amdgpu_dm_crtc_set_crc_source(crtc,
pipe_crc_sources[cur_crc_src]);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
acrtc->dm_irq_params.crc_window = cur_crc_window;
if (acrtc->dm_irq_params.crc_window.activated) {
acrtc->dm_irq_params.crc_window.update_win = true;
acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1;
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
crc_rd_wrk->crtc = crtc;
spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
}
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
}
}
}

void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev)
{
struct drm_crtc *crtc;
struct crc_window_parm cur_crc_window;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;

drm_for_each_crtc(crtc, &adev->ddev) {
acrtc = to_amdgpu_crtc(crtc);

spin_lock_irq(&adev_to_drm(adev)->event_lock);
cur_crc_src = acrtc->dm_irq_params.crc_src;
cur_crc_window = acrtc->dm_irq_params.crc_window;
cur_crc_window.update_win = false;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);

if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
amdgpu_dm_crtc_set_crc_source(crtc, NULL);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
/* For resume to set back crc source*/
acrtc->dm_irq_params.crc_src = cur_crc_src;
acrtc->dm_irq_params.crc_window = cur_crc_window;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
}
}

}

struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
{
struct crc_rd_work *crc_rd_wrk = NULL;

@ -91,14 +91,10 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void);
void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev);
void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
#define amdgpu_dm_crtc_secure_display_create_work()
#define amdgpu_dm_crtc_secure_display_resume(x)
#define amdgpu_dm_crtc_secure_display_suspend(x)
#endif

#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */

@ -33,6 +33,7 @@ DC_LIBS += dcn21
DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn302
DC_LIBS += dcn303
endif

DC_LIBS += dce120

@ -73,6 +73,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
case DCN_VERSION_3_03:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif

@ -241,6 +241,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@ -278,6 +282,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
break;

case FAMILY_VGH:

@ -761,6 +761,43 @@ static struct wm_table ddr4_wm_table_rn = {
}
};

static struct wm_table ddr4_1R_wm_table_rn = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 13.90,
.sr_enter_plus_exit_time_us = 14.80,
.valid = true,
},
}
};

static struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
@ -971,8 +1008,12 @@ void rn_clk_mgr_construct(
} else {
if (is_green_sardine)
rn_bw_params.wm_table = ddr4_wm_table_gs;
else
rn_bw_params.wm_table = ddr4_wm_table_rn;
else {
if (ctx->dc->config.is_single_rank_dimm)
rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
else
rn_bw_params.wm_table = ddr4_wm_table_rn;
}
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
@ -990,6 +1031,9 @@ void rn_clk_mgr_construct(
if (status == PP_SMU_RESULT_OK &&
ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
/* treat memory config as single channel if memory is asymmetric. */
if (ctx->dc->config.is_asymmetric_memory)
clk_mgr->base.bw_params->num_channels = 1;
}
}

@ -2663,7 +2663,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.interdependent_update_lock(dc, context, false);
else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@ -2764,6 +2763,7 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->flip_immediate);
}
}

/* Perform requested Updates */
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@ -2786,6 +2786,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}

}

if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)

@ -1099,6 +1099,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
dc_is_dvi_signal(link->connector_signal)) {
if (prev_sink)
dc_sink_release(prev_sink);
link_disconnect_sink(link);

return false;
}
/*
* Abort detection for DP connectors if we have
* no EDID and connector is active converter
* as there are no display downstream
*
*/
if (dc_is_dp_sst_signal(link->connector_signal) &&
(link->dpcd_caps.dongle_type ==
DISPLAY_DONGLE_DP_VGA_CONVERTER ||
link->dpcd_caps.dongle_type ==
DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
if (prev_sink)
dc_sink_release(prev_sink);
link_disconnect_sink(link);

return false;
}

@ -1157,7 +1157,7 @@ static enum link_training_result perform_clock_recovery_sequence(
return get_cr_failure(lane_count, dpcd_lane_status);
}

static inline enum link_training_result perform_link_training_int(
static inline enum link_training_result dp_transition_to_video_idle(
struct dc_link *link,
struct link_training_settings *lt_settings,
enum link_training_result status)
@ -1231,7 +1231,7 @@ enum link_training_result dp_check_link_loss_status(
return status;
}

static void initialize_training_settings(
static inline void decide_8b_10b_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,
const struct dc_link_training_overrides *overrides,
@ -1275,6 +1275,8 @@ static void initialize_training_settings(
else
lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;

lt_settings->lttpr_mode = link->lttpr_mode;

/* Initialize lane settings overrides */
if (overrides->voltage_swing != NULL)
lt_settings->voltage_swing = overrides->voltage_swing;
@ -1327,6 +1329,17 @@ static void initialize_training_settings(
lt_settings->enhanced_framing = 1;
}

static void decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_link_training_overrides *overrides,
struct link_training_settings *lt_settings)
{
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
decide_8b_10b_training_settings(link, link_settings, overrides, lt_settings);
}

uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
{
switch (lttpr_repeater_count) {
@ -1356,13 +1369,16 @@ static void configure_lttpr_mode_transparent(struct dc_link *link)
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;

DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
}

static void configure_lttpr_mode_non_transparent(struct dc_link *link)
static void configure_lttpr_mode_non_transparent(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
/* aux timeout is already set to extended */
/* RESET/SET lttpr mode to enable non transparent mode */
@ -1372,11 +1388,16 @@ static void configure_lttpr_mode_non_transparent(struct dc_link *link)
enum dc_status result = DC_ERROR_UNEXPECTED;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;

DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
result = core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
enum dp_link_encoding encoding = dp_get_link_encoding_format(&lt_settings->link_settings);

if (encoding == DP_8b_10b_ENCODING) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
result = core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));

}

if (result == DC_OK) {
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
@ -1396,17 +1417,18 @@ static void configure_lttpr_mode_non_transparent(struct dc_link *link)
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}

repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);

for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
core_link_read_dpcd(
link,
aux_interval_address,
(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
if (encoding == DP_8b_10b_ENCODING) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
core_link_read_dpcd(
link,
aux_interval_address,
(uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
}
}
}
}
@ -1542,7 +1564,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
{
struct link_training_settings lt_settings;

initialize_training_settings(
decide_training_settings(
link,
link_setting,
&link->preferred_training_settings,
@ -1592,7 +1614,7 @@ enum link_training_result dc_link_dp_perform_link_training(
uint8_t repeater_cnt;
uint8_t repeater_id;

initialize_training_settings(
decide_training_settings(
link,
link_setting,
&link->preferred_training_settings,
@ -1600,7 +1622,7 @@ enum link_training_result dc_link_dp_perform_link_training(

/* Configure lttpr mode */
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
configure_lttpr_mode_non_transparent(link);
configure_lttpr_mode_non_transparent(link, &lt_settings);
else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
configure_lttpr_mode_transparent(link);

@ -1654,7 +1676,7 @@ enum link_training_result dc_link_dp_perform_link_training(
/* 3. set training not in progress*/
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
status = perform_link_training_int(link,
status = dp_transition_to_video_idle(link,
&lt_settings,
status);
}
@ -1877,7 +1899,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
bool fec_enable = false;

initialize_training_settings(
decide_training_settings(
link,
link_settings,
lt_overrides,
@ -2573,7 +2595,11 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin
struct dc_link_settings current_link_setting;
uint32_t link_bw;

if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
/*
* edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
link->dpcd_caps.edp_supported_link_rates_count == 0) {
*link_setting = link->verified_link_cap;
return true;
@ -2773,9 +2799,10 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
union phy_test_pattern dpcd_test_pattern;
union lane_adjust dpcd_lane_adjustment[2];
unsigned char dpcd_post_cursor_2_adjustment = 0;
unsigned char test_80_bit_pattern[
unsigned char test_pattern_buffer[
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
struct dc_link_training_settings link_settings;
union lane_adjust dpcd_lane_adjust;
@ -2845,12 +2872,15 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
break;
}

if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
core_link_read_dpcd(
link,
DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
test_80_bit_pattern,
sizeof(test_80_bit_pattern));
test_pattern_buffer,
test_pattern_size);
}

/* prepare link training settings */
link_settings.link = link->cur_link_settings;
@ -2888,9 +2918,8 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
&link_training_settings,
test_80_bit_pattern,
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
test_pattern_buffer,
test_pattern_size);
}

static void dp_test_send_link_test_pattern(struct dc_link *link)
@ -3993,7 +4022,11 @@ void detect_edp_sink_caps(struct dc_link *link)
link->dpcd_caps.edp_supported_link_rates_count = 0;
memset(supported_link_rates, 0, sizeof(supported_link_rates));

if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
/*
* edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
(link->dc->debug.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
@ -4867,4 +4900,11 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
return false;
}

enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings)
{
if ((link_settings->link_rate >= LINK_RATE_LOW) &&
(link_settings->link_rate <= LINK_RATE_HIGH3))
return DP_8b_10b_ENCODING;
return DP_UNKNOWN_ENCODING;
}

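Editor's note: dp_get_link_encoding_format() above becomes the dispatch point for encoding-specific training logic: decide_training_settings() only calls decide_8b_10b_training_settings() for 8b/10b rates, and the LTTPR DPCD programming is likewise gated on the encoding. A minimal sketch of that gating, assuming only functions shown in this patch; the wrapper is illustrative:

/* Sketch: skip the 8b/10b-only LTTPR programming for unknown encodings. */
static void configure_lttpr_sketch(struct dc_link *link,
                                   struct link_training_settings *lt_settings)
{
        if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                        DP_8b_10b_ENCODING)
                configure_lttpr_mode_non_transparent(link, lt_settings);
}
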
@ -57,6 +57,7 @@
#include "dcn30/dcn30_resource.h"
#include "dcn301/dcn301_resource.h"
#include "dcn302/dcn302_resource.h"
#include "dcn303/dcn303_resource.h"
#endif

#define DC_LOGGER_INIT(logger)
@ -130,6 +131,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_3_0;
if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_02;
if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_03;
break;

case FAMILY_VGH:
@ -216,6 +219,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_02:
res_pool = dcn302_create_resource_pool(init_data, dc);
break;
case DCN_VERSION_3_03:
res_pool = dcn303_create_resource_pool(init_data, dc);
break;
#endif
default:
break;

@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;

#define DC_VER "3.2.135.1"
#define DC_VER "3.2.136"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -308,6 +308,8 @@ struct dc_config {
#endif
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
};

enum visual_confirm {

@ -98,6 +98,11 @@ enum dc_dp_training_pattern {
DP_TRAINING_PATTERN_VIDEOIDLE,
};

enum dp_link_encoding {
DP_UNKNOWN_ENCODING = 0,
DP_8b_10b_ENCODING = 1,
};

struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;

@ -142,6 +142,15 @@
SRII(PIXEL_RATE_CNTL, OTG, 3),\
SRII(PIXEL_RATE_CNTL, OTG, 4)

#define CS_COMMON_REG_LIST_DCN3_03(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
SRII(PHASE, DP_DTO, 1),\
SRII(MODULO, DP_DTO, 0),\
SRII(MODULO, DP_DTO, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 0),\
SRII(PIXEL_RATE_CNTL, OTG, 1)

#endif
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\

@ -480,6 +480,35 @@
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)

#define HWSEQ_DCN303_REG_LIST() \
HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \
SR(RBBMIF_TIMEOUT_DIS), \
SR(RBBMIF_TIMEOUT_DIS_2), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_CTRL), \
SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
SR(MPC_CRC_CTRL), \
SR(MPC_CRC_RESULT_GB), \
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(D1VGA_CONTROL), \
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
HWSEQ_PIXEL_RATE_REG_LIST_303(OTG), \
HWSEQ_PHYPLL_REG_LIST_303(OTG), \
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_CLOCK_CONTROL)

#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \
SRII(PIXEL_RATE_CNTL, blk, 0), \
SRII(PIXEL_RATE_CNTL, blk, 1),\
@ -494,6 +523,14 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4)

#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \
SRII(PIXEL_RATE_CNTL, blk, 0), \
SRII(PIXEL_RATE_CNTL, blk, 1)

#define HWSEQ_PHYPLL_REG_LIST_303(blk) \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)

struct dce_hwseq_registers {
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
@ -934,6 +971,12 @@ struct dce_hwseq_registers {
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)

#define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh)

#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
type DCFEV_CLOCK_ENABLE; \

@ -653,33 +653,50 @@ static void dpp1_dscl_set_manual_ratio_init(
}
}

static void dpp1_dscl_set_recout(
struct dcn10_dpp *dpp, const struct rect *recout)
/**
* dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area
*
* @dpp: DPP data struct
* @recout: Rectangle information
*
* This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on
* the values specified in the recout parameter.
*
* Note: This function only has an effect if AutoCal is disabled.
*/
static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
int visual_confirm_on = 0;
if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
visual_confirm_on = 1;

REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT */
RECOUT_START_X, recout->x,
/* First line of RECOUT */
RECOUT_START_Y, recout->y);
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
/* First line of RECOUT in the active OTG area */
RECOUT_START_Y, recout->y);

REG_SET_2(RECOUT_SIZE, 0,
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- visual_confirm_on * 2 * (dpp->base.inst + 1));
}

/* Main function to program scaler and line buffer in manual scaling mode */
void dpp1_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data)
/**
* dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer
*
* @dpp_base: High level DPP struct
* @scl_data: scaler_data info
*
* This is the primary function to program scaler and line buffer in manual
* scaling mode. To execute the required operations for manual scale, we need
* to disable AutoCal first.
*/
void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
const struct scaler_data *scl_data)
{
enum lb_memory_config lb_config;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

@ -1226,6 +1226,14 @@ void hubp1_cursor_set_position(
/* TODO Handle surface pixel formats other than 4:4:4 */
}

/**
* hubp1_clk_cntl - Disable or enable clocks for DCHUBP
*
* @hubp: hubp struct reference.
* @enable: Set true for enabling gate clock.
*
* When enabling/disabling DCHUBP clock, we affect dcfclk/dppclk.
*/
void hubp1_clk_cntl(struct hubp *hubp, bool enable)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@ -1257,6 +1265,11 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
}

/**
* hubp1_set_flip_int - Enable surface flip interrupt
*
* @hubp: hubp struct reference.
*/
void hubp1_set_flip_int(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);

@ -72,6 +72,9 @@
|
||||
|
||||
#define GAMMA_HW_POINTS_NUM 256
|
||||
|
||||
#define PGFSM_POWER_ON 0
|
||||
#define PGFSM_POWER_OFF 2
|
||||
|
||||
void print_microsec(struct dc_context *dc_ctx,
|
||||
struct dc_log_buffer_ctx *log_ctx,
|
||||
uint32_t ref_cycle)
|
||||
@ -536,13 +539,22 @@ void dcn10_disable_vga(
|
||||
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* dcn10_dpp_pg_control - DPP power gate control.
|
||||
*
|
||||
* @hws: dce_hwseq reference.
|
||||
* @dpp_inst: DPP instance reference.
|
||||
* @power_on: true if we want to enable power gate, false otherwise.
|
||||
*
|
||||
* Enable or disable power gate in the specific DPP instance.
|
||||
*/
|
||||
void dcn10_dpp_pg_control(
|
||||
struct dce_hwseq *hws,
|
||||
unsigned int dpp_inst,
|
||||
bool power_on)
|
||||
{
|
||||
uint32_t power_gate = power_on ? 0 : 1;
|
||||
uint32_t pwr_status = power_on ? 0 : 2;
|
||||
uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
|
||||
|
||||
if (hws->ctx->dc->debug.disable_dpp_power_gate)
|
||||
return;
|
||||
@ -588,13 +600,22 @@ void dcn10_dpp_pg_control(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dcn10_hubp_pg_control - HUBP power gate control.
|
||||
*
|
||||
* @hws: dce_hwseq reference.
|
||||
* @hubp_inst: DPP instance reference.
|
||||
* @power_on: true if we want to enable power gate, false otherwise.
|
||||
*
|
||||
* Enable or disable power gate in the specific HUBP instance.
|
||||
*/
|
||||
void dcn10_hubp_pg_control(
|
||||
struct dce_hwseq *hws,
|
||||
unsigned int hubp_inst,
|
||||
bool power_on)
|
||||
{
|
||||
uint32_t power_gate = power_on ? 0 : 1;
|
||||
uint32_t pwr_status = power_on ? 0 : 2;
|
||||
uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
|
||||
|
||||
if (hws->ctx->dc->debug.disable_hubp_power_gate)
|
||||
return;
|
||||
@ -1078,6 +1099,19 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
|
||||
hws->funcs.verify_allow_pstate_change_high(dc);
|
||||
}
|
||||
|
||||
/**
|
||||
* dcn10_plane_atomic_power_down - Power down plane components.
|
||||
*
|
||||
* @dc: dc struct reference. used for grab hwseq.
|
||||
* @dpp: dpp struct reference.
|
||||
* @hubp: hubp struct reference.
|
||||
*
|
||||
* Keep in mind that this operation requires a power gate configuration;
|
||||
* however, requests for switch power gate are precisely controlled to avoid
|
||||
* problems. For this reason, power gate request is usually disabled. This
|
||||
* function first needs to enable the power gate request before disabling DPP
|
||||
* and HUBP. Finally, it disables the power gate request again.
|
||||
*/
|
||||
void dcn10_plane_atomic_power_down(struct dc *dc,
|
||||
struct dpp *dpp,
|
||||
struct hubp *hubp)
|
||||
@ -2165,129 +2199,6 @@ void dcn10_enable_per_frame_crtc_position_reset(
    DC_SYNC_INFO("Multi-display sync is complete\n");
}

/*static void print_rq_dlg_ttu(
        struct dc *dc,
        struct pipe_ctx *pipe_ctx)
{
    DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
            "\n============== DML TTU Output parameters [%d] ==============\n"
            "qos_level_low_wm: %d, \n"
            "qos_level_high_wm: %d, \n"
            "min_ttu_vblank: %d, \n"
            "qos_level_flip: %d, \n"
            "refcyc_per_req_delivery_l: %d, \n"
            "qos_level_fixed_l: %d, \n"
            "qos_ramp_disable_l: %d, \n"
            "refcyc_per_req_delivery_pre_l: %d, \n"
            "refcyc_per_req_delivery_c: %d, \n"
            "qos_level_fixed_c: %d, \n"
            "qos_ramp_disable_c: %d, \n"
            "refcyc_per_req_delivery_pre_c: %d\n"
            "=============================================================\n",
            pipe_ctx->pipe_idx,
            pipe_ctx->ttu_regs.qos_level_low_wm,
            pipe_ctx->ttu_regs.qos_level_high_wm,
            pipe_ctx->ttu_regs.min_ttu_vblank,
            pipe_ctx->ttu_regs.qos_level_flip,
            pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
            pipe_ctx->ttu_regs.qos_level_fixed_l,
            pipe_ctx->ttu_regs.qos_ramp_disable_l,
            pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
            pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
            pipe_ctx->ttu_regs.qos_level_fixed_c,
            pipe_ctx->ttu_regs.qos_ramp_disable_c,
            pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
            );

    DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
            "\n============== DML DLG Output parameters [%d] ==============\n"
            "refcyc_h_blank_end: %d, \n"
            "dlg_vblank_end: %d, \n"
            "min_dst_y_next_start: %d, \n"
            "refcyc_per_htotal: %d, \n"
            "refcyc_x_after_scaler: %d, \n"
            "dst_y_after_scaler: %d, \n"
            "dst_y_prefetch: %d, \n"
            "dst_y_per_vm_vblank: %d, \n"
            "dst_y_per_row_vblank: %d, \n"
            "ref_freq_to_pix_freq: %d, \n"
            "vratio_prefetch: %d, \n"
            "refcyc_per_pte_group_vblank_l: %d, \n"
            "refcyc_per_meta_chunk_vblank_l: %d, \n"
            "dst_y_per_pte_row_nom_l: %d, \n"
            "refcyc_per_pte_group_nom_l: %d, \n",
            pipe_ctx->pipe_idx,
            pipe_ctx->dlg_regs.refcyc_h_blank_end,
            pipe_ctx->dlg_regs.dlg_vblank_end,
            pipe_ctx->dlg_regs.min_dst_y_next_start,
            pipe_ctx->dlg_regs.refcyc_per_htotal,
            pipe_ctx->dlg_regs.refcyc_x_after_scaler,
            pipe_ctx->dlg_regs.dst_y_after_scaler,
            pipe_ctx->dlg_regs.dst_y_prefetch,
            pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
            pipe_ctx->dlg_regs.dst_y_per_row_vblank,
            pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
            pipe_ctx->dlg_regs.vratio_prefetch,
            pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
            pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
            pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
            pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
            );

    DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
            "\ndst_y_per_meta_row_nom_l: %d, \n"
            "refcyc_per_meta_chunk_nom_l: %d, \n"
            "refcyc_per_line_delivery_pre_l: %d, \n"
            "refcyc_per_line_delivery_l: %d, \n"
            "vratio_prefetch_c: %d, \n"
            "refcyc_per_pte_group_vblank_c: %d, \n"
            "refcyc_per_meta_chunk_vblank_c: %d, \n"
            "dst_y_per_pte_row_nom_c: %d, \n"
            "refcyc_per_pte_group_nom_c: %d, \n"
            "dst_y_per_meta_row_nom_c: %d, \n"
            "refcyc_per_meta_chunk_nom_c: %d, \n"
            "refcyc_per_line_delivery_pre_c: %d, \n"
            "refcyc_per_line_delivery_c: %d \n"
            "========================================================\n",
            pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
            pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
            pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
            pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
            pipe_ctx->dlg_regs.vratio_prefetch_c,
            pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
            pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
            pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
            pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
            pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
            pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
            pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
            pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
            );

    DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
            "\n============== DML RQ Output parameters [%d] ==============\n"
            "chunk_size: %d \n"
            "min_chunk_size: %d \n"
            "meta_chunk_size: %d \n"
            "min_meta_chunk_size: %d \n"
            "dpte_group_size: %d \n"
            "mpte_group_size: %d \n"
            "swath_height: %d \n"
            "pte_row_height_linear: %d \n"
            "========================================================\n",
            pipe_ctx->pipe_idx,
            pipe_ctx->rq_regs.rq_regs_l.chunk_size,
            pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
            pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
            pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
            pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
            pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
            pipe_ctx->rq_regs.rq_regs_l.swath_height,
            pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
            );
}
*/

static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
        struct vm_system_aperture_param *apt,
        struct dce_hwseq *hws)

@ -2395,43 +2306,6 @@ static void dcn10_enable_plane(
            pipe_ctx->stream_res.opp,
            true);

/* TODO: enable/disable in dm as per update type.
    if (plane_state) {
        DC_LOG_DC(dc->ctx->logger,
                "Pipe:%d 0x%x: addr hi:0x%x, "
                "addr low:0x%x, "
                "src: %d, %d, %d,"
                " %d; dst: %d, %d, %d, %d;\n",
                pipe_ctx->pipe_idx,
                plane_state,
                plane_state->address.grph.addr.high_part,
                plane_state->address.grph.addr.low_part,
                plane_state->src_rect.x,
                plane_state->src_rect.y,
                plane_state->src_rect.width,
                plane_state->src_rect.height,
                plane_state->dst_rect.x,
                plane_state->dst_rect.y,
                plane_state->dst_rect.width,
                plane_state->dst_rect.height);

        DC_LOG_DC(dc->ctx->logger,
                "Pipe %d: width, height, x, y format:%d\n"
                "viewport:%d, %d, %d, %d\n"
                "recout: %d, %d, %d, %d\n",
                pipe_ctx->pipe_idx,
                plane_state->format,
                pipe_ctx->plane_res.scl_data.viewport.width,
                pipe_ctx->plane_res.scl_data.viewport.height,
                pipe_ctx->plane_res.scl_data.viewport.x,
                pipe_ctx->plane_res.scl_data.viewport.y,
                pipe_ctx->plane_res.scl_data.recout.width,
                pipe_ctx->plane_res.scl_data.recout.height,
                pipe_ctx->plane_res.scl_data.recout.x,
                pipe_ctx->plane_res.scl_data.recout.y);
        print_rq_dlg_ttu(dc, pipe_ctx);
    }
*/
    if (dc->config.gpu_vm_support)
        dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

@ -2628,9 +2502,25 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state
        dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}

void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
{
    struct dce_hwseq *hws = dc->hwseq;
    struct mpc *mpc = dc->res_pool->mpc;

    if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
        hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
    else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
        hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
    else
        color_space_to_black_color(
                dc, pipe_ctx->stream->output_color_space, color);

    if (mpc->funcs->set_bg_color)
        mpc->funcs->set_bg_color(mpc, color, mpcc_id);
}

void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
    struct hubp *hubp = pipe_ctx->plane_res.hubp;
    struct mpcc_blnd_cfg blnd_cfg = {{0}};
    bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@ -2639,18 +2529,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
    struct mpc *mpc = dc->res_pool->mpc;
    struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

    if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
        hws->funcs.get_hdr_visual_confirm_color(
                pipe_ctx, &blnd_cfg.black_color);
    } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
        hws->funcs.get_surface_visual_confirm_color(
                pipe_ctx, &blnd_cfg.black_color);
    } else {
        color_space_to_black_color(
                dc, pipe_ctx->stream->output_color_space,
                &blnd_cfg.black_color);
    }

    if (per_pixel_alpha)
        blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
    else
@ -2682,6 +2560,8 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
     */
    mpcc_id = hubp->inst;

    dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

    /* If there is no full update, don't need to touch MPC tree*/
    if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
        mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
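The refactor above factors visual-confirm color selection out of update_mpcc into a per-ASIC hwss hook that update_mpcc now reaches through dc->hwss. A sketch of the function-pointer declaration implied by that call site and by the .update_visual_confirm_color initializers added below; its exact position inside struct hw_sequencer_funcs is assumed:

    struct hw_sequencer_funcs {
        /* ... existing hooks ... */
        void (*update_visual_confirm_color)(struct dc *dc,
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color,
                int mpcc_id);
    };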
@ -206,4 +206,10 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc);

void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);

void dcn10_update_visual_confirm_color(
        struct dc *dc,
        struct pipe_ctx *pipe_ctx,
        struct tg_color *color,
        int mpcc_id);

#endif /* __DC_HWSS_DCN10_H__ */

@ -82,6 +82,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
    .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
    .set_pipe = dce110_set_pipe,
    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
    .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn10_private_funcs = {

@ -64,6 +64,8 @@ void mpc1_set_bg_color(struct mpc *mpc,
            MPCC_BG_G_Y, bg_g_y);
    REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
            MPCC_BG_B_CB, bg_b_cb);

    bottommost_mpcc->blnd_cfg.black_color = *bg_color;
}

static void mpc1_update_blending(
@ -246,6 +248,8 @@ struct mpcc *mpc1_insert_plane(
        }
    }

    mpc->funcs->set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);

    /* update the blending configuration */
    mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);

@ -495,6 +499,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
    .set_output_csc = NULL,
    .set_output_gamma = NULL,
    .get_mpc_out_mux = mpc1_get_mpc_out_mux,
    .set_bg_color = mpc1_set_bg_color,
};

void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
@ -2267,9 +2267,25 @@ void dcn20_get_mpctree_visual_confirm_color(
    *color = pipe_colors[top_pipe->pipe_idx];
}

void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
{
    struct dce_hwseq *hws = dc->hwseq;
    struct mpc *mpc = dc->res_pool->mpc;

    /* input to MPCC is always RGB, by default leave black_color at 0 */
    if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
        hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
    else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
        hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
    else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
        dcn20_get_mpctree_visual_confirm_color(pipe_ctx, color);

    if (mpc->funcs->set_bg_color)
        mpc->funcs->set_bg_color(mpc, color, mpcc_id);
}

void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
    struct hubp *hubp = pipe_ctx->plane_res.hubp;
    struct mpcc_blnd_cfg blnd_cfg = { {0} };
    bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@ -2278,15 +2294,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
    struct mpc *mpc = dc->res_pool->mpc;
    struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

    // input to MPCC is always RGB, by default leave black_color at 0
    if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
        hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
    } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
        hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
    } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
        dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
    }

    if (per_pixel_alpha)
        blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
    else
@ -2320,6 +2327,8 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
     */
    mpcc_id = hubp->inst;

    dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

    /* If there is no full update, don't need to touch MPC tree*/
    if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
            !pipe_ctx->update_flags.bits.mpcc) {

@ -146,5 +146,10 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
        const struct tg_color *solid_color,
        int width, int height, int offset);

void dcn20_update_visual_confirm_color(struct dc *dc,
        struct pipe_ctx *pipe_ctx,
        struct tg_color *color,
        int mpcc_id);

#endif /* __DC_HWSS_DCN20_H__ */

@ -96,6 +96,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#endif
    .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
    .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn20_private_funcs = {

@ -67,7 +67,6 @@ void mpc2_update_blending(
    REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
    REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);

    mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
    mpcc->blnd_cfg = *blnd_cfg;
}

@ -557,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
    .set_output_gamma = mpc2_set_output_gamma,
    .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
    .get_mpc_out_mux = mpc1_get_mpc_out_mux,
    .set_bg_color = mpc1_set_bg_color,
};

void dcn20_mpc_construct(struct dcn20_mpc *mpc20,

@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
    .is_abm_supported = dcn21_is_abm_supported,
    .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
    .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn21_private_funcs = {

@ -813,6 +813,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
            (100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
            denom) - 64LL;

    /* In some cases the stutter period is really big (tiny modes); in
     * those cases MALL can't be enabled, so skip them to avoid an
     * ASSERT().
     *
     * We can check whether stutter_period is more than 1/10th of the
     * frame time to decide if we can actually meet the range of the
     * hysteresis timer.
     */
    if (stutter_period > 100000/refresh_hz)
        return false;

    /* scale should be increased until it fits into 6 bits */
    while (tmr_delay & ~0x3F) {
        tmr_scale++;
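A quick worked check of the cutoff above, assuming stutter_period is in microseconds: one frame lasts 1000000/refresh_hz us, so 100000/refresh_hz is exactly one tenth of the frame time.

    /* Example at 60 Hz (values chosen for illustration):
     * frame time = 1000000 / 60 ~= 16666 us
     * cutoff     =  100000 / 60 ~=  1666 us (1/10 of the frame)
     * A mode whose stutter period exceeds ~1666 us cannot fit the
     * hysteresis timer range, so MALL is skipped instead of asserting. */
    unsigned int refresh_hz = 60;
    unsigned int cutoff_us = 100000 / refresh_hz;   /* ~1666 */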
@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
    .set_pipe = dcn21_set_pipe,
    .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
    .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn30_private_funcs = {

@ -1431,7 +1431,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
    .release_rmu = mpcc3_release_rmu,
    .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
    .get_mpc_out_mux = mpc1_get_mpc_out_mux,

    .set_bg_color = mpc1_set_bg_color,
};

void dcn30_mpc_construct(struct dcn30_mpc *mpc30,

@ -585,6 +585,181 @@
    type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\
    type MPC_RMU_SHAPER_MODE_CURRENT

#define MPC_COMMON_MASK_SH_LIST_DCN303(mask_sh) \
    MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\
    SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\
    SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\
    SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
    SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
    SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
    SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
    SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
    SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
    SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\
    SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
    SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
    SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\
    SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\
    SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\
    SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\
    SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\
    SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
    SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
    SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
    SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
    SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\
    SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
    SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\
    SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\
    SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\
    SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\
    SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\
    SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\
    SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\
    SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\
    SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \
    SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \
    SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\
    /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\
    SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\
    SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\
    SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\
    /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\
    SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\
    SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\
    SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\
    /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\
    SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\
    SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\
    SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\
    SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\
    SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\
    SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\
    /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\
    SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\
    SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\
    SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\
    SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\
    SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\
    SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\
    SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\
    SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\
    /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\
    SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
    SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
    SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\
    SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\
    SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\
    SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\
    SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\
    SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\
    SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)

#define MPC_REG_FIELD_LIST_DCN3_03(type) \
    MPC_REG_FIELD_LIST_DCN2_0(type) \
    type MPC_DWB0_MUX;\
    type MPC_DWB0_MUX_STATUS;\
    type MPC_OUT_RATE_CONTROL;\
    type MPC_OUT_RATE_CONTROL_DISABLE;\
    type MPC_OUT_FLOW_CONTROL_MODE;\
    type MPC_OUT_FLOW_CONTROL_COUNT; \
    type MPCC_GAMUT_REMAP_MODE; \
    type MPCC_GAMUT_REMAP_MODE_CURRENT;\
    type MPCC_GAMUT_REMAP_COEF_FORMAT; \
    type MPCC_GAMUT_REMAP_C11_A; \
    type MPCC_GAMUT_REMAP_C12_A; \
    type MPC_RMU0_MUX; \
    type MPC_RMU0_MUX_STATUS; \
    type MPC_RMU0_MEM_PWR_FORCE;\
    type MPC_RMU0_MEM_PWR_DIS;\
    type MPC_RMU0_MEM_LOW_PWR_MODE;\
    type MPC_RMU0_SHAPER_MEM_PWR_STATE;\
    type MPC_RMU0_3DLUT_MEM_PWR_STATE;\
    type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \
    type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\
    type MPCC_OGAM_RAMA_OFFSET_B;\
    type MPCC_OGAM_RAMA_OFFSET_G;\
    type MPCC_OGAM_RAMA_OFFSET_R;\
    type MPCC_OGAM_SELECT; \
    type MPCC_OGAM_PWL_DISABLE; \
    type MPCC_OGAM_MODE_CURRENT; \
    type MPCC_OGAM_SELECT_CURRENT; \
    type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \
    type MPCC_OGAM_LUT_READ_COLOR_SEL; \
    type MPCC_OGAM_LUT_READ_DBG; \
    type MPCC_OGAM_LUT_HOST_SEL; \
    type MPCC_OGAM_LUT_CONFIG_MODE; \
    type MPCC_OGAM_LUT_STATUS; \
    type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\
    type MPCC_OGAM_MEM_LOW_PWR_MODE;\
    type MPCC_OGAM_MEM_PWR_STATE;\
    type MPC_RMU_3DLUT_MODE; \
    type MPC_RMU_3DLUT_SIZE; \
    type MPC_RMU_3DLUT_MODE_CURRENT; \
    type MPC_RMU_3DLUT_WRITE_EN_MASK;\
    type MPC_RMU_3DLUT_RAM_SEL;\
    type MPC_RMU_3DLUT_30BIT_EN;\
    type MPC_RMU_3DLUT_CONFIG_STATUS;\
    type MPC_RMU_3DLUT_READ_SEL;\
    type MPC_RMU_3DLUT_INDEX;\
    type MPC_RMU_3DLUT_DATA0;\
    type MPC_RMU_3DLUT_DATA1;\
    type MPC_RMU_3DLUT_DATA_30BIT;\
    type MPC_RMU_SHAPER_LUT_MODE;\
    type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\
    type MPC_RMU_SHAPER_OFFSET_R;\
    type MPC_RMU_SHAPER_OFFSET_G;\
    type MPC_RMU_SHAPER_OFFSET_B;\
    type MPC_RMU_SHAPER_SCALE_R;\
    type MPC_RMU_SHAPER_SCALE_G;\
    type MPC_RMU_SHAPER_SCALE_B;\
    type MPC_RMU_SHAPER_LUT_INDEX;\
    type MPC_RMU_SHAPER_LUT_DATA;\
    type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\
    type MPC_RMU_SHAPER_LUT_WRITE_SEL;\
    type MPC_RMU_SHAPER_CONFIG_STATUS;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\
    type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\
    type MPC_RMU_SHAPER_MODE_CURRENT

struct dcn30_mpc_registers {
    MPC_REG_VARIABLE_LIST_DCN3_0;
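For readers unfamiliar with the SF()/mask_sh idiom used throughout the list above: the same field list is instantiated twice, once with a shift suffix and once with a mask suffix, so a single definition fills both the shift and the mask tables. A minimal sketch of the pattern under that assumption, not the DC headers' exact macros:

    /* Illustrative only: pairs a field with its generated shift or mask. */
    #define SF(reg_name, field_name, post_fix) \
        .field_name = reg_name ## __ ## field_name ## post_fix

    /* Instantiated as e.g. MPC_COMMON_MASK_SH_LIST_DCN303(__SHIFT) for the
     * shift table and MPC_COMMON_MASK_SH_LIST_DCN303(_MASK) for the masks. */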
@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
        .fp16 = 16000
    },

    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
    .max_downscale_factor = {
        .argb8888 = 600,
        .nv12 = 600,
        .fp16 = 600
        .argb8888 = 167,
        .nv12 = 167,
        .fp16 = 167
    }
};

@ -1724,7 +1725,7 @@ static bool init_soc_bounding_box(struct dc *dc,
    DC_LOGGER_INIT(dc->ctx->logger);

    if (!is_soc_bounding_box_valid(dc)) {
        DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
        DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
        return false;
    }
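The new cap follows from the comment's arithmetic, assuming max_downscale_factor holds the minimum allowed dst/src ratio in thousandths: 6:1 downscaling needs 1000/6 = 166.67, rounded up to 167, whereas the old value of 600 permitted only about 1.67:1 (1000/600). A small illustration under that assumption; the widths are example values:

    /* With factor = 167, a 3840 px wide source may shrink to roughly
     * 3840 * 167 / 1000 ~= 642 px, i.e. about 6:1. */
    const int factor = 167;     /* thousandths of dst/src */
    const int src_w = 3840;
    const int min_dst_w = (src_w * factor + 999) / 1000;    /* 642 */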
@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
    .optimize_pwr_state = dcn21_optimize_pwr_state,
    .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
    .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn301_private_funcs = {

@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
        .fp16 = 16000
    },

    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
    .max_downscale_factor = {
        .argb8888 = 600,
        .nv12 = 600,
        .fp16 = 600
        .argb8888 = 167,
        .nv12 = 167,
        .fp16 = 167
    },
    64,
    64
@ -1497,7 +1498,7 @@ static bool init_soc_bounding_box(struct dc *dc,
    DC_LOGGER_INIT(dc->ctx->logger);

    if (!is_soc_bounding_box_valid(dc)) {
        DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
        DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
        return false;
    }

@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = {
        .nv12 = 16000,
        .fp16 = 16000
    },
    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
    .max_downscale_factor = {
        .argb8888 = 600,
        .nv12 = 600,
        .fp16 = 600
        .argb8888 = 167,
        .nv12 = 167,
        .fp16 = 167
    },
    16,
    16
@ -1093,7 +1094,7 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool)
    DC_LOGGER_INIT(dc->ctx->logger);

    if (!is_soc_bounding_box_valid(dc)) {
        DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
        DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
        return false;
    }
drivers/gpu/drm/amd/display/dc/dcn303/Makefile (new file, 39 lines)
@ -0,0 +1,39 @@
# SPDX-License-Identifier: MIT
#
# Copyright (C) 2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Authors: AMD
#
# Makefile for dcn303.

DCN3_03 = dcn303_init.o dcn303_hwseq.o dcn303_resource.o

ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -msse
endif

ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -mhard-float -maltivec
endif

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mhard-float
endif

ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -msse2
endif
endif

AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))

AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03)
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h (new file, 30 lines)
@ -0,0 +1,30 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#ifndef __DCN303_DCCG_H__
#define __DCN303_DCCG_H__

#include "dcn30/dcn30_dccg.h"


#define DCCG_REG_LIST_DCN3_03() \
    SR(DPPCLK_DTO_CTRL),\
    DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
    DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
    SR(REFCLK_CNTL)

#define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \
    DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
    DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
    DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
    DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
    DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
    DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
    DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\
    DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh)

#endif //__DCN303_DCCG_H__
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c (new file, 40 lines)
@ -0,0 +1,40 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#include "dcn303_hwseq.h"

#include "dce/dce_hwseq.h"

#include "reg_helper.h"
#include "dc.h"

#define DC_LOGGER_INIT(logger)

#define CTX \
    hws->ctx
#define REG(reg)\
    hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
    hws->shifts->field_name, hws->masks->field_name


void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on)
{
    /* DCN303 removes PG registers. */
}

void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
    /* DCN303 removes PG registers. */
}

void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on)
{
    /* DCN303 removes PG registers. */
}
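A note on the stubs above: overriding the PG hooks with explicit no-ops, rather than clearing them to NULL, keeps every sequencer call site safe on DCN303, which has no power-gating registers. A sketch of the two call styles this covers; the guards shown are assumptions about callers, not quotes from the suppressed files:

    /* Guarded call: would also tolerate a NULL hook. */
    if (hws->funcs.dpp_pg_control)
        hws->funcs.dpp_pg_control(hws, dpp_inst, false);

    /* Unconditional call: requires the DCN303 no-op stub. */
    hws->funcs.dsc_pg_control(hws, dsc_inst, false);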
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h (new file, 17 lines)
@ -0,0 +1,17 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#ifndef __DC_HWSS_DCN303_H__
#define __DC_HWSS_DCN303_H__

#include "hw_sequencer_private.h"

void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on);

#endif /* __DC_HWSS_DCN303_H__ */
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c (new file, 19 lines)
@ -0,0 +1,19 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#include "dcn303_hwseq.h"
#include "dcn30/dcn30_init.h"
#include "dc.h"

void dcn303_hw_sequencer_construct(struct dc *dc)
{
    dcn30_hw_sequencer_construct(dc);

    dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control;
    dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control;
    dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control;
}
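Usage sketch: the constructor above first inherits the full DCN30 sequencer and then overrides only the PG hooks. A hedged sketch of the call site in the dcn303 resource constructor (whose diff is suppressed below; the function name and surrounding code are assumed for illustration):

    static bool dcn303_resource_construct(uint8_t num_virtual_links,
            struct dc *dc, struct resource_pool *pool)
    {
        /* ... caps, clocks and pool setup ... */
        dcn303_hw_sequencer_construct(dc);  /* dcn30 hooks + PG no-ops */
        /* ... */
        return true;
    }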
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h (new file, 15 lines)
@ -0,0 +1,15 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#ifndef __DC_DCN303_INIT_H__
#define __DC_DCN303_INIT_H__

struct dc;

void dcn303_hw_sequencer_construct(struct dc *dc);

#endif /* __DC_DCN303_INIT_H__ */
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c (new file, 1675 lines)
File diff suppressed because it is too large.
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h (new file, 17 lines)
@ -0,0 +1,17 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Authors: AMD
 */

#ifndef _DCN303_RESOURCE_H_
#define _DCN303_RESOURCE_H_

#include "core_types.h"

struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);

void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);

#endif /* _DCN303_RESOURCE_H_ */
@ -111,6 +111,7 @@ bool dal_hw_factory_init(
    case DCN_VERSION_3_0:
    case DCN_VERSION_3_01:
    case DCN_VERSION_3_02:
    case DCN_VERSION_3_03:
        dal_hw_factory_dcn30_init(factory);
        return true;
#endif

@ -106,6 +106,7 @@ bool dal_hw_translate_init(
    case DCN_VERSION_3_0:
    case DCN_VERSION_3_01:
    case DCN_VERSION_3_02:
    case DCN_VERSION_3_03:
        dal_hw_translate_dcn30_init(translate);
        return true;
#endif
Some files were not shown because too many files have changed in this diff.