Merge branch 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux into drm-next
New radeon and amdgpu features for 4.13:
- Lots of Vega10 bug fixes
- Preliminary Raven support
- KIQ support for compute rings
- MEC queue management rework from Andres
- Audio support for DCE6
- SR-IOV improvements
- Improved module parameters for controlling radeon vs amdgpu support
  for SI and CIK
- Bug fixes
- General code cleanups

[airlied: dropped drmP.h header from one file was needed and build broke]

* 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux: (362 commits)
  drm/amdgpu: Fix compiler warnings
  drm/amdgpu: vm_update_ptes remove code duplication
  drm/amd/amdgpu: Port VCN over to new SOC15 macros
  drm/amd/amdgpu: Port PSP v10.0 over to new SOC15 macros
  drm/amd/amdgpu: Port PSP v3.1 over to new SOC15 macros
  drm/amd/amdgpu: Port NBIO v7.0 driver over to new SOC15 macros
  drm/amd/amdgpu: Port NBIO v6.1 driver over to new SOC15 macros
  drm/amd/amdgpu: Port UVD 7.0 over to new SOC15 macros
  drm/amd/amdgpu: Port MMHUB over to new SOC15 macros
  drm/amd/amdgpu: Cleanup gfxhub read-modify-write patterns
  drm/amd/amdgpu: Port GFXHUB over to new SOC15 macros
  drm/amd/amdgpu: Add offset variant to SOC15 macros
  drm/amd/powerplay: add avfs control for Vega10
  drm/amdgpu: add virtual display support for raven
  drm/amdgpu/gfx9: fix compute ring doorbell index
  drm/amd/amdgpu: Rename KIQ ring to avoid spaces
  drm/amd/amdgpu: gfx9 tidy ups (v2)
  drm/amdgpu: add contiguous flag in ucode bo create
  drm/amdgpu: fix missed gpu info firmware when cache firmware during S3
  drm/amdgpu: export test ib debugfs interface
  ...
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -5,15 +5,23 @@ config DRM_AMDGPU_SI
 	  Choose this option if you want to enable experimental support
 	  for SI asics.
 
+	  SI is already supported in radeon. Experimental support for SI
+	  in amdgpu will be disabled by default and is still provided by
+	  radeon. Use module options to override this:
+
+	  radeon.si_support=0 amdgpu.si_support=1
+
 config DRM_AMDGPU_CIK
 	bool "Enable amdgpu support for CIK parts"
 	depends on DRM_AMDGPU
 	help
-	  Choose this option if you want to enable experimental support
-	  for CIK asics.
+	  Choose this option if you want to enable support for CIK asics.
 
-	  CIK is already supported in radeon. CIK support in amdgpu
-	  is for experimentation and testing.
+	  CIK is already supported in radeon. Support for CIK in amdgpu
+	  will be disabled by default and is still provided by radeon.
+	  Use module options to override this:
+
+	  radeon.cik_support=0 amdgpu.cik_support=1
 
 config DRM_AMDGPU_USERPTR
 	bool "Always enable userptr write support"
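The Kconfig help above documents the override in terms of kernel command-line options. For orientation, here is a minimal sketch of the C plumbing such an option implies on the amdgpu side; the variable names match the externs added to amdgpu.h later in this diff, but the defaults and permission bits shown are illustrative assumptions, not a quote of the patch:

/* Sketch only: module-option plumbing behind the help text above. */
#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_si_support;	/* assumed default 0: leave SI asics to radeon */
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif

#ifdef CONFIG_DRM_AMDGPU_CIK
int amdgpu_cik_support;	/* assumed default 0: leave CIK asics to radeon */
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif

Booting with radeon.si_support=0 amdgpu.si_support=1 (or the cik_support pair) then flips which driver binds the older asics, exactly as the help text describes.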
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+	amdgpu_queue_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -34,7 +35,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o
+	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o
 
 # add GMC block
 amdgpu-y += \
@@ -54,7 +55,8 @@ amdgpu-y += \
 # add PSP block
 amdgpu-y += \
 	amdgpu_psp.o \
-	psp_v3_1.o
+	psp_v3_1.o \
+	psp_v10_0.o
 
 # add SMC block
 amdgpu-y += \
@@ -92,6 +94,11 @@ amdgpu-y += \
 	vce_v3_0.o \
 	vce_v4_0.o
 
+# add VCN block
+amdgpu-y += \
+	amdgpu_vcn.o \
+	vcn_v1_0.o
+
 # add amdkfd interfaces
 amdgpu-y += \
 	amdgpu_amdkfd.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -46,6 +46,8 @@
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
+#include <kgd_kfd_interface.h>
+
 #include "amd_shared.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
@@ -62,6 +64,7 @@
 #include "amdgpu_acp.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
+#include "amdgpu_vcn.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
@@ -92,6 +95,7 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
+extern int amdgpu_vm_update_mode;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_no_evict;
@@ -109,6 +113,15 @@ extern int amdgpu_prim_buf_per_se;
 extern int amdgpu_pos_buf_per_se;
 extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
+extern int amdgpu_job_hang_limit;
+extern int amdgpu_lbpw;
+
+#ifdef CONFIG_DRM_AMDGPU_SI
+extern int amdgpu_si_support;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern int amdgpu_cik_support;
+#endif
 
 #define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
@@ -305,8 +318,8 @@ struct amdgpu_gart_funcs {
 	/* set pte flags based per asic */
 	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
 				     uint32_t flags);
-	/* adjust mc addr in fb for APU case */
-	u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+	/* get the pde for a given mc addr */
+	u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
 	uint32_t (*get_invalidate_req)(unsigned int vm_id);
 };
 
@@ -554,7 +567,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
@@ -602,6 +615,7 @@ struct amdgpu_mc {
 	uint32_t		srbm_soft_reset;
 	struct amdgpu_mode_mc_save save;
 	bool			prt_warning;
+	uint64_t		stolen_size;
 	/* apertures */
 	u64			shared_aperture_start;
 	u64			shared_aperture_end;
@@ -771,6 +785,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct dma_fence **f);
 
+/*
+ * Queue manager
+ */
+struct amdgpu_queue_mapper {
+	int		hw_ip;
+	struct mutex	lock;
+	/* protected by lock */
+	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_queue_mgr {
+	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+};
+
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+			 struct amdgpu_queue_mgr *mgr,
+			 int hw_ip, int instance, int ring,
+			 struct amdgpu_ring **out_ring);
+
 /*
  * context related structures
  */
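These declarations replace the fixed amdgpu_cs_get_ring() lookup (deleted from amdgpu_cs.c later in this diff) with a per-context mapper from the user-visible (hw_ip, instance, ring) triple to a physical ring. A hedged sketch of a caller, assuming a context whose queue_mgr has already been through amdgpu_queue_mgr_init():

/* Illustrative only; the real call sites are in amdgpu_cs.c below. */
static int resolve_user_ring(struct amdgpu_device *adev, struct amdgpu_ctx *ctx,
			     u32 ip_type, u32 instance, u32 user_ring,
			     struct amdgpu_ring **out)
{
	/* The per-IP mapper translates user_ring under its lock and caches
	 * the result in queue_map[] for later submissions on this context. */
	return amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				    ip_type, instance, user_ring, out);
}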
@@ -784,6 +821,7 @@ struct amdgpu_ctx_ring {
 struct amdgpu_ctx {
 	struct kref		refcount;
 	struct amdgpu_device	*adev;
+	struct amdgpu_queue_mgr queue_mgr;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct dma_fence	**fences;
@@ -822,6 +860,7 @@ struct amdgpu_fpriv {
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
+	u32			vram_lost_counter;
 };
 
 /*
@@ -893,20 +932,26 @@ struct amdgpu_rlc {
 	u32 *register_restore;
 };
 
+#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+
 struct amdgpu_mec {
 	struct amdgpu_bo	*hpd_eop_obj;
 	u64			hpd_eop_gpu_addr;
 	struct amdgpu_bo	*mec_fw_obj;
 	u64			mec_fw_gpu_addr;
-	u32 num_pipe;
 	u32 num_mec;
-	u32 num_queue;
+	u32 num_pipe_per_mec;
+	u32 num_queue_per_pipe;
 	void			*mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
+
+	/* These are the resources for which amdgpu takes ownership */
+	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 };
 
 struct amdgpu_kiq {
 	u64			eop_gpu_addr;
 	struct amdgpu_bo	*eop_obj;
+	struct mutex		ring_mutex;
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 };
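The MEC bookkeeping is now an explicit topology (num_mec x num_pipe_per_mec x num_queue_per_pipe) plus a per-queue ownership bitmap sized by KGD_MAX_QUEUES. The pairing of a (mec, pipe, queue) triple with a bitmap position works out to the arithmetic below; this is a sketch of the relation only, while the tree's own helper for it is amdgpu_gfx_queue_to_bit, used in the amdkfd init code later in this diff:

/* Sketch: flatten (mec, pipe, queue) into a queue_bitmap bit index. */
static inline int queue_to_bit_sketch(const struct amdgpu_mec *mec,
				      int mec_idx, int pipe, int queue)
{
	return mec_idx * mec->num_pipe_per_mec * mec->num_queue_per_pipe +
	       pipe * mec->num_queue_per_pipe +
	       queue;
}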
@@ -983,7 +1028,10 @@ struct amdgpu_gfx_config {
 struct amdgpu_cu_info {
 	uint32_t number; /* total active CU number */
 	uint32_t ao_cu_mask;
+	uint32_t max_waves_per_simd;
 	uint32_t wave_front_size;
+	uint32_t max_scratch_slots_per_cu;
+	uint32_t lds_size;
 	uint32_t bitmap[4][4];
 };
 
@@ -1061,6 +1109,8 @@ struct amdgpu_gfx {
 	uint32_t			grbm_soft_reset;
 	uint32_t			srbm_soft_reset;
 	bool				in_reset;
+	/* s3/s4 mask */
+	bool				in_suspend;
 	/* NGG */
 	struct amdgpu_ngg		ngg;
 };
@@ -1114,7 +1164,6 @@ struct amdgpu_cs_parser {
 #define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
 #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
 #define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-#define AMDGPU_VM_DOMAIN                    (1 << 3) /* bit set means in virtual memory context */
 
 struct amdgpu_job {
 	struct amd_sched_job    base;
@@ -1122,6 +1171,8 @@ struct amdgpu_job {
 	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
+	struct amdgpu_sync	dep_sync;
+	struct amdgpu_sync	sched_sync;
 	struct amdgpu_ib	*ibs;
 	struct dma_fence	*fence; /* the hw fence */
 	uint32_t		preamble_status;
@@ -1129,7 +1180,6 @@ struct amdgpu_job {
 	void			*owner;
 	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool			vm_needs_flush;
-	bool			need_pipeline_sync;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
 	uint32_t		gds_base, gds_size;
@@ -1221,6 +1271,9 @@ struct amdgpu_firmware {
 	const struct amdgpu_psp_funcs *funcs;
 	struct amdgpu_bo *rbuf;
 	struct mutex mutex;
+
+	/* gpu info firmware data pointer */
+	const struct firmware *gpu_info_fw;
 };
 
 /*
@@ -1296,7 +1349,6 @@ struct amdgpu_smumgr {
  */
 struct amdgpu_allowed_register_entry {
 	uint32_t reg_offset;
-	bool untouched;
 	bool grbm_indexed;
 };
 
@@ -1424,6 +1476,7 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+#define AMDGPU_RESET_MAGIC_NUM 64
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
@@ -1523,7 +1576,9 @@ struct amdgpu_device {
 	atomic64_t			gtt_usage;
 	atomic64_t			num_bytes_moved;
 	atomic64_t			num_evictions;
+	atomic64_t			num_vram_cpu_page_faults;
 	atomic_t			gpu_reset_counter;
+	atomic_t			vram_lost_counter;
 
 	/* data for buffer migration throttling */
 	struct {
@@ -1570,11 +1625,18 @@ struct amdgpu_device {
 	/* sdma */
 	struct amdgpu_sdma		sdma;
 
+	union {
+		struct {
 	/* uvd */
 	struct amdgpu_uvd		uvd;
 
 	/* vce */
 	struct amdgpu_vce		vce;
+		};
+
+		/* vcn */
+		struct amdgpu_vcn	vcn;
+	};
 
 	/* firmwares */
 	struct amdgpu_firmware		firmware;
@@ -1598,6 +1660,9 @@ struct amdgpu_device {
 	/* amdkfd interface */
 	struct kfd_dev		*kfd;
 
+	/* delayed work_func for deferring clockgating during resume */
+	struct delayed_work	late_init_work;
+
 	struct amdgpu_virt	virt;
 
 	/* link all shadow bo */
@@ -1606,9 +1671,13 @@ struct amdgpu_device {
 	/* link all gtt */
 	spinlock_t		gtt_list_lock;
 	struct list_head	gtt_list;
+	/* keep an lru list of rings by HW IP */
+	struct list_head	ring_lru_list;
+	spinlock_t		ring_lru_list_lock;
 
 	/* record hw reset is performed */
 	bool has_hw_reset;
+	u8			reset_magic[AMDGPU_RESET_MAGIC_NUM];
 
 };
 
@@ -1617,7 +1686,6 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
 
-bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
 		       struct pci_dev *pdev,
@@ -1733,9 +1801,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *sr
 	unsigned occupied, chunk1, chunk2;
 	void *dst;
 
-	if (ring->count_dw < count_dw) {
+	if (unlikely(ring->count_dw < count_dw)) {
 		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-	} else {
+		return;
+	}
+
 	occupied = ring->wptr & ring->buf_mask;
 	dst = (void *)&ring->ring[occupied];
 	chunk1 = ring->buf_mask + 1 - occupied;
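With the early return in place, the copy path no longer nests under an else. What remains is a write that may wrap past the end of the ring buffer: chunk1 is what fits up to the end, chunk2 spills back to the start. The standalone sketch below reproduces just that split for a power-of-two ring of 32-bit dwords (names and sizes are illustrative, not the driver's types):

#include <string.h>

/* buf_mask is size-1 for a power-of-two ring, as in struct amdgpu_ring. */
static void ring_write_sketch(unsigned int *ring, unsigned int buf_mask,
			      unsigned int wptr, const unsigned int *src,
			      unsigned int count_dw)
{
	unsigned int occupied = wptr & buf_mask;
	unsigned int chunk1 = buf_mask + 1 - occupied;	/* dwords to the end */
	unsigned int chunk2;

	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;			/* wraps to the start */

	memcpy(&ring[occupied], src, chunk1 * sizeof(*ring));
	if (chunk2)
		memcpy(ring, src + chunk1, chunk2 * sizeof(*ring));
}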
@@ -1757,7 +1827,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *sr
 	ring->wptr &= ring->ptr_mask;
 	ring->count_dw -= count_dw;
 }
-}
 
 static inline struct amdgpu_sdma_instance *
 amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
@@ -1792,6 +1861,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
@@ -1813,6 +1883,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -1849,9 +1920,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-		       u32 ip_instance, u32 ring,
-		       struct amdgpu_ring **out_ring);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
@@ -1900,6 +1968,8 @@ static inline bool amdgpu_has_atpx(void) { return false; }
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
 extern const int amdgpu_max_kms_ioctl;
 
+bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
+			  struct amdgpu_fpriv *fpriv);
 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
 void amdgpu_driver_unload_kms(struct drm_device *dev);
 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -24,6 +24,7 @@
 #include "amd_shared.h"
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_gfx.h"
 #include <linux/module.h>
 
 const struct kfd2kgd_calls *kfd2kgd;
@@ -60,9 +61,9 @@ int amdgpu_amdkfd_init(void)
 	return ret;
 }
 
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
 {
-	switch (rdev->asic_type) {
+	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
 		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
@@ -86,59 +87,83 @@ void amdgpu_amdkfd_fini(void)
 	}
 }
 
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 {
 	if (kgd2kfd)
-		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
-					rdev->pdev, kfd2kgd);
+		adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
+					adev->pdev, kfd2kgd);
 }
 
-void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
-	if (rdev->kfd) {
+	int i;
+	int last_valid_bit;
+	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.first_compute_pipe = 1,
-			.compute_pipe_count = 4 - 1,
+			.num_mec = adev->gfx.mec.num_mec,
+			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
+			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
 
-		amdgpu_doorbell_get_kfd_info(rdev,
+		/* this is going to have a few of the MSBs set that we need to
+		 * clear */
+		bitmap_complement(gpu_resources.queue_bitmap,
+				  adev->gfx.mec.queue_bitmap,
+				  KGD_MAX_QUEUES);
+
+		/* remove the KIQ bit as well */
+		if (adev->gfx.kiq.ring.ready)
+			clear_bit(amdgpu_gfx_queue_to_bit(adev,
+					adev->gfx.kiq.ring.me - 1,
+					adev->gfx.kiq.ring.pipe,
+					adev->gfx.kiq.ring.queue),
+				  gpu_resources.queue_bitmap);
+
+		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
+		 * nbits is not compile time constant */
+		last_valid_bit = adev->gfx.mec.num_mec
+				* adev->gfx.mec.num_pipe_per_mec
+				* adev->gfx.mec.num_queue_per_pipe;
+		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
+			clear_bit(i, gpu_resources.queue_bitmap);
+
+		amdgpu_doorbell_get_kfd_info(adev,
 					     &gpu_resources.doorbell_physical_address,
 					     &gpu_resources.doorbell_aperture_size,
 					     &gpu_resources.doorbell_start_offset);
 
-		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+		kgd2kfd->device_init(adev->kfd, &gpu_resources);
 	}
 }
 
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
 {
-	if (rdev->kfd) {
-		kgd2kfd->device_exit(rdev->kfd);
-		rdev->kfd = NULL;
+	if (adev->kfd) {
+		kgd2kfd->device_exit(adev->kfd);
+		adev->kfd = NULL;
 	}
 }
 
-void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		const void *ih_ring_entry)
 {
-	if (rdev->kfd)
-		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+	if (adev->kfd)
+		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
 {
-	if (rdev->kfd)
-		kgd2kfd->suspend(rdev->kfd);
+	if (adev->kfd)
+		kgd2kfd->suspend(adev->kfd);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
 {
 	int r = 0;
 
-	if (rdev->kfd)
-		r = kgd2kfd->resume(rdev->kfd);
+	if (adev->kfd)
+		r = kgd2kfd->resume(adev->kfd);
 
 	return r;
 }
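The rewritten init path derives KFD's share of the compute queues from amdgpu's ownership bitmap instead of hard-coded pipe counts: complement queue_bitmap (which also sets bits above the real hardware range), knock out the KIQ slot, then clear everything past the last physically valid queue. Condensed into a hedged sketch (adev assumed in scope, error handling omitted):

/* Sketch of the carve-out performed above. */
DECLARE_BITMAP(kfd_queues, KGD_MAX_QUEUES);
int i, last_valid_bit = adev->gfx.mec.num_mec *
			adev->gfx.mec.num_pipe_per_mec *
			adev->gfx.mec.num_queue_per_pipe;

bitmap_complement(kfd_queues, adev->gfx.mec.queue_bitmap, KGD_MAX_QUEUES);

if (adev->gfx.kiq.ring.ready)		/* KIQ stays with amdgpu */
	clear_bit(amdgpu_gfx_queue_to_bit(adev, adev->gfx.kiq.ring.me - 1,
					  adev->gfx.kiq.ring.pipe,
					  adev->gfx.kiq.ring.queue),
		  kfd_queues);

for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
	clear_bit(i, kfd_queues);	/* no such hardware queue */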
@@ -147,7 +172,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 			void **mem_obj, uint64_t *gpu_addr,
 			void **cpu_ptr)
 {
-	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
 	int r;
 
@@ -159,10 +184,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	if ((*mem) == NULL)
 		return -ENOMEM;
 
-	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
 	if (r) {
-		dev_err(rdev->dev,
+		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
 		return r;
 	}
@@ -170,21 +195,21 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	/* map the buffer */
 	r = amdgpu_bo_reserve((*mem)->bo, true);
 	if (r) {
-		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
 		goto allocate_mem_reserve_bo_failed;
 	}
 
 	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
 				&(*mem)->gpu_addr);
 	if (r) {
-		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
 		goto allocate_mem_pin_bo_failed;
 	}
 	*gpu_addr = (*mem)->gpu_addr;
 
 	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
 	if (r) {
-		dev_err(rdev->dev,
+		dev_err(adev->dev,
 			"(%d) failed to map bo to kernel for amdkfd\n", r);
 		goto allocate_mem_kmap_bo_failed;
 	}
@@ -220,27 +245,27 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 
 uint64_t get_vmem_size(struct kgd_dev *kgd)
 {
-	struct amdgpu_device *rdev =
+	struct amdgpu_device *adev =
 		(struct amdgpu_device *)kgd;
 
 	BUG_ON(kgd == NULL);
 
-	return rdev->mc.real_vram_size;
+	return adev->mc.real_vram_size;
 }
 
 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 {
-	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	if (rdev->gfx.funcs->get_gpu_clock_counter)
-		return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
+	if (adev->gfx.funcs->get_gpu_clock_counter)
+		return adev->gfx.funcs->get_gpu_clock_counter(adev);
 	return 0;
 }
 
 uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 {
-	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
 	/* The sclk is in quantas of 10kHz */
-	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+	return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -39,15 +39,15 @@ struct kgd_mem {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -29,6 +29,7 @@
 #include "cikd.h"
 #include "cik_sdma.h"
 #include "amdgpu_ucode.h"
+#include "gfx_v7_0.h"
 #include "gca/gfx_7_2_d.h"
 #include "gca/gfx_7_2_enum.h"
 #include "gca/gfx_7_2_sh_mask.h"
@@ -38,8 +39,6 @@
 #include "gmc/gmc_7_1_sh_mask.h"
 #include "cik_structs.h"
 
-#define CIK_PIPE_PER_MEC	(4)
-
 enum {
 	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
 	MAX_WATCH_ADDRESSES = 4
@@ -185,8 +184,10 @@ static void unlock_srbm(struct kgd_dev *kgd)
 static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
-	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
 	lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
@@ -243,18 +244,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 				uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
-	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
-	lock_srbm(kgd, mec, pipe, 0, 0);
-	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
-	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
-	WREG32(mmCP_HPD_EOP_VMID, 0);
-	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
-	unlock_srbm(kgd);
-
+	/* amdgpu owns the per-pipe state */
 	return 0;
 }
 
@@ -264,8 +254,8 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	uint32_t mec;
 	uint32_t pipe;
 
-	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
-	pipe = (pipe_id % CIK_PIPE_PER_MEC);
+	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
 	lock_srbm(kgd, mec, pipe, 0, 0);
 
@@ -309,55 +299,11 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	m = get_mqd(mqd);
 
 	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+	if (is_wptr_shadow_valid)
+		m->cp_hqd_pq_wptr = wptr_shadow;
+
 	acquire_queue(kgd, pipe_id, queue_id);
-	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
-	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
-	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
-
-	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
-	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
-	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
-
-	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
-	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
-	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
-
-	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
-
-	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
-	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
-	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
-
-	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
-	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
-	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
-	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
-
-	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
-	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
-			m->cp_hqd_pq_rptr_report_addr_hi);
-
-	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
-
-	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
-	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
-
-	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
-
-	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
-
-	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
-
-	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
-	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
-
-	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
-
-	if (is_wptr_shadow_valid)
-		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
-
-	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+	gfx_v7_0_mqd_commit(adev, m);
 	release_queue(kgd);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -28,6 +28,7 @@
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_ucode.h"
+#include "gfx_v8_0.h"
 #include "gca/gfx_8_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_enum.h"
@@ -38,8 +39,6 @@
 #include "vi_structs.h"
 #include "vid.h"
 
-#define VI_PIPE_PER_MEC	(4)
-
 struct cik_sdma_rlc_registers;
 
 /*
@@ -146,8 +145,10 @@ static void unlock_srbm(struct kgd_dev *kgd)
 static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
-	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
 	lock_srbm(kgd, mec, pipe, queue_id, 0);
 }
@@ -205,6 +206,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 				uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
+	/* amdgpu owns the per-pipe state */
 	return 0;
 }
 
@@ -214,8 +216,8 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 	uint32_t mec;
 	uint32_t pipe;
 
-	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
-	pipe = (pipe_id % VI_PIPE_PER_MEC);
+	mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
 	lock_srbm(kgd, mec, pipe, 0, 0);
 
@@ -251,53 +253,11 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	m = get_mqd(mqd);
 
 	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+	if (valid_wptr == 0)
+		m->cp_hqd_pq_wptr = shadow_wptr;
+
 	acquire_queue(kgd, pipe_id, queue_id);
-	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
-	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
-	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
-
-	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
-	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
-	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
-	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
-	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
-	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
-	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
-	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
-	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
-			m->cp_hqd_pq_rptr_report_addr_hi);
-
-	if (valid_wptr > 0)
-		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
-
-	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
-	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
-
-	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
-	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
-	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
-	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
-	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
-	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
-
-	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
-	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
-	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
-	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
-	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
-	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
-	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
-
-	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
-
-	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
-	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
-	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
-	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
-
-	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
-
+	gfx_v8_0_mqd_commit(adev, mqd);
 	release_queue(kgd);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -30,78 +30,6 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-		       u32 ip_instance, u32 ring,
-		       struct amdgpu_ring **out_ring)
-{
-	/* Right now all IPs have only one instance - multiple rings. */
-	if (ip_instance != 0) {
-		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
-		return -EINVAL;
-	}
-
-	switch (ip_type) {
-	default:
-		DRM_ERROR("unknown ip type: %d\n", ip_type);
-		return -EINVAL;
-	case AMDGPU_HW_IP_GFX:
-		if (ring < adev->gfx.num_gfx_rings) {
-			*out_ring = &adev->gfx.gfx_ring[ring];
-		} else {
-			DRM_ERROR("only %d gfx rings are supported now\n",
-				  adev->gfx.num_gfx_rings);
-			return -EINVAL;
-		}
-		break;
-	case AMDGPU_HW_IP_COMPUTE:
-		if (ring < adev->gfx.num_compute_rings) {
-			*out_ring = &adev->gfx.compute_ring[ring];
-		} else {
-			DRM_ERROR("only %d compute rings are supported now\n",
-				  adev->gfx.num_compute_rings);
-			return -EINVAL;
-		}
-		break;
-	case AMDGPU_HW_IP_DMA:
-		if (ring < adev->sdma.num_instances) {
-			*out_ring = &adev->sdma.instance[ring].ring;
-		} else {
-			DRM_ERROR("only %d SDMA rings are supported\n",
-				  adev->sdma.num_instances);
-			return -EINVAL;
-		}
-		break;
-	case AMDGPU_HW_IP_UVD:
-		*out_ring = &adev->uvd.ring;
-		break;
-	case AMDGPU_HW_IP_VCE:
-		if (ring < adev->vce.num_rings){
-			*out_ring = &adev->vce.ring[ring];
-		} else {
-			DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
-			return -EINVAL;
-		}
-		break;
-	case AMDGPU_HW_IP_UVD_ENC:
-		if (ring < adev->uvd.num_enc_rings){
-			*out_ring = &adev->uvd.ring_enc[ring];
-		} else {
-			DRM_ERROR("only %d UVD ENC rings are supported\n",
-				  adev->uvd.num_enc_rings);
-			return -EINVAL;
-		}
-		break;
-	}
-
-	if (!(*out_ring && (*out_ring)->adev)) {
-		DRM_ERROR("Ring %d is not initialized on IP %d\n",
-			  ring, ip_type);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
@@ -597,7 +525,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			goto error_free_pages;
 		}
 
-		/* Fill the page arrays for all useptrs. */
+		/* Fill the page arrays for all userptrs. */
 		list_for_each_entry(e, &need_pages, tv.head) {
 			struct ttm_tt *ttm = e->robj->tbo.ttm;
 
@@ -917,9 +845,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return -EINVAL;
 		}
 
-		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
-				       chunk_ib->ip_instance, chunk_ib->ring,
-				       &ring);
+		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
+					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
 		if (r)
 			return r;
 
@@ -1021,16 +948,19 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			struct amdgpu_ctx *ctx;
 			struct dma_fence *fence;
 
-			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
-					       deps[j].ip_instance,
-					       deps[j].ring, &ring);
-			if (r)
-				return r;
-
 			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
 			if (ctx == NULL)
 				return -EINVAL;
 
+			r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+						 deps[j].ip_type,
+						 deps[j].ip_instance,
+						 deps[j].ring, &ring);
+			if (r) {
+				amdgpu_ctx_put(ctx);
+				return r;
+			}
+
 			fence = amdgpu_ctx_get_fence(ctx, ring,
 						     deps[j].handle);
 			if (IS_ERR(fence)) {
@@ -1086,6 +1016,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
@@ -1093,6 +1024,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	if (!adev->accel_working)
 		return -EBUSY;
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
 
 	parser.adev = adev;
 	parser.filp = filp;
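The new early -ENODEV checks lean on the vram_lost_counter pair added to amdgpu.h above: the device counter ticks on a VRAM-losing GPU reset, and each fpriv snapshots it at open time. One plausible body for the helper, sketched here only because amdgpu_kms.c is not part of this excerpt:

/* Sketch; the real amdgpu_kms_vram_lost() lives in amdgpu_kms.c. */
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
			  struct amdgpu_fpriv *fpriv)
{
	/* Client is stale if a reset bumped the counter since it opened. */
	return fpriv->vram_lost_counter !=
		atomic_read(&adev->vram_lost_counter);
}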
@@ -1154,21 +1087,28 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
 	struct dma_fence *fence;
 	long r;
 
-	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-			       wait->in.ring, &ring);
-	if (r)
-		return r;
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
 
 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
 	if (ctx == NULL)
 		return -EINVAL;
 
+	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+				 wait->in.ip_type, wait->in.ip_instance,
+				 wait->in.ring, &ring);
+	if (r) {
+		amdgpu_ctx_put(ctx);
+		return r;
+	}
+
 	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
@@ -1204,15 +1144,17 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 	struct dma_fence *fence;
 	int r;
 
-	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
-			       user->ring, &ring);
-	if (r)
-		return ERR_PTR(r);
-
 	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
 	if (ctx == NULL)
 		return ERR_PTR(-EINVAL);
 
+	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
+				 user->ip_instance, user->ring, &ring);
+	if (r) {
+		amdgpu_ctx_put(ctx);
+		return ERR_PTR(r);
+	}
+
 	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
 	amdgpu_ctx_put(ctx);
 
@@ -1333,12 +1275,15 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	union drm_amdgpu_wait_fences *wait = data;
 	uint32_t fence_count = wait->in.fence_count;
 	struct drm_amdgpu_fence *fences_user;
 	struct drm_amdgpu_fence *fences;
 	int r;
 
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
 	/* Get the fences from userspace */
 	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
 			       GFP_KERNEL);
@@ -52,12 +52,20 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		struct amd_sched_rq *rq;
 
 		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+
+		if (ring == &adev->gfx.kiq.ring)
+			continue;
+
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
			goto failed;
 	}
 
+	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
+	if (r)
+		goto failed;
+
 	return 0;
 
 failed:
@@ -86,6 +94,8 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	for (i = 0; i < adev->num_rings; i++)
 		amd_sched_entity_fini(&adev->rings[i]->sched,
 				      &ctx->rings[i].entity);
+
+	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -54,8 +54,14 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 
+MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+
+#define AMDGPU_RESUME_MS		2000
+
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
 	"TAHITI",
@@ -77,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
 	"POLARIS11",
 	"POLARIS12",
 	"VEGA10",
+	"RAVEN",
 	"LAST",
 };
 
@@ -478,9 +485,8 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 
 /*
  * amdgpu_wb_*()
- * Writeback is the the method by which the the GPU updates special pages
- * in memory with the status of certain GPU events (fences, ring pointers,
- * etc.).
+ * Writeback is the method by which the GPU updates special pages in memory
+ * with the status of certain GPU events (fences, ring pointers,etc.).
  */
 
 /**
@@ -506,7 +512,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
  *
  * @adev: amdgpu_device pointer
  *
- * Disables Writeback and frees the Writeback memory (all asics).
+ * Initializes writeback and allocates writeback memory (all asics).
  * Used at driver startup.
  * Returns 0 on success or an -error on failure.
  */
@@ -614,7 +620,7 @@ void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
  * @mc: memory controller structure holding memory informations
  * @base: base address at which to put VRAM
  *
- * Function will place try to place VRAM at base address provided
+ * Function will try to place VRAM at base address provided
  * as parameter (which is so far either PCI aperture address or
  * for IGP TOM base address).
  *
@@ -636,7 +642,7 @@
  * ones)
  *
  * Note: IGP TOM addr should be the same as the aperture addr, we don't
- * explicitly check for that thought.
+ * explicitly check for that though.
  *
  * FIXME: when reducing VRAM size align new size on power of 2.
  */
@@ -1342,6 +1348,9 @@ int amdgpu_ip_block_add(struct amdgpu_device *adev,
 	if (!ip_block_version)
 		return -EINVAL;
 
+	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
+		  ip_block_version->funcs->name);
+
 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
 
 	return 0;
@@ -1392,6 +1401,104 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	}
 }
 
+static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+	const struct gpu_info_firmware_header_v1_0 *hdr;
+
+	adev->firmware.gpu_info_fw = NULL;
+
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS12:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_VERDE:
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+#endif
+	default:
+		return 0;
+	case CHIP_VEGA10:
+		chip_name = "vega10";
+		break;
+	case CHIP_RAVEN:
+		chip_name = "raven";
+		break;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
+	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
+	if (err) {
+		dev_err(adev->dev,
+			"Failed to load gpu_info firmware \"%s\"\n",
+			fw_name);
+		goto out;
+	}
+	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
+	if (err) {
+		dev_err(adev->dev,
+			"Failed to validate gpu_info firmware \"%s\"\n",
+			fw_name);
+		goto out;
+	}
+
+	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
+	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
+
+	switch (hdr->version_major) {
+	case 1:
+	{
+		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
+			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
+								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
+		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches =
+			le32_to_cpu(gpu_info_fw->gc_num_tccs);
+		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf =
+			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd =
+			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu =
+			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
+		break;
+	}
+	default:
+		dev_err(adev->dev,
+			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
+		err = -EINVAL;
+		goto out;
+	}
+out:
+	return err;
+}
+
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;
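
The amdgpu_device_parse_gpu_info_fw() addition above follows a common firmware-table pattern: read a small common header, switch on its major version, then overlay the matching payload struct at the offset the header supplies. A standalone sketch of that pattern with a hypothetical blob layout (struct names, fields, and the example bytes are illustrative, not the real amdgpu firmware format; the example blob assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical header, analogous in spirit to gpu_info_firmware_header_v1_0:
     * a major version plus the byte offset of the versioned payload. */
    struct blob_header {
    	uint32_t version_major;
    	uint32_t payload_offset;
    };

    struct blob_v1 {		/* hypothetical v1 payload */
    	uint32_t num_se;
    	uint32_t num_cu_per_sh;
    };

    static int parse_blob(const uint8_t *data, size_t len)
    {
    	struct blob_header hdr;
    	struct blob_v1 v1;

    	if (len < sizeof(hdr))
    		return -1;
    	memcpy(&hdr, data, sizeof(hdr));

    	switch (hdr.version_major) {
    	case 1:
    		if (hdr.payload_offset > len - sizeof(v1))
    			return -1;	/* payload must fit inside the blob */
    		memcpy(&v1, data + hdr.payload_offset, sizeof(v1));
    		printf("SEs: %u, CUs per SH: %u\n", v1.num_se, v1.num_cu_per_sh);
    		return 0;
    	default:
    		fprintf(stderr, "unsupported table version %u\n", hdr.version_major);
    		return -1;
    	}
    }

    int main(void)
    {
    	/* version 1, payload at offset 8, then num_se=4, num_cu_per_sh=9 */
    	uint8_t blob[16] = { 1, 0, 0, 0, 8, 0, 0, 0, 4, 0, 0, 0, 9, 0, 0, 0 };

    	return parse_blob(blob, sizeof(blob));
    }

The bounds check before each overlay is the important part: a bad offset in the file must fail the parse rather than read past the buffer.
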
@@ -1445,6 +1552,10 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		break;
 #endif
 	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		if (adev->asic_type == CHIP_RAVEN)
+			adev->family = AMDGPU_FAMILY_RV;
+		else
 		adev->family = AMDGPU_FAMILY_AI;
 
 		r = soc15_set_ip_blocks(adev);
@@ -1456,6 +1567,10 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
+	r = amdgpu_device_parse_gpu_info_fw(adev);
+	if (r)
+		return r;
+
 	if (amdgpu_sriov_vf(adev)) {
 		r = amdgpu_virt_request_full_gpu(adev, true);
 		if (r)
@@ -1464,7 +1579,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
-			DRM_ERROR("disabled ip block: %d\n", i);
+			DRM_ERROR("disabled ip block: %d <%s>\n",
+				  i, adev->ip_blocks[i].version->funcs->name);
 			adev->ip_blocks[i].status.valid = false;
 		} else {
 			if (adev->ip_blocks[i].version->funcs->early_init) {
@@ -1552,6 +1668,40 @@ static int amdgpu_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
+{
+	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+}
+
+static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
+{
+	return !!memcmp(adev->gart.ptr, adev->reset_magic,
+			AMDGPU_RESET_MAGIC_NUM);
+}
+
+static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+{
+	int i = 0, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+			/* enable clockgating to save power */
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+		}
+	}
+	return 0;
+}
+
 static int amdgpu_late_init(struct amdgpu_device *adev)
 {
 	int i = 0, r;
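
The reset-magic pair added above is a simple heartbeat for memory contents across a reset: amdgpu_fill_reset_magic() snapshots the first AMDGPU_RESET_MAGIC_NUM bytes of a GART-visible buffer, and amdgpu_check_vram_lost() memcmp()s the same bytes afterwards; any difference means the contents did not survive. A minimal userspace sketch of the same idea (names and the size constant are illustrative):

    #include <stdbool.h>
    #include <string.h>

    #define RESET_MAGIC_NUM 64	/* illustrative; plays the role of AMDGPU_RESET_MAGIC_NUM */

    struct dev_state {
    	unsigned char reset_magic[RESET_MAGIC_NUM];
    	unsigned char *vram_ptr;	/* stands in for adev->gart.ptr */
    };

    /* snapshot the sentinel bytes before a reset */
    static void fill_reset_magic(struct dev_state *s)
    {
    	memcpy(s->reset_magic, s->vram_ptr, RESET_MAGIC_NUM);
    }

    /* after resume: true means the bytes changed, i.e. the memory was lost */
    static bool check_vram_lost(const struct dev_state *s)
    {
    	return memcmp(s->vram_ptr, s->reset_magic, RESET_MAGIC_NUM) != 0;
    }

    int main(void)
    {
    	unsigned char vram[RESET_MAGIC_NUM] = { 0xde, 0xad };
    	struct dev_state s = { .vram_ptr = vram };

    	fill_reset_magic(&s);
    	vram[0] ^= 0xff;	/* simulate a reset scrambling the buffer */
    	return check_vram_lost(&s) ? 0 : 1;
    }
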
@@ -1568,20 +1718,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 			}
 			adev->ip_blocks[i].status.late_initialized = true;
 		}
-		/* skip CG for VCE/UVD, it's handled specially */
-		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
-		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
-			/* enable clockgating to save power */
-			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
-										     AMD_CG_STATE_GATE);
-			if (r) {
-				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
-				return r;
-			}
-		}
 	}
 
+	mod_delayed_work(system_wq, &adev->late_init_work,
+			msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+	amdgpu_fill_reset_magic(adev);
+
 	return 0;
 }
 
@@ -1672,6 +1815,13 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void amdgpu_late_init_func_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, late_init_work.work);
+	amdgpu_late_set_cg_state(adev);
+}
+
 int amdgpu_suspend(struct amdgpu_device *adev)
 {
 	int i, r;
@@ -1717,19 +1867,25 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
+	static enum amd_ip_block_type ip_order[] = {
+		AMD_IP_BLOCK_TYPE_GMC,
+		AMD_IP_BLOCK_TYPE_COMMON,
+		AMD_IP_BLOCK_TYPE_IH,
+	};
 
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
-		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
-			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+		int j;
+		struct amdgpu_ip_block *block;
 
-		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-			return r;
+		for (j = 0; j < adev->num_ip_blocks; j++) {
+			block = &adev->ip_blocks[j];
+
+			if (block->version->type != ip_order[i] ||
+			    !block->status.valid)
+				continue;
+
+			r = block->version->funcs->hw_init(adev);
+			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
 		}
 	}
 
@@ -1740,16 +1896,68 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	static enum amd_ip_block_type ip_order[] = {
+		AMD_IP_BLOCK_TYPE_SMC,
+		AMD_IP_BLOCK_TYPE_DCE,
+		AMD_IP_BLOCK_TYPE_GFX,
+		AMD_IP_BLOCK_TYPE_SDMA,
+		AMD_IP_BLOCK_TYPE_VCE,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+		int j;
+		struct amdgpu_ip_block *block;
+
+		for (j = 0; j < adev->num_ip_blocks; j++) {
+			block = &adev->ip_blocks[j];
+
+			if (block->version->type != ip_order[i] ||
+			    !block->status.valid)
+				continue;
+
+			r = block->version->funcs->hw_init(adev);
+			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+{
+	int i, r;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+				adev->ip_blocks[i].version->type ==
+				AMD_IP_BLOCK_TYPE_IH) {
+			r = adev->ip_blocks[i].version->funcs->resume(adev);
+			if (r) {
+				DRM_ERROR("resume of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
 			continue;
-		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
 			DRM_ERROR("resume of IP block <%s> failed %d\n",
 				  adev->ip_blocks[i].version->funcs->name, r);
@@ -1762,20 +1970,14 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
 
 static int amdgpu_resume(struct amdgpu_device *adev)
 {
-	int i, r;
+	int r;
 
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-		r = adev->ip_blocks[i].version->funcs->resume(adev);
-		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-			return r;
-		}
-	}
+	r = amdgpu_resume_phase1(adev);
+	if (r)
+		return r;
+	r = amdgpu_resume_phase2(adev);
 
-	return 0;
+	return r;
 }
 
 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
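
amdgpu_resume() now delegates to the two phases introduced above: phase 1 brings back only the block types the rest of the stack depends on (COMMON, GMC, IH), which lets the reset path check for VRAM loss and recover the GART before phase 2 resumes everything else. A standalone sketch of that ordering, assuming simplified block types and callbacks (all names here are illustrative):

    #include <stdbool.h>

    enum blk_type { BLK_COMMON, BLK_GMC, BLK_IH, BLK_GFX, BLK_SDMA };

    struct blk {
    	enum blk_type type;
    	bool valid;
    	int (*resume)(void);
    };

    static bool is_early(enum blk_type t)
    {
    	return t == BLK_COMMON || t == BLK_GMC || t == BLK_IH;
    }

    /* phase 1 resumes only the early block types, phase 2 the rest */
    static int resume_phase(struct blk *blocks, int n, bool early)
    {
    	int i, r;

    	for (i = 0; i < n; i++) {
    		if (!blocks[i].valid || is_early(blocks[i].type) != early)
    			continue;
    		r = blocks[i].resume();
    		if (r)
    			return r;
    	}
    	return 0;
    }

    static int resume_all(struct blk *blocks, int n)
    {
    	int r = resume_phase(blocks, n, true);	/* phase 1 */

    	if (r)
    		return r;
    	/* ...check for lost memory / recover mappings here... */
    	return resume_phase(blocks, n, false);	/* phase 2 */
    }
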
@@ -1860,8 +2062,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	amdgpu_check_arguments(adev);
 
-	/* Registers mapping */
-	/* TODO: block userspace mapping of io register */
 	spin_lock_init(&adev->mmio_idx_lock);
 	spin_lock_init(&adev->smc_idx_lock);
 	spin_lock_init(&adev->pcie_idx_lock);
@@ -1877,6 +2077,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&adev->gtt_list);
 	spin_lock_init(&adev->gtt_list_lock);
 
+	INIT_LIST_HEAD(&adev->ring_lru_list);
+	spin_lock_init(&adev->ring_lru_list_lock);
+
+	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
+
+	/* Registers mapping */
+	/* TODO: block userspace mapping of io register */
 	if (adev->asic_type >= CHIP_BONAIRE) {
 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
@@ -1989,6 +2196,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	adev->accel_working = true;
 
+	amdgpu_vm_check_compute_bug(adev);
+
 	/* Initialize the buffer migration limit. */
 	if (amdgpu_moverate >= 0)
 		max_MBps = amdgpu_moverate;
@@ -2017,6 +2226,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
 
+	r = amdgpu_debugfs_test_ib_ring_init(adev);
+	if (r)
+		DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
+
 	r = amdgpu_debugfs_firmware_init(adev);
 	if (r)
 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
@@ -2073,7 +2286,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
+	if (adev->firmware.gpu_info_fw) {
+		release_firmware(adev->firmware.gpu_info_fw);
+		adev->firmware.gpu_info_fw = NULL;
+	}
 	adev->accel_working = false;
+	cancel_delayed_work_sync(&adev->late_init_work);
 	/* free i2c buses */
 	amdgpu_i2c_fini(adev);
 	amdgpu_atombios_fini(adev);
@@ -2458,16 +2676,15 @@ err:
  * amdgpu_sriov_gpu_reset - reset the asic
  *
  * @adev: amdgpu device pointer
- * @voluntary: if this reset is requested by guest.
- * (true means by guest and false means by HYPERVISOR )
+ * @job: which job trigger hang
  *
  * Attempt the reset the GPU if it has hung (all asics).
  * for SRIOV case.
  * Returns 0 for success or an error on failure.
  */
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
+int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
 {
-	int i, r = 0;
+	int i, j, r = 0;
 	int resched;
 	struct amdgpu_bo *bo, *tmp;
 	struct amdgpu_ring *ring;
@@ -2480,22 +2697,39 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* block scheduler */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		ring = adev->rings[i];
+	/* we start from the ring trigger GPU hang */
+	j = job ? job->ring->idx : 0;
+
+	/* block scheduler */
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
 		kthread_park(ring->sched.thread);
-		amd_sched_hw_job_reset(&ring->sched);
-	}
 
-	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
-	amdgpu_fence_driver_force_completion(adev);
+		if (job && j != i)
+			continue;
+
+		/* here give the last chance to check if job removed from mirror-list
+		 * since we already pay some time on kthread_park */
+		if (job && list_empty(&job->base.node)) {
+			kthread_unpark(ring->sched.thread);
+			goto give_up_reset;
+		}
+
+		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
+			amd_sched_job_kickout(&job->base);
+
+		/* only do job_reset on the hang ring if @job not NULL */
+		amd_sched_hw_job_reset(&ring->sched);
+
+		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+		amdgpu_fence_driver_force_completion_ring(ring);
+	}
 
 	/* request to take full control of GPU before re-initialization */
-	if (voluntary)
+	if (job)
 		amdgpu_virt_reset_gpu(adev);
 	else
 		amdgpu_virt_request_full_gpu(adev, true);
@@ -2545,20 +2779,28 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
 	}
 	dma_fence_put(fence);
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
+	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
+		ring = adev->rings[i % AMDGPU_MAX_RINGS];
 		if (!ring || !ring->sched.thread)
 			continue;
 
+		if (job && j != i) {
+			kthread_unpark(ring->sched.thread);
+			continue;
+		}
+
 		amd_sched_job_recovery(&ring->sched);
 		kthread_unpark(ring->sched.thread);
 	}
 
 	drm_helper_resume_force_mode(adev->ddev);
+give_up_reset:
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
+	} else {
+		dev_info(adev->dev, "GPU reset successed!\n");
 	}
 
 	adev->gfx.in_reset = false;
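
Both loops in the reworked SR-IOV reset walk every ring, but start at the ring that raised the hang (index j) and wrap around with a modulo, so the offending ring is handled first and the others follow in order. The traversal in isolation (a minimal sketch; MAX_RINGS stands in for AMDGPU_MAX_RINGS):

    #include <stdio.h>

    #define MAX_RINGS 8

    /* visit all ring indices exactly once, starting at j and wrapping around */
    static void visit_from(int j)
    {
    	int i;

    	for (i = j; i < j + MAX_RINGS; ++i)
    		printf("%d ", i % MAX_RINGS);	/* j, j+1, ..., MAX_RINGS-1, 0, ..., j-1 */
    	printf("\n");
    }

    int main(void)
    {
    	visit_from(5);	/* prints: 5 6 7 0 1 2 3 4 */
    	return 0;
    }
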
@@ -2578,10 +2820,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
 	int i, r;
 	int resched;
-	bool need_full_reset;
-
-	if (amdgpu_sriov_vf(adev))
-		return amdgpu_sriov_gpu_reset(adev, true);
+	bool need_full_reset, vram_lost = false;
 
 	if (!amdgpu_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
@@ -2641,16 +2880,27 @@ retry:
 
 	if (!r) {
 		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
-		r = amdgpu_resume(adev);
-	}
-	if (!r) {
-		amdgpu_irq_gpu_reset_resume_helper(adev);
-		if (need_full_reset && amdgpu_need_backup(adev)) {
-			r = amdgpu_ttm_recover_gart(adev);
-			if (r)
-				DRM_ERROR("gart recovery failed!!!\n");
+		r = amdgpu_resume_phase1(adev);
+		if (r)
+			goto out;
+		vram_lost = amdgpu_check_vram_lost(adev);
+		if (vram_lost) {
+			DRM_ERROR("VRAM is lost!\n");
+			atomic_inc(&adev->vram_lost_counter);
 		}
+		r = amdgpu_ttm_recover_gart(adev);
+		if (r)
+			goto out;
+		r = amdgpu_resume_phase2(adev);
+		if (r)
+			goto out;
+		if (vram_lost)
+			amdgpu_fill_reset_magic(adev);
+	}
+out:
+	if (!r) {
+		amdgpu_irq_gpu_reset_resume_helper(adev);
 		r = amdgpu_ib_ring_tests(adev);
 		if (r) {
 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
@@ -2712,10 +2962,11 @@ retry:
 	drm_helper_resume_force_mode(adev->ddev);
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-	if (r) {
+	if (r)
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
-	}
+	else
+		dev_info(adev->dev, "GPU reset successed!\n");
 
 	return r;
 }
@@ -3499,11 +3750,60 @@ static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
 	}
 }
 
+static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int r = 0, i;
+
+	/* hold on the scheduler */
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->sched.thread)
+			continue;
+		kthread_park(ring->sched.thread);
+	}
+
+	seq_printf(m, "run ib test:\n");
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		seq_printf(m, "ib ring tests failed (%d).\n", r);
+	else
+		seq_printf(m, "ib ring tests passed.\n");
+
+	/* go on the scheduler */
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->sched.thread)
+			continue;
+		kthread_unpark(ring->sched.thread);
+	}
+
+	return 0;
+}
+
+static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
+	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
+};
+
+static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
+{
+	return amdgpu_debugfs_add_files(adev,
+					amdgpu_debugfs_test_ib_ring_list, 1);
+}
+
 int amdgpu_debugfs_init(struct drm_minor *minor)
 {
 	return 0;
 }
 #else
+static int amdgpu_debugfs_test_ib_init(struct amdgpu_device *adev)
+{
+	return 0;
+}
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	return 0;
@@ -65,9 +65,11 @@
  * - 3.13.0 - Add PRT support
  * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
  * - 3.15.0 - Export more gpu info for gfx9
+ * - 3.16.0 - Add reserved vmid support
+ * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	15
+#define KMS_DRIVER_MINOR	17
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -92,7 +94,8 @@ int amdgpu_vm_size = -1;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
-int amdgpu_vram_page_split = 1024;
+int amdgpu_vram_page_split = 512;
+int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
@@ -110,6 +113,8 @@ int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
 int amdgpu_cntl_sb_buf_per_se = 0;
 int amdgpu_param_buf_per_se = 0;
+int amdgpu_job_hang_limit = 0;
+int amdgpu_lbpw = -1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -177,6 +182,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 
+MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
+module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+
 MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
 module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 
@@ -232,6 +240,24 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
+MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
+module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
+
+MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+
+#ifdef CONFIG_DRM_AMDGPU_SI
+int amdgpu_si_support = 0;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
+module_param_named(si_support, amdgpu_si_support, int, 0444);
+#endif
+
+#ifdef CONFIG_DRM_AMDGPU_CIK
+int amdgpu_cik_support = 0;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
+module_param_named(cik_support, amdgpu_cik_support, int, 0444);
+#endif
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -460,6 +486,9 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+	/* Raven */
+	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+
 	{0, 0, 0}
 };
 
@@ -491,6 +520,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
 static int amdgpu_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
+	struct drm_device *dev;
 	unsigned long flags = ent->driver_data;
 	int ret;
 
@@ -513,7 +543,29 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	return drm_get_pci_dev(pdev, ent, &kms_driver);
+	dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err_free;
+
+	dev->pdev = pdev;
+
+	pci_set_drvdata(pdev, dev);
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_pci;
+
+	return 0;
+
+err_pci:
+	pci_disable_device(pdev);
+err_free:
+	drm_dev_unref(dev);
+	return ret;
 }
 
 static void
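
The probe path above replaces the drm_get_pci_dev() helper with explicit drm_dev_alloc()/drm_dev_register() calls and a goto-based unwind, where each error label undoes exactly the steps that succeeded before it. A self-contained sketch of that unwind shape (the alloc/enable/register helpers are placeholders for illustration, not the DRM API):

    #include <stdio.h>

    /* placeholder steps; each returns 0 on success */
    static int alloc_dev(void)    { puts("alloc");    return 0; }
    static int enable_hw(void)    { puts("enable");   return 0; }
    static int register_dev(void) { puts("register"); return -1; /* simulate failure */ }
    static void disable_hw(void)  { puts("disable"); }
    static void free_dev(void)    { puts("free"); }

    static int probe(void)
    {
    	int ret;

    	ret = alloc_dev();
    	if (ret)
    		return ret;
    	ret = enable_hw();
    	if (ret)
    		goto err_free;
    	ret = register_dev();
    	if (ret)
    		goto err_disable;
    	return 0;

    err_disable:
    	disable_hw();	/* undo enable_hw() */
    err_free:
    	free_dev();	/* undo alloc_dev() */
    	return ret;
    }

    int main(void)
    {
    	return probe() ? 1 : 0;	/* prints: alloc, enable, register, disable, free */
    }
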
@@ -521,7 +573,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	drm_put_dev(dev);
+	drm_dev_unregister(dev);
+	drm_dev_unref(dev);
 }
 
 static void
@@ -817,7 +870,7 @@ static int __init amdgpu_init(void)
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 	/* let modprobe override vga console setting */
-	return drm_pci_init(driver, pdriver);
+	return pci_register_driver(pdriver);
 
 error_sched:
 	amdgpu_fence_slab_fini();
@@ -832,7 +885,7 @@ error_sync:
 static void __exit amdgpu_exit(void)
 {
 	amdgpu_amdkfd_fini();
-	drm_pci_exit(driver, pdriver);
+	pci_unregister_driver(pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
 	amd_sched_fence_slab_fini();
@@ -541,6 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
 	}
 }
 
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
+{
+	if (ring)
+		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+}
+
 /*
  * Common fence implementation
 */
@@ -660,11 +666,17 @@ static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
 	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
 	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
 };
+
+static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
+	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
+};
 #endif
 
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
+	if (amdgpu_sriov_vf(adev))
+		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
 #else
 	return 0;
@@ -224,8 +224,9 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  *
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
+ * Returns 0 for success, -EINVAL for failure.
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
@@ -237,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
-		return;
+		return -EINVAL;
 	}
 
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
@@ -258,6 +259,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 	mb();
 	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	return 0;
 }
 
 /**
@@ -219,16 +219,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
-static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
-{
-	if (r == -EDEADLK) {
-		r = amdgpu_gpu_reset(adev);
-		if (!r)
-			r = -EAGAIN;
-	}
-	return r;
-}
-
 /*
  * GEM ioctls.
 */
@@ -249,20 +239,17 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		      AMDGPU_GEM_CREATE_VRAM_CLEARED|
 		      AMDGPU_GEM_CREATE_SHADOW |
-		      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
-		r = -EINVAL;
-		goto error_unlock;
-	}
+		      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+		return -EINVAL;
 	/* reject invalid gem domains */
 	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
 				 AMDGPU_GEM_DOMAIN_GTT |
 				 AMDGPU_GEM_DOMAIN_VRAM |
 				 AMDGPU_GEM_DOMAIN_GDS |
 				 AMDGPU_GEM_DOMAIN_GWS |
-				 AMDGPU_GEM_DOMAIN_OA)) {
-		r = -EINVAL;
-		goto error_unlock;
-	}
+				 AMDGPU_GEM_DOMAIN_OA))
+		return -EINVAL;
 
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
@@ -274,10 +261,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 			size = size << AMDGPU_GWS_SHIFT;
 		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
 			size = size << AMDGPU_OA_SHIFT;
-		else {
-			r = -EINVAL;
-			goto error_unlock;
-		}
+		else
+			return -EINVAL;
 	}
 	size = roundup(size, PAGE_SIZE);
 
@@ -286,21 +271,17 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 				     args->in.domain_flags,
 				     kernel, &gobj);
 	if (r)
-		goto error_unlock;
+		return r;
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r)
-		goto error_unlock;
+		return r;
 
 	memset(args, 0, sizeof(*args));
 	args->out.handle = handle;
 	return 0;
-
-error_unlock:
-	r = amdgpu_gem_handle_lockup(adev, r);
-	return r;
 }
 
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
@@ -334,7 +315,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 				     AMDGPU_GEM_DOMAIN_CPU, 0,
 				     0, &gobj);
 	if (r)
-		goto handle_lockup;
+		return r;
 
 	bo = gem_to_amdgpu_bo(gobj);
 	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -374,7 +355,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r)
-		goto handle_lockup;
+		return r;
 
 	args->handle = handle;
 	return 0;
@@ -388,9 +369,6 @@ unlock_mmap_sem:
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
-handle_lockup:
-	r = amdgpu_gem_handle_lockup(adev, r);
-
 	return r;
 }
 
@@ -456,7 +434,6 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *robj;
@@ -484,7 +461,6 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		r = ret;
 
 	drm_gem_object_unreference_unlocked(gobj);
-	r = amdgpu_gem_handle_lockup(adev, r);
 	return r;
 }
 
@@ -593,9 +569,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	uint64_t va_flags;
 	int r = 0;
 
-	if (!adev->vm_manager.enabled)
-		return -ENOTTY;
-
 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
 		dev_err(&dev->pdev->dev,
 			"va_address 0x%lX is in reserved area 0x%X\n",
@@ -621,6 +594,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			args->operation);
 		return -EINVAL;
 	}
+	if ((args->operation == AMDGPU_VA_OP_MAP) ||
+	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
+		if (amdgpu_kms_vram_lost(adev, fpriv))
+			return -ENODEV;
+	}
 
 	INIT_LIST_HEAD(&list);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
@@ -108,3 +108,209 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
|
|||||||
p = next + 1;
|
p = next + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
int i, queue, pipe, mec;
|
||||||
|
|
||||||
|
/* policy for amdgpu compute queue ownership */
|
||||||
|
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
|
||||||
|
queue = i % adev->gfx.mec.num_queue_per_pipe;
|
||||||
|
pipe = (i / adev->gfx.mec.num_queue_per_pipe)
|
||||||
|
% adev->gfx.mec.num_pipe_per_mec;
|
||||||
|
mec = (i / adev->gfx.mec.num_queue_per_pipe)
|
||||||
|
/ adev->gfx.mec.num_pipe_per_mec;
|
||||||
|
|
||||||
|
/* we've run out of HW */
|
||||||
|
if (mec >= adev->gfx.mec.num_mec)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (adev->gfx.mec.num_mec > 1) {
|
||||||
|
/* policy: amdgpu owns the first two queues of the first MEC */
|
||||||
|
if (mec == 0 && queue < 2)
|
||||||
|
set_bit(i, adev->gfx.mec.queue_bitmap);
|
||||||
|
} else {
|
||||||
|
/* policy: amdgpu owns all queues in the first pipe */
|
||||||
|
if (mec == 0 && pipe == 0)
|
||||||
|
set_bit(i, adev->gfx.mec.queue_bitmap);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* update the number of active compute rings */
|
||||||
|
adev->gfx.num_compute_rings =
|
||||||
|
bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
|
||||||
|
|
||||||
|
/* If you hit this case and edited the policy, you probably just
|
||||||
|
* need to increase AMDGPU_MAX_COMPUTE_RINGS */
|
||||||
|
if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
|
||||||
|
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_ring *ring)
|
||||||
|
{
|
||||||
|
int queue_bit;
|
||||||
|
int mec, pipe, queue;
|
||||||
|
|
||||||
|
queue_bit = adev->gfx.mec.num_mec
|
||||||
|
* adev->gfx.mec.num_pipe_per_mec
|
||||||
|
* adev->gfx.mec.num_queue_per_pipe;
|
||||||
|
|
||||||
|
while (queue_bit-- >= 0) {
|
||||||
|
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
|
||||||
|
|
||||||
|
/* Using pipes 2/3 from MEC 2 seems cause problems */
|
||||||
|
if (mec == 1 && pipe > 1)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ring->me = mec + 1;
|
||||||
|
+		ring->pipe = pipe;
+		ring->queue = queue;
+
+		return 0;
+	}
+
+	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
+	return -EINVAL;
+}
+
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+			     struct amdgpu_ring *ring,
+			     struct amdgpu_irq_src *irq)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	int r = 0;
+
+	mutex_init(&kiq->ring_mutex);
+
+	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+	if (r)
+		return r;
+
+	ring->adev = NULL;
+	ring->ring_obj = NULL;
+	ring->use_doorbell = true;
+	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+
+	r = amdgpu_gfx_kiq_acquire(adev, ring);
+	if (r)
+		return r;
+
+	ring->eop_gpu_addr = kiq->eop_gpu_addr;
+	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+	r = amdgpu_ring_init(adev, ring, 1024,
+			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+	if (r)
+		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+	return r;
+}
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+			      struct amdgpu_irq_src *irq)
+{
+	amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+	amdgpu_ring_fini(ring);
+}
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+{
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+}
+
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+			unsigned hpd_size)
+{
+	int r;
+	u32 *hpd;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+				    &kiq->eop_gpu_addr, (void **)&hpd);
+	if (r) {
+		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+		return r;
+	}
+
+	memset(hpd, 0, hpd_size);
+
+	r = amdgpu_bo_reserve(kiq->eop_obj, true);
+	if (unlikely(r != 0))
+		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
+	amdgpu_bo_kunmap(kiq->eop_obj);
+	amdgpu_bo_unreserve(kiq->eop_obj);
+
+	return 0;
+}
+
+/* create MQD for each compute queue */
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+				   unsigned mqd_size)
+{
+	struct amdgpu_ring *ring = NULL;
+	int r, i;
+
+	/* create MQD for KIQ */
+	ring = &adev->gfx.kiq.ring;
+	if (!ring->mqd_obj) {
+		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
+		if (r) {
+			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+			return r;
+		}
+
+		/* prepare MQD backup */
+		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
+		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+	}
+
+	/* create MQD for each KCQ */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+		if (!ring->mqd_obj) {
+			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
+			if (r) {
+				dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+				return r;
+			}
+
+			/* prepare MQD backup */
+			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+			if (!adev->gfx.mec.mqd_backup[i])
+				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+		}
+	}
+
+	return 0;
+}
+
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = NULL;
+	int i;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+		kfree(adev->gfx.mec.mqd_backup[i]);
+		amdgpu_bo_free_kernel(&ring->mqd_obj,
+				      &ring->mqd_gpu_addr,
+				      &ring->mqd_ptr);
+	}
+
+	ring = &adev->gfx.kiq.ring;
+	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
+	amdgpu_bo_free_kernel(&ring->mqd_obj,
+			      &ring->mqd_gpu_addr,
+			      &ring->mqd_ptr);
+}

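The MQD setup above keeps one backup slot per compute ring and reserves the final slot, index AMDGPU_MAX_COMPUTE_RINGS, for the KIQ, so the backing array has to be sized rings + 1. A compilable standalone sketch of that indexing convention (the array size and allocation sizes below are illustrative, not driver values):

#include <stdio.h>
#include <stdlib.h>

#define MAX_COMPUTE_RINGS 8	/* illustrative; not the driver constant */

int main(void)
{
	void *mqd_backup[MAX_COMPUTE_RINGS + 1] = {0};	/* +1: KIQ sentinel slot */
	int num_rings = 4;	/* rings actually in use on this toy "asic" */
	int i;

	for (i = 0; i < num_rings; i++)
		mqd_backup[i] = malloc(64);		/* per-KCQ backup */
	mqd_backup[MAX_COMPUTE_RINGS] = malloc(64);	/* KIQ backup, last slot */

	printf("KIQ backup lives at index %d\n", MAX_COMPUTE_RINGS);

	for (i = 0; i < num_rings; i++)
		free(mqd_backup[i]);
	free(mqd_backup[MAX_COMPUTE_RINGS]);
	return 0;
}
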
@@ -30,4 +30,64 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
 				 unsigned max_sh);
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
+			     struct amdgpu_ring *ring,
+			     struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
+			      struct amdgpu_irq_src *irq);
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+			unsigned hpd_size);
+
+int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
+				   unsigned mqd_size);
+void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_gfx_create_bitmask - create a bitmask
+ *
+ * @bit_width: length of the mask
+ *
+ * create a variable length bit mask.
+ * Returns the bitmask.
+ */
+static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
+{
+	return (u32)((1ULL << bit_width) - 1);
+}
+
+static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev,
+					  int mec, int pipe, int queue)
+{
+	int bit = 0;
+
+	bit += mec * adev->gfx.mec.num_pipe_per_mec
+		* adev->gfx.mec.num_queue_per_pipe;
+	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
+	bit += queue;
+
+	return bit;
+}
+
+static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
+					   int *mec, int *pipe, int *queue)
+{
+	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
+	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
+		% adev->gfx.mec.num_pipe_per_mec;
+	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
+		/ adev->gfx.mec.num_pipe_per_mec;
+
+}
+static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
+						   int mec, int pipe, int queue)
+{
+	return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
+			adev->gfx.mec.queue_bitmap);
+}
+
 #endif

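The queue_to_bit()/bit_to_queue() helpers above flatten a (mec, pipe, queue) triple into a single index into the MEC queue bitmap and decode it again with the matching div/mod chain. A standalone sketch of the same arithmetic that round-trips every index (the pipe and queue counts are made-up example values, not hardware constants):

#include <assert.h>
#include <stdio.h>

/* Example topology only; the real values come from adev->gfx.mec. */
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8

static int queue_to_bit(int mec, int pipe, int queue)
{
	return mec * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE +
	       pipe * NUM_QUEUE_PER_PIPE + queue;
}

static void bit_to_queue(int bit, int *mec, int *pipe, int *queue)
{
	*queue = bit % NUM_QUEUE_PER_PIPE;
	*pipe = (bit / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
	*mec = (bit / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;
}

int main(void)
{
	int mec, pipe, queue;
	int bit;

	/* Round-trip every bit of a hypothetical 2-MEC part. */
	for (bit = 0; bit < 2 * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE; bit++) {
		bit_to_queue(bit, &mec, &pipe, &queue);
		assert(queue_to_bit(mec, pipe, queue) == bit);
	}
	printf("round-trip OK\n");
	return 0;
}
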
@@ -121,6 +121,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
+	struct dma_fence *tmp = NULL;
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
@@ -160,8 +161,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
-	if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
+
+	if (ring->funcs->emit_pipeline_sync && job &&
+	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		amdgpu_ring_emit_pipeline_sync(ring);
+		dma_fence_put(tmp);
+	}
+
+	if (ring->funcs->insert_start)
+		ring->funcs->insert_start(ring);
+
 	if (vm) {
 		r = amdgpu_vm_flush(ring, job);
@@ -188,8 +197,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		status |= AMDGPU_HAVE_CTX_SWITCH;
 		status |= job->preamble_status;
 
-		if (vm)
-			status |= AMDGPU_VM_DOMAIN;
 		amdgpu_ring_emit_cntxcntl(ring, status);
 	}
@@ -208,6 +215,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		need_ctx_switch = false;
 	}
+
+	if (ring->funcs->emit_tmz)
+		amdgpu_ring_emit_tmz(ring, false);
+
 	if (ring->funcs->emit_hdp_invalidate
 #ifdef CONFIG_X86_64
 	    && !(adev->flags & AMD_IS_APU)

@@ -62,8 +62,9 @@ enum amdgpu_ih_clientid
 	AMDGPU_IH_CLIENTID_MP0		= 0x1e,
 	AMDGPU_IH_CLIENTID_MP1		= 0x1f,
 
-	AMDGPU_IH_CLIENTID_MAX
+	AMDGPU_IH_CLIENTID_MAX,
+
+	AMDGPU_IH_CLIENTID_VCN	= AMDGPU_IH_CLIENTID_UVD
 };
 
 #define AMDGPU_IH_CLIENTID_LEGACY 0

@@ -83,6 +83,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
 						  reset_work);
 
+	if (!amdgpu_sriov_vf(adev))
 		amdgpu_gpu_reset(adev);
 }

@@ -36,6 +36,10 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 		  job->base.sched->name,
 		  atomic_read(&job->ring->fence_drv.last_seq),
 		  job->ring->fence_drv.sync_seq);
+
+	if (amdgpu_sriov_vf(job->adev))
+		amdgpu_sriov_gpu_reset(job->adev, job);
+	else
 		amdgpu_gpu_reset(job->adev);
 }
 
@@ -57,9 +61,10 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
-	(*job)->need_pipeline_sync = false;
 
 	amdgpu_sync_create(&(*job)->sync);
+	amdgpu_sync_create(&(*job)->dep_sync);
+	amdgpu_sync_create(&(*job)->sched_sync);
 
 	return 0;
 }
@@ -98,6 +103,8 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
+	amdgpu_sync_free(&job->dep_sync);
+	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
 
@@ -107,6 +114,8 @@ void amdgpu_job_free(struct amdgpu_job *job)
 
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
+	amdgpu_sync_free(&job->dep_sync);
+	amdgpu_sync_free(&job->sched_sync);
 	kfree(job);
 }
 
@@ -138,11 +147,18 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 
-	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
+	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+	int r;
+
+	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence to sync (%d)\n", r);
+	}
+	if (!fence)
+		fence = amdgpu_sync_get_fence(&job->sync);
 	while (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
-		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
 				      &job->base.s_fence->finished,
@@ -153,9 +169,6 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
-	if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
-		job->need_pipeline_sync = true;
-
 	return fence;
 }
 
@@ -163,6 +176,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
 	struct dma_fence *fence = NULL;
 	struct amdgpu_job *job;
+	struct amdgpu_fpriv *fpriv = NULL;
 	int r;
 
 	if (!sched_job) {
@@ -174,10 +188,16 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
+	if (job->vm)
+		fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
+	/* skip ib schedule when vram is lost */
+	if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+		DRM_ERROR("Skip scheduling IBs!\n");
+	else {
 		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
 		if (r)
 			DRM_ERROR("Error scheduling IBs (%d)\n", r);
+	}
 	/* if gpu reset, hw fence will be replaced here */
 	dma_fence_put(job->fence);
 	job->fence = dma_fence_get(fence);

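The dependency rework above makes amdgpu_job_dependency() drain the new dep_sync container first and only fall back to the job's implicit sync object when no explicit dependency is pending. A toy model of that lookup order, with plain integers standing in for fences (nothing here is amdgpu API):

#include <stdio.h>

struct job { int dep_sync; int sync; };	/* 0 means "nothing pending" */

static int next_dependency(struct job *j)
{
	int fence = j->dep_sync;	/* explicit dependencies first */
	j->dep_sync = 0;

	if (!fence) {			/* then the implicit sync fences */
		fence = j->sync;
		j->sync = 0;
	}
	return fence;
}

int main(void)
{
	struct job j = { .dep_sync = 7, .sync = 9 };

	printf("%d\n", next_dependency(&j));	/* 7: dep_sync wins */
	printf("%d\n", next_dependency(&j));	/* 9: fall back to sync */
	printf("%d\n", next_dependency(&j));	/* 0: nothing left */
	return 0;
}
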
@@ -87,6 +87,41 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct amdgpu_device *adev;
 	int r, acpi_status;
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+	if (!amdgpu_si_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_TAHITI:
+		case CHIP_PITCAIRN:
+		case CHIP_VERDE:
+		case CHIP_OLAND:
+		case CHIP_HAINAN:
+			dev_info(dev->dev,
+				 "SI support provided by radeon.\n");
+			dev_info(dev->dev,
+				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	if (!amdgpu_cik_support) {
+		switch (flags & AMD_ASIC_MASK) {
+		case CHIP_KAVERI:
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+		case CHIP_KABINI:
+		case CHIP_MULLINS:
+			dev_info(dev->dev,
+				 "CIK support provided by radeon.\n");
+			dev_info(dev->dev,
+				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+				);
+			return -ENODEV;
+		}
+	}
+#endif
+
 	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 	if (adev == NULL) {
 		return -ENOMEM;
@@ -235,6 +270,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct drm_amdgpu_info *info = data;
 	struct amdgpu_mode_info *minfo = &adev->mode_info;
 	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -247,6 +283,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
 	if (!info->return_size || !info->return_pointer)
 		return -EINVAL;
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
 
 	switch (info->query) {
 	case AMDGPU_INFO_ACCEL_WORKING:
@@ -319,6 +357,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 			ib_size_alignment = 1;
 			break;
+		case AMDGPU_HW_IP_VCN_DEC:
+			type = AMD_IP_BLOCK_TYPE_VCN;
+			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 16;
+			break;
+		case AMDGPU_HW_IP_VCN_ENC:
+			type = AMD_IP_BLOCK_TYPE_VCN;
+			for (i = 0; i < adev->vcn.num_enc_rings; i++)
+				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 1;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -361,6 +412,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		case AMDGPU_HW_IP_UVD_ENC:
 			type = AMD_IP_BLOCK_TYPE_UVD;
 			break;
+		case AMDGPU_HW_IP_VCN_DEC:
+		case AMDGPU_HW_IP_VCN_ENC:
+			type = AMD_IP_BLOCK_TYPE_VCN;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -397,6 +452,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	case AMDGPU_INFO_NUM_EVICTIONS:
 		ui64 = atomic64_read(&adev->num_evictions);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
+		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
 		ui64 = atomic64_read(&adev->vram_usage);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -730,6 +788,12 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 	vga_switcheroo_process_delayed_switch();
 }
 
+bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
+			  struct amdgpu_fpriv *fpriv)
+{
+	return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
+}
+
 /**
  * amdgpu_driver_open_kms - drm callback for open
 *
@@ -757,7 +821,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_suspend;
 	}
 
-	r = amdgpu_vm_init(adev, &fpriv->vm);
+	r = amdgpu_vm_init(adev, &fpriv->vm,
+			   AMDGPU_VM_CONTEXT_GFX);
 	if (r) {
 		kfree(fpriv);
 		goto out_suspend;
@@ -782,6 +847,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
+	fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	file_priv->driver_priv = fpriv;
 
 out_suspend:
@@ -814,8 +880,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 
+	if (adev->asic_type != CHIP_RAVEN) {
 		amdgpu_uvd_free_handles(adev, file_priv);
 		amdgpu_vce_free_handles(adev, file_priv);
+	}
 
 	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
 
@@ -948,6 +1016,7 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	/* KMS */
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),

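amdgpu_kms_vram_lost() above is a generation-counter check: every file descriptor snapshots the device-wide vram_lost_counter when it opens, and a later mismatch means VRAM contents were invalidated (for example by a GPU reset) while that client was active. A minimal userspace-style sketch of the pattern, with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct device { atomic_uint vram_lost; };	/* bumped on every reset */
struct client { unsigned snapshot; };		/* taken at open() time  */

static void client_open(struct client *c, struct device *d)
{
	c->snapshot = atomic_load(&d->vram_lost);
}

static bool client_vram_lost(const struct client *c, struct device *d)
{
	return c->snapshot != atomic_load(&d->vram_lost);
}

int main(void)
{
	struct device dev = { .vram_lost = 0 };
	struct client cl;

	client_open(&cl, &dev);
	printf("lost before reset: %d\n", client_vram_lost(&cl, &dev));
	atomic_fetch_add(&dev.vram_lost, 1);	/* a GPU reset happened */
	printf("lost after reset:  %d\n", client_vram_lost(&cl, &dev));
	return 0;
}
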
@@ -960,6 +960,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return -EINVAL;
 
 	/* hurrah the memory is not visible ! */
+	atomic64_inc(&adev->num_vram_cpu_page_faults);
 	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < abo->placement.num_placement; i++) {

@@ -72,6 +72,7 @@ static int amdgpu_pp_early_init(void *handle)
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_VEGA10:
+	case CHIP_RAVEN:
 		adev->pp_enabled = true;
 		if (amdgpu_create_pp_handle(adev))
 			return -EINVAL;

@@ -30,6 +30,7 @@
 #include "amdgpu_ucode.h"
 #include "soc15_common.h"
 #include "psp_v3_1.h"
+#include "psp_v10_0.h"
 
 static void psp_set_funcs(struct amdgpu_device *adev);
 
@@ -61,6 +62,12 @@ static int psp_sw_init(void *handle)
 		psp->compare_sram_data = psp_v3_1_compare_sram_data;
 		psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
 		break;
+	case CHIP_RAVEN:
+		psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
+		psp->ring_init = psp_v10_0_ring_init;
+		psp->cmd_submit = psp_v10_0_cmd_submit;
+		psp->compare_sram_data = psp_v10_0_compare_sram_data;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -230,6 +237,13 @@ static int psp_asd_load(struct psp_context *psp)
 	int ret;
 	struct psp_gfx_cmd_resp *cmd;
 
+	/* If PSP version doesn't match ASD version, asd loading will be failed.
+	 * add workaround to bypass it for sriov now.
+	 * TODO: add version check to make it common
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;
@@ -542,3 +556,12 @@ const struct amdgpu_ip_block_version psp_v3_1_ip_block =
 	.rev = 0,
 	.funcs = &psp_ip_funcs,
 };
+
+const struct amdgpu_ip_block_version psp_v10_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_PSP,
+	.major = 10,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &psp_ip_funcs,
+};

@@ -138,4 +138,6 @@ extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
 extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 			uint32_t field_val, uint32_t mask, bool check_changed);
 
+extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+
 #endif

drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c (new file, 299 lines)
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ring.h"
+
+static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
+				    int hw_ip)
+{
+	if (!mapper)
+		return -EINVAL;
+
+	if (hw_ip > AMDGPU_MAX_IP_NUM)
+		return -EINVAL;
+
+	mapper->hw_ip = hw_ip;
+	mutex_init(&mapper->lock);
+
+	memset(mapper->queue_map, 0, sizeof(mapper->queue_map));
+
+	return 0;
+}
+
+static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
+					  int ring)
+{
+	return mapper->queue_map[ring];
+}
+
+static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
+			     int ring, struct amdgpu_ring *pring)
+{
+	if (WARN_ON(mapper->queue_map[ring])) {
+		DRM_ERROR("Un-expected ring re-map\n");
+		return -EINVAL;
+	}
+
+	mapper->queue_map[ring] = pring;
+
+	return 0;
+}
+
+static int amdgpu_identity_map(struct amdgpu_device *adev,
+			       struct amdgpu_queue_mapper *mapper,
+			       int ring,
+			       struct amdgpu_ring **out_ring)
+{
+	switch (mapper->hw_ip) {
+	case AMDGPU_HW_IP_GFX:
+		*out_ring = &adev->gfx.gfx_ring[ring];
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		*out_ring = &adev->gfx.compute_ring[ring];
+		break;
+	case AMDGPU_HW_IP_DMA:
+		*out_ring = &adev->sdma.instance[ring].ring;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		*out_ring = &adev->uvd.ring;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		*out_ring = &adev->vce.ring[ring];
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		*out_ring = &adev->uvd.ring_enc[ring];
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		*out_ring = &adev->vcn.ring_dec;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		*out_ring = &adev->vcn.ring_enc[ring];
+		break;
+	default:
+		*out_ring = NULL;
+		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+		return -EINVAL;
+	}
+
+	return amdgpu_update_cached_map(mapper, ring, *out_ring);
+}
+
+static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
+{
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_GFX:
+		return AMDGPU_RING_TYPE_GFX;
+	case AMDGPU_HW_IP_COMPUTE:
+		return AMDGPU_RING_TYPE_COMPUTE;
+	case AMDGPU_HW_IP_DMA:
+		return AMDGPU_RING_TYPE_SDMA;
+	case AMDGPU_HW_IP_UVD:
+		return AMDGPU_RING_TYPE_UVD;
+	case AMDGPU_HW_IP_VCE:
+		return AMDGPU_RING_TYPE_VCE;
+	default:
+		DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
+		return -1;
+	}
+}
+
+static int amdgpu_lru_map(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mapper *mapper,
+			  int user_ring,
+			  struct amdgpu_ring **out_ring)
+{
+	int r, i, j;
+	int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
+	int ring_blacklist[AMDGPU_MAX_RINGS];
+	struct amdgpu_ring *ring;
+
+	/* 0 is a valid ring index, so initialize to -1 */
+	memset(ring_blacklist, 0xff, sizeof(ring_blacklist));
+
+	for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
+		ring = mapper->queue_map[i];
+		if (ring)
+			ring_blacklist[j++] = ring->idx;
+	}
+
+	r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
+				j, out_ring);
+	if (r)
+		return r;
+
+	return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
+}
+
+/**
+ * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ *
+ * Initialize the the selected @mgr (all asics).
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr)
+{
+	int i, r;
+
+	if (!adev || !mgr)
+		return -EINVAL;
+
+	memset(mgr, 0, sizeof(*mgr));
+
+	for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
+		r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ *
+ * De-initialize the the selected @mgr (all asics).
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr)
+{
+	return 0;
+}
+
+/**
+ * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ * @hw_ip: HW IP enum
+ * @instance: HW instance
+ * @ring: user ring id
+ * @our_ring: pointer to mapped amdgpu_ring
+ *
+ * Map a userspace ring id to an appropriate kernel ring. Different
+ * policies are configurable at a HW IP level.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+			 struct amdgpu_queue_mgr *mgr,
+			 int hw_ip, int instance, int ring,
+			 struct amdgpu_ring **out_ring)
+{
+	int r, ip_num_rings;
+	struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
+
+	if (!adev || !mgr || !out_ring)
+		return -EINVAL;
+
+	if (hw_ip >= AMDGPU_MAX_IP_NUM)
+		return -EINVAL;
+
+	if (ring >= AMDGPU_MAX_RINGS)
+		return -EINVAL;
+
+	/* Right now all IPs have only one instance - multiple rings. */
+	if (instance != 0) {
+		DRM_ERROR("invalid ip instance: %d\n", instance);
+		return -EINVAL;
+	}
+
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_GFX:
+		ip_num_rings = adev->gfx.num_gfx_rings;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		ip_num_rings = adev->gfx.num_compute_rings;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		ip_num_rings = adev->sdma.num_instances;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		ip_num_rings = 1;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		ip_num_rings = adev->vce.num_rings;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		ip_num_rings = adev->uvd.num_enc_rings;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		ip_num_rings = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		ip_num_rings = adev->vcn.num_enc_rings;
+		break;
+	default:
+		DRM_ERROR("unknown ip type: %d\n", hw_ip);
+		return -EINVAL;
+	}
+
+	if (ring >= ip_num_rings) {
+		DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
+			  ring, ip_num_rings, hw_ip);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mapper->lock);
+
+	*out_ring = amdgpu_get_cached_map(mapper, ring);
+	if (*out_ring) {
+		/* cache hit */
+		r = 0;
+		goto out_unlock;
+	}
+
+	switch (mapper->hw_ip) {
+	case AMDGPU_HW_IP_GFX:
+	case AMDGPU_HW_IP_UVD:
+	case AMDGPU_HW_IP_VCE:
+	case AMDGPU_HW_IP_UVD_ENC:
+	case AMDGPU_HW_IP_VCN_DEC:
+	case AMDGPU_HW_IP_VCN_ENC:
+		r = amdgpu_identity_map(adev, mapper, ring, out_ring);
+		break;
+	case AMDGPU_HW_IP_DMA:
+	case AMDGPU_HW_IP_COMPUTE:
+		r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+		break;
+	default:
+		*out_ring = NULL;
+		r = -EINVAL;
+		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+	}
+
+out_unlock:
+	mutex_unlock(&mapper->lock);
+	return r;
+}

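The queue manager above resolves a userspace ring id in two steps: a per-IP cached map is consulted first, and on a miss a per-IP policy (identity mapping for most blocks, LRU for compute and SDMA) fills the cache so later lookups are stable. A miniature sketch of that control flow (the policy function is a stand-in, not the amdgpu one):

#include <stdio.h>
#include <string.h>

#define MAX_RINGS 8

static int cache[MAX_RINGS];	/* 0 = no mapping yet */

static int identity_map(int user_ring) { return user_ring + 1; }

static int lookup(int user_ring, int (*policy)(int))
{
	if (user_ring < 0 || user_ring >= MAX_RINGS)
		return -1;
	if (!cache[user_ring])			/* miss: ask the policy */
		cache[user_ring] = policy(user_ring);
	return cache[user_ring];		/* hit: stable mapping */
}

int main(void)
{
	memset(cache, 0, sizeof(cache));
	printf("first:  %d\n", lookup(3, identity_map));	/* fills cache */
	printf("second: %d\n", lookup(3, identity_map));	/* cache hit   */
	return 0;
}
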
@@ -135,6 +135,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 
 	if (ring->funcs->end_use)
 		ring->funcs->end_use(ring);
+
+	amdgpu_ring_lru_touch(ring->adev, ring);
 }
 
 /**
@@ -253,10 +255,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}
 
 	ring->max_dw = max_dw;
+	INIT_LIST_HEAD(&ring->lru_list);
+	amdgpu_ring_lru_touch(adev, ring);
 
 	if (amdgpu_debugfs_ring_init(adev, ring)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
 	}
 
 	return 0;
 }
 
@@ -294,6 +299,84 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	ring->adev->rings[ring->idx] = NULL;
 }
 
+static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring)
+{
+	/* list_move_tail handles the case where ring isn't part of the list */
+	list_move_tail(&ring->lru_list, &adev->ring_lru_list);
+}
+
+static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
+				       int *blacklist, int num_blacklist)
+{
+	int i;
+
+	for (i = 0; i < num_blacklist; i++) {
+		if (ring->idx == blacklist[i])
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
+ *
+ * @adev: amdgpu_device pointer
+ * @type: amdgpu_ring_type enum
+ * @blacklist: blacklisted ring ids array
+ * @num_blacklist: number of entries in @blacklist
+ * @ring: output ring
+ *
+ * Retrieve the amdgpu_ring structure for the least recently used ring of
+ * a specific IP block (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
+			int num_blacklist, struct amdgpu_ring **ring)
+{
+	struct amdgpu_ring *entry;
+
+	/* List is sorted in LRU order, find first entry corresponding
+	 * to the desired HW IP */
+	*ring = NULL;
+	spin_lock(&adev->ring_lru_list_lock);
+	list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
+		if (entry->funcs->type != type)
+			continue;
+
+		if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
+			continue;
+
+		*ring = entry;
+		amdgpu_ring_lru_touch_locked(adev, *ring);
+		break;
+	}
+	spin_unlock(&adev->ring_lru_list_lock);
+
+	if (!*ring) {
+		DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_ring_lru_touch - mark a ring as recently being used
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring to touch
+ *
+ * Move @ring to the tail of the lru list
+ */
+void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+	spin_lock(&adev->ring_lru_list_lock);
+	amdgpu_ring_lru_touch_locked(adev, ring);
+	spin_unlock(&adev->ring_lru_list_lock);
+}
+
 /*
  * Debugfs info
  */

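amdgpu_ring_lru_get() above scans an LRU-ordered list, skips rings of the wrong type or rings on the caller's blacklist, and re-touches the winner so it moves to the most-recently-used end. The same selection policy on a plain array, as a compilable sketch with invented ring ids:

#include <stdio.h>

#define N 4

/* lru[0] is least recently used; lru[N-1] most recently used. */
static int lru[N] = { 2, 0, 3, 1 };

static int blacklisted(int id, const int *bl, int nbl)
{
	int i;

	for (i = 0; i < nbl; i++)
		if (bl[i] == id)
			return 1;
	return 0;
}

static void touch(int pos)	/* move lru[pos] to the MRU end */
{
	int id = lru[pos];
	int i;

	for (i = pos; i < N - 1; i++)
		lru[i] = lru[i + 1];
	lru[N - 1] = id;
}

static int lru_get(const int *bl, int nbl)
{
	int i;

	for (i = 0; i < N; i++) {
		int id = lru[i];

		if (blacklisted(id, bl, nbl))
			continue;
		touch(i);
		return id;
	}
	return -1;	/* nothing usable */
}

int main(void)
{
	int bl[] = { 2 };	/* ring 2 already mapped for this client */

	printf("picked %d\n", lru_get(bl, 1));	/* -> 0, the next-coldest */
	printf("picked %d\n", lru_get(bl, 1));	/* -> 3 */
	return 0;
}
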
@@ -47,7 +47,9 @@ enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_UVD,
 	AMDGPU_RING_TYPE_VCE,
 	AMDGPU_RING_TYPE_KIQ,
-	AMDGPU_RING_TYPE_UVD_ENC
+	AMDGPU_RING_TYPE_UVD_ENC,
+	AMDGPU_RING_TYPE_VCN_DEC,
+	AMDGPU_RING_TYPE_VCN_ENC
 };
 
 struct amdgpu_device;
@@ -76,6 +78,7 @@ struct amdgpu_fence_driver {
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 				  unsigned num_hw_submission);
@@ -130,6 +133,7 @@ struct amdgpu_ring_funcs {
 	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
 	/* insert NOP packets */
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+	void (*insert_start)(struct amdgpu_ring *ring);
 	void (*insert_end)(struct amdgpu_ring *ring);
 	/* pad the indirect buffer to the necessary number of dw */
 	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
@@ -142,6 +146,7 @@ struct amdgpu_ring_funcs {
 	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
 	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
 	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
 };
 
 struct amdgpu_ring {
@@ -149,6 +154,7 @@ struct amdgpu_ring {
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
 	struct amd_gpu_scheduler	sched;
+	struct list_head		lru_list;
 
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -180,6 +186,7 @@ struct amdgpu_ring {
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
 	unsigned		vm_inv_eng;
+	bool			has_compute_vm_bug;
#if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;
#endif
@@ -194,6 +201,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
 		     unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
+			int num_blacklist, struct amdgpu_ring **ring);
+void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 {
 	int i = 0;

@@ -298,6 +298,25 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 	return NULL;
 }
 
+int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		r = dma_fence_wait(e->fence, intr);
+		if (r)
+			return r;
+
+		hash_del(&e->node);
+		dma_fence_put(e->fence);
+		kmem_cache_free(amdgpu_sync_slab, e);
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_sync_free - free the sync object
 *

@@ -49,6 +49,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);

@@ -745,6 +745,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 		return r;
 	}
 
+	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
@@ -753,12 +754,13 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
-		return r;
+		goto error_gart_bind;
 	}
-	spin_lock(&gtt->adev->gtt_list_lock);
+
 	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
 	spin_unlock(&gtt->adev->gtt_list_lock);
-	return 0;
+	return r;
 }
 
@@ -789,6 +791,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	int r;
 
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
@@ -797,14 +800,17 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	if (gtt->adev->gart.ready)
-		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-
 	spin_lock(&gtt->adev->gtt_list_lock);
+	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+	if (r) {
+		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
+			  gtt->ttm.ttm.num_pages, gtt->offset);
+		goto error_unbind;
+	}
 	list_del_init(&gtt->list);
+error_unbind:
 	spin_unlock(&gtt->adev->gtt_list_lock);
-	return 0;
+	return r;
 }
 
 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
@@ -1115,7 +1121,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	/* Change the size here instead of the init above so only lpfn is affected */
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
+	r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
@@ -1462,6 +1468,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (*pos >= adev->mc.mc_vram_size)
+		return -ENXIO;
+
 	while (size) {
 		unsigned long flags;
 		uint32_t value;

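The bind/unbind hunks above widen the gtt_list_lock critical section and route every failure through a single unlock label, so the lock is released exactly once on all paths. A minimal sketch of that lock-and-goto error-path shape, with a pthread mutex standing in for the spinlock (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int bound;

static int do_bind(int fail)	/* fail != 0 simulates a bind error */
{
	int r = 0;

	pthread_mutex_lock(&lock);
	if (fail) {
		r = -1;
		goto error_bind;	/* error path still releases the lock */
	}
	bound = 1;			/* the list_add_tail() in the original */
error_bind:
	pthread_mutex_unlock(&lock);
	return r;
}

int main(void)
{
	printf("ok path:   %d (bound=%d)\n", do_bind(0), bound);
	printf("fail path: %d (bound=%d)\n", do_bind(1), bound);
	return 0;
}
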
@@ -197,6 +197,27 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
 	}
 }
 
+void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("GPU_INFO\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
+			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);
+
+		DRM_DEBUG("version_major: %u\n",
+			  le16_to_cpu(gpu_info_hdr->version_major));
+		DRM_DEBUG("version_minor: %u\n",
+			  le16_to_cpu(gpu_info_hdr->version_minor));
+	} else {
+		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
 int amdgpu_ucode_validate(const struct firmware *fw)
 {
 	const struct common_firmware_header *hdr =
@@ -253,6 +274,15 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
 			return AMDGPU_FW_LOAD_PSP;
+	case CHIP_RAVEN:
+#if 0
+		if (!load_type)
+			return AMDGPU_FW_LOAD_DIRECT;
+		else
+			return AMDGPU_FW_LOAD_PSP;
+#else
+		return AMDGPU_FW_LOAD_DIRECT;
+#endif
 	default:
 		DRM_ERROR("Unknow firmware load type\n");
 	}
@@ -349,7 +379,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
 				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-				0, NULL, NULL, bo);
+				AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		goto failed;

@@ -113,6 +113,32 @@ struct sdma_firmware_header_v1_1 {
 	uint32_t digest_size;
 };
 
+/* gpu info payload */
+struct gpu_info_firmware_v1_0 {
+	uint32_t gc_num_se;
+	uint32_t gc_num_cu_per_sh;
+	uint32_t gc_num_sh_per_se;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_tccs;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+};
+
+/* version_major=1, version_minor=0 */
+struct gpu_info_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint16_t version_major; /* version */
+	uint16_t version_minor; /* version */
+};
+
 /* header is fixed size */
 union amdgpu_firmware_header {
 	struct common_firmware_header common;
@@ -124,6 +150,7 @@ union amdgpu_firmware_header {
 	struct rlc_firmware_header_v2_0 rlc_v2_0;
 	struct sdma_firmware_header_v1_0 sdma;
 	struct sdma_firmware_header_v1_1 sdma_v1_1;
+	struct gpu_info_firmware_header_v1_0 gpu_info;
 	uint8_t raw[0x100];
 };
 
@@ -184,6 +211,7 @@ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
 void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
 void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
 void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
 int amdgpu_ucode_validate(const struct firmware *fw);
 bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
 			      uint16_t hdr_major, uint16_t hdr_minor);

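The gpu_info structures above describe a small firmware blob whose fixed common header carries a version that must be checked before the v1.0 payload fields are trusted, mirroring amdgpu_ucode_print_gpu_info_hdr(). A standalone sketch of that parse (the common header here is abbreviated to a few illustrative fields plus the version pair, and the blob contents are fabricated test data; a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct common_firmware_header {
	uint32_t size_bytes;
	uint32_t header_size_bytes;
	uint16_t header_version_major;
	uint16_t header_version_minor;
};	/* abbreviated: the real header carries more fields */

struct gpu_info_firmware_header_v1_0 {
	struct common_firmware_header header;
	uint16_t version_major;
	uint16_t version_minor;
};

int main(void)
{
	uint8_t blob[sizeof(struct gpu_info_firmware_header_v1_0)];
	struct gpu_info_firmware_header_v1_0 hdr;

	/* Fabricate a v1.0 header in the blob. */
	struct gpu_info_firmware_header_v1_0 src = {
		.header = { .header_version_major = 1 },
		.version_major = 1,
		.version_minor = 0,
	};
	memcpy(blob, &src, sizeof(src));

	/* Parse: check the common header version before using the payload. */
	memcpy(&hdr, blob, sizeof(hdr));
	if (hdr.header.header_version_major == 1)
		printf("gpu_info v%u.%u\n", hdr.version_major, hdr.version_minor);
	else
		printf("unknown gpu_info header version\n");
	return 0;
}
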
@@ -165,35 +165,14 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
|
|||||||
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
|
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
|
||||||
(binary_id << 8));
|
(binary_id << 8));
|
||||||
|
|
||||||
/* allocate firmware, stack and heap BO */
|
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
|
||||||
|
AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
|
||||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
&adev->vce.gpu_addr, &adev->vce.cpu_addr);
|
||||||
AMDGPU_GEM_DOMAIN_VRAM,
|
|
||||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
|
||||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
|
||||||
NULL, NULL, &adev->vce.vcpu_bo);
|
|
||||||
if (r) {
|
if (r) {
|
||||||
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
|
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
|
|
||||||
if (r) {
|
|
||||||
amdgpu_bo_unref(&adev->vce.vcpu_bo);
|
|
||||||
dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
|
|
||||||
&adev->vce.gpu_addr);
|
|
||||||
amdgpu_bo_unreserve(adev->vce.vcpu_bo);
|
|
||||||
if (r) {
|
|
||||||
amdgpu_bo_unref(&adev->vce.vcpu_bo);
|
|
||||||
dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
ring = &adev->vce.ring[0];
|
ring = &adev->vce.ring[0];
|
||||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||||
r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
|
r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
|
||||||
@@ -230,7 +209,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 
 	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
 
-	amdgpu_bo_unref(&adev->vce.vcpu_bo);
+	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+			      (void **)&adev->vce.cpu_addr);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		amdgpu_ring_fini(&adev->vce.ring[i]);
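These two hunks replace the open-coded create/reserve/pin sequence and its per-step unwind with the paired amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() helpers. A minimal standalone analogue of that pattern, with invented names and plain userspace C rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource standing in for an amdgpu_bo. */
struct buf {
	void *cpu_addr;
	unsigned long gpu_addr;
};

/* One helper performs every setup step and unwinds on failure,
 * mirroring what amdgpu_bo_create_kernel() does for create+pin+map. */
static int buf_create_kernel(unsigned long size, struct buf **out,
			     unsigned long *gpu_addr, void **cpu_addr)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return -1;
	b->cpu_addr = malloc(size);	/* the "map" step */
	if (!b->cpu_addr) {
		free(b);		/* unwind lives in ONE place */
		return -1;
	}
	b->gpu_addr = 0x1000;		/* the "pin" step (fake address) */
	*out = b;
	*gpu_addr = b->gpu_addr;
	*cpu_addr = b->cpu_addr;
	return 0;
}

/* Matching teardown helper, mirroring amdgpu_bo_free_kernel(). */
static void buf_free_kernel(struct buf **b, unsigned long *gpu_addr,
			    void **cpu_addr)
{
	if (!*b)
		return;
	free((*b)->cpu_addr);
	free(*b);
	*b = NULL;
	*gpu_addr = 0;
	*cpu_addr = NULL;
}

int main(void)
{
	struct buf *b;
	unsigned long gpu;
	void *cpu;

	if (buf_create_kernel(4096, &b, &gpu, &cpu) == 0) {
		printf("mapped at %p, gpu 0x%lx\n", cpu, gpu);
		buf_free_kernel(&b, &gpu, &cpu);
	}
	return 0;
}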
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h:
@@ -33,6 +33,8 @@
 struct amdgpu_vce {
 	struct amdgpu_bo	*vcpu_bo;
 	uint64_t		gpu_addr;
+	void			*cpu_addr;
+	void			*saved_bo;
 	unsigned		fw_version;
 	unsigned		fb_version;
 	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c (new file, 654 lines):
@@ -0,0 +1,654 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
				   bool direct, struct dma_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}
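amdgpu_vcn_sw_init() above unpacks three fields from the 32-bit ucode_version word: major version in bits 31:24, minor version in bits 15:8 and family ID in bits 7:0. A runnable sketch of the same layout, using an invented sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ucode_version word: major 1, minor 5, family 0x12. */
	uint32_t ucode_version = (1u << 24) | (5u << 8) | 0x12;

	unsigned family_id     = ucode_version & 0xff;         /* bits 7:0   */
	unsigned version_major = (ucode_version >> 24) & 0xff; /* bits 31:24 */
	unsigned version_minor = (ucode_version >> 8) & 0xff;  /* bits 15:8  */

	printf("VCN firmware %u.%u, family 0x%02x\n",
	       version_major, version_minor, family_id);
	return 0;
}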
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h (new file, 77 lines):
@@ -0,0 +1,77 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__

#define AMDGPU_VCN_STACK_SIZE		(200*1024)
#define AMDGPU_VCN_HEAP_SIZE		(256*1024)
#define AMDGPU_VCN_SESSION_SIZE		(50*1024)
#define AMDGPU_VCN_FIRMWARE_OFFSET	256
#define AMDGPU_VCN_MAX_ENC_RINGS	3

#define VCN_DEC_CMD_FENCE		0x00000000
#define VCN_DEC_CMD_TRAP		0x00000001
#define VCN_DEC_CMD_WRITE_REG		0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT	0x00000006
#define VCN_DEC_CMD_PACKET_START	0x0000000a
#define VCN_DEC_CMD_PACKET_END		0x0000000b

#define VCN_ENC_CMD_NO_OP		0x00000000
#define VCN_ENC_CMD_END			0x00000001
#define VCN_ENC_CMD_IB			0x00000002
#define VCN_ENC_CMD_FENCE		0x00000003
#define VCN_ENC_CMD_TRAP		0x00000004
#define VCN_ENC_CMD_REG_WRITE		0x0000000b
#define VCN_ENC_CMD_REG_WAIT		0x0000000c

struct amdgpu_vcn {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	void			*saved_bo;
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCN firmware */
	struct amdgpu_ring	ring_dec;
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
	struct amdgpu_irq_src	irq;
	struct amd_sched_entity entity_dec;
	struct amd_sched_entity entity_enc;
	unsigned		num_enc_rings;
};

int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
int amdgpu_vcn_resume(struct amdgpu_device *adev);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

#endif
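Plugging these constants into the bo_size sum from amdgpu_vcn_sw_init() gives the page-aligned firmware image plus a 200 KiB stack, 256 KiB heap and forty 50 KiB session slots. A runnable sketch of the arithmetic, assuming a 4 KiB GPU page and an invented firmware size:

#include <stdio.h>

#define PAGE_ALIGN_4K(x)	(((x) + 4095ul) & ~4095ul)

#define VCN_STACK_SIZE		(200*1024)
#define VCN_HEAP_SIZE		(256*1024)
#define VCN_SESSION_SIZE	(50*1024)

int main(void)
{
	unsigned long fw_bytes = 300 * 1024;	/* sample firmware size */
	unsigned long bo_size;

	/* Mirrors the bo_size sum in amdgpu_vcn_sw_init(). */
	bo_size = PAGE_ALIGN_4K(fw_bytes + 8)
		  + VCN_STACK_SIZE + VCN_HEAP_SIZE
		  + VCN_SESSION_SIZE * 40;

	printf("VCN vcpu bo: %lu KiB\n", bo_size / 1024);
	return 0;
}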
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c:
@@ -22,6 +22,7 @@
  */
 
 #include "amdgpu.h"
+#define MAX_KIQ_REG_WAIT	100000
 
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 {
@@ -105,8 +106,9 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	/* enable virtual display */
 	adev->mode_info.num_crtc = 1;
 	adev->enable_virtual_display = true;
+	adev->cg_flags = 0;
+	adev->pg_flags = 0;
 
-	mutex_init(&adev->virt.lock_kiq);
 	mutex_init(&adev->virt.lock_reset);
 }
 
@@ -120,17 +122,19 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 
 	BUG_ON(!ring->funcs->emit_rreg);
 
-	mutex_lock(&adev->virt.lock_kiq);
+	mutex_lock(&kiq->ring_mutex);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_rreg(ring, reg);
 	amdgpu_fence_emit(ring, &f);
 	amdgpu_ring_commit(ring);
-	mutex_unlock(&adev->virt.lock_kiq);
+	mutex_unlock(&kiq->ring_mutex);
 
-	r = dma_fence_wait(f, false);
-	if (r)
-		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
 	dma_fence_put(f);
+	if (r < 1) {
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+		return ~0;
+	}
 
 	val = adev->wb.wb[adev->virt.reg_val_offs];
 
@@ -146,15 +150,15 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
 	BUG_ON(!ring->funcs->emit_wreg);
 
-	mutex_lock(&adev->virt.lock_kiq);
+	mutex_lock(&kiq->ring_mutex);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_wreg(ring, reg, v);
 	amdgpu_fence_emit(ring, &f);
 	amdgpu_ring_commit(ring);
-	mutex_unlock(&adev->virt.lock_kiq);
+	mutex_unlock(&kiq->ring_mutex);
 
-	r = dma_fence_wait(f, false);
-	if (r)
+	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+	if (r < 1)
 		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
 	dma_fence_put(f);
 }
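The move from dma_fence_wait() to dma_fence_wait_timeout() changes the return convention: the timeout variant returns remaining jiffies (positive) on success, 0 on timeout and a negative errno on error, which is why the check becomes r < 1 rather than r != 0. A standalone model of that convention, with a stand-in wait function rather than the kernel API:

#include <stdio.h>

/* Stand-in with dma_fence_wait_timeout()-style returns:
 * >0 = remaining time on success, 0 = timed out, <0 = negative errno. */
static long fake_wait_timeout(int outcome, long timeout)
{
	if (outcome < 0)
		return outcome;		/* error case */
	if (outcome == 0)
		return 0;		/* timed out */
	return timeout / 2;		/* signaled with time to spare */
}

int main(void)
{
	int outcomes[] = { 1, 0, -4 };

	for (int i = 0; i < 3; i++) {
		long r = fake_wait_timeout(outcomes[i], 100000);

		if (r < 1)	/* same test the KIQ paths now use */
			printf("r=%ld: treat as failure\n", r);
		else
			printf("r=%ld: fence signaled\n", r);
	}
	return 0;
}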
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h:
@@ -52,7 +52,6 @@ struct amdgpu_virt {
 	uint64_t			csa_vmid0_addr;
 	bool				chained_ib_support;
 	uint32_t			reg_val_offs;
-	struct mutex			lock_kiq;
 	struct mutex			lock_reset;
 	struct amdgpu_irq_src		ack_irq;
 	struct amdgpu_irq_src		rcv_irq;
@@ -97,7 +96,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
+int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:
@@ -79,6 +79,12 @@ struct amdgpu_pte_update_params {
 			 uint64_t flags);
 	/* indicate update pt or its shadow */
 	bool shadow;
+	/* The next two are used during VM update by CPU
+	 *  DMA addresses to use for mapping
+	 *  Kernel pointer of PD/PT BO that needs to be updated
+	 */
+	dma_addr_t *pages_addr;
+	void *kptr;
 };
 
 /* Helper to disable partial resident texture feature from a fence callback */
@@ -275,6 +281,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 		adev->vm_manager.block_size;
 	unsigned pt_idx, from, to;
 	int r;
+	u64 flags;
 
 	if (!parent->entries) {
 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -300,6 +307,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	saddr = saddr & ((1 << shift) - 1);
 	eaddr = eaddr & ((1 << shift) - 1);
 
+	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+			AMDGPU_GEM_CREATE_VRAM_CLEARED;
+	if (vm->use_cpu_for_update)
+		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	else
+		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				AMDGPU_GEM_CREATE_SHADOW);
+
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
 		struct reservation_object *resv = vm->root.bo->tbo.resv;
@@ -311,10 +326,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 				     amdgpu_vm_bo_size(adev, level),
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
+				     flags,
 				     NULL, resv, &pt);
 		if (r)
 			return r;
@@ -392,6 +404,71 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
 		atomic_read(&adev->gpu_reset_counter);
 }
 
+static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
+{
+	return !!vm->reserved_vmid[vmhub];
+}
+
+/* idr_mgr->lock must be held */
+static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
+					       struct amdgpu_ring *ring,
+					       struct amdgpu_sync *sync,
+					       struct dma_fence *fence,
+					       struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	uint64_t fence_context = adev->fence_context + ring->idx;
+	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct dma_fence *updates = sync->last_vm_update;
+	int r = 0;
+	struct dma_fence *flushed, *tmp;
+	bool needs_flush = false;
+
+	flushed = id->flushed_updates;
+	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
+	    (atomic64_read(&id->owner) != vm->client_id) ||
+	    (job->vm_pd_addr != id->pd_gpu_addr) ||
+	    (updates && (!flushed || updates->context != flushed->context ||
+			dma_fence_is_later(updates, flushed))) ||
+	    (!id->last_flush || (id->last_flush->context != fence_context &&
+				 !dma_fence_is_signaled(id->last_flush)))) {
+		needs_flush = true;
+		/* to prevent one context starved by another context */
+		id->pd_gpu_addr = 0;
+		tmp = amdgpu_sync_peek_fence(&id->active, ring);
+		if (tmp) {
+			r = amdgpu_sync_fence(adev, sync, tmp);
+			return r;
+		}
+	}
+
+	/* Good we can use this VMID. Remember this submission as
+	 * user of the VMID.
+	 */
+	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+	if (r)
+		goto out;
+
+	if (updates && (!flushed || updates->context != flushed->context ||
+			dma_fence_is_later(updates, flushed))) {
+		dma_fence_put(id->flushed_updates);
+		id->flushed_updates = dma_fence_get(updates);
+	}
+	id->pd_gpu_addr = job->vm_pd_addr;
+	atomic64_set(&id->owner, vm->client_id);
+	job->vm_needs_flush = needs_flush;
+	if (needs_flush) {
+		dma_fence_put(id->last_flush);
+		id->last_flush = NULL;
+	}
+	job->vm_id = id - id_mgr->ids;
+	trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+	return r;
+}
+
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -416,12 +493,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned i;
 	int r = 0;
 
-	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-	if (!fences)
-		return -ENOMEM;
-
 	mutex_lock(&id_mgr->lock);
+	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
+		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
+		mutex_unlock(&id_mgr->lock);
+		return r;
+	}
+	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+	if (!fences) {
+		mutex_unlock(&id_mgr->lock);
+		return -ENOMEM;
+	}
 	/* Check if we have an idle VMID */
 	i = 0;
 	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
@@ -522,7 +604,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = job->vm_pd_addr;
 	dma_fence_put(id->flushed_updates);
 	id->flushed_updates = dma_fence_get(updates);
-	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 	atomic64_set(&id->owner, vm->client_id);
 
 needs_flush:
@@ -541,40 +622,118 @@ error:
 	return r;
 }
 
-static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
+					 struct amdgpu_vm *vm,
+					 unsigned vmhub)
 {
-	struct amdgpu_device *adev = ring->adev;
-	const struct amdgpu_ip_block *ip_block;
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
-		/* only compute rings */
-		return false;
+	mutex_lock(&id_mgr->lock);
+	if (vm->reserved_vmid[vmhub]) {
+		list_add(&vm->reserved_vmid[vmhub]->list,
+			 &id_mgr->ids_lru);
+		vm->reserved_vmid[vmhub] = NULL;
+		atomic_dec(&id_mgr->reserved_vmid_num);
+	}
+	mutex_unlock(&id_mgr->lock);
+}
+
+static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
+					 struct amdgpu_vm *vm,
+					 unsigned vmhub)
+{
+	struct amdgpu_vm_id_manager *id_mgr;
+	struct amdgpu_vm_id *idle;
+	int r = 0;
+
+	id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	mutex_lock(&id_mgr->lock);
+	if (vm->reserved_vmid[vmhub])
+		goto unlock;
+	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+	    AMDGPU_VM_MAX_RESERVED_VMID) {
+		DRM_ERROR("Over limitation of reserved vmid\n");
+		atomic_dec(&id_mgr->reserved_vmid_num);
+		r = -EINVAL;
+		goto unlock;
+	}
+	/* Select the first entry VMID */
+	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
+	list_del_init(&idle->list);
+	vm->reserved_vmid[vmhub] = idle;
+	mutex_unlock(&id_mgr->lock);
+
+	return 0;
+unlock:
+	mutex_unlock(&id_mgr->lock);
+	return r;
+}
+
+/**
+ * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
+ *
+ * @adev: amdgpu_device pointer
+ */
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
+{
+	const struct amdgpu_ip_block *ip_block;
+	bool has_compute_vm_bug;
+	struct amdgpu_ring *ring;
+	int i;
+
+	has_compute_vm_bug = false;
 
 	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
-	if (!ip_block)
-		return false;
+	if (ip_block) {
+		/* Compute has a VM bug for GFX version < 7.
+		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
+		if (ip_block->version->major <= 7)
+			has_compute_vm_bug = true;
+		else if (ip_block->version->major == 8)
+			if (adev->gfx.mec_fw_version < 673)
+				has_compute_vm_bug = true;
+	}
 
-	if (ip_block->version->major <= 7) {
-		/* gfx7 has no workaround */
-		return true;
-	} else if (ip_block->version->major == 8) {
-		if (adev->gfx.mec_fw_version >= 673)
-			/* gfx8 is fixed in MEC firmware 673 */
-			return false;
+	for (i = 0; i < adev->num_rings; i++) {
+		ring = adev->rings[i];
+		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+			/* only compute rings */
+			ring->has_compute_vm_bug = has_compute_vm_bug;
 		else
-			return true;
+			ring->has_compute_vm_bug = false;
 	}
-	return false;
 }
 
-static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job)
 {
-	u64 addr = mc_addr;
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id;
+	bool gds_switch_needed;
+	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
-	if (adev->gart.gart_funcs->adjust_mc_addr)
-		addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
+	if (job->vm_id == 0)
+		return false;
+	id = &id_mgr->ids[job->vm_id];
+	gds_switch_needed = ring->funcs->emit_gds_switch && (
+		id->gds_base != job->gds_base ||
+		id->gds_size != job->gds_size ||
+		id->gws_base != job->gws_base ||
+		id->gws_size != job->gws_size ||
+		id->oa_base != job->oa_base ||
+		id->oa_size != job->oa_size);
 
-	return addr;
+	if (amdgpu_vm_had_gpu_reset(adev, id))
+		return true;
+
+	return vm_flush_needed || gds_switch_needed;
+}
+
+static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
+{
+	return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
 }
 
 /**
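This hunk moves the compute-VM-bug test from a per-submission helper to a one-time scan that caches the result in ring->has_compute_vm_bug. A standalone sketch of the same init-time caching; only the GFX7 and MEC-673 thresholds are taken from the hunk, the types and driver glue are invented:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool is_compute;
	bool has_compute_vm_bug;	/* cached once at init */
};

/* Mirrors the hunk's policy: GFX <= 7 compute always has the bug;
 * GFX8 is fixed from MEC firmware 673 onward. */
static bool gfx_has_bug(int gfx_major, unsigned mec_fw_version)
{
	if (gfx_major <= 7)
		return true;
	if (gfx_major == 8)
		return mec_fw_version < 673;
	return false;
}

static void check_compute_bug(struct ring *rings, int n,
			      int gfx_major, unsigned mec_fw)
{
	bool bug = gfx_has_bug(gfx_major, mec_fw);

	/* Only compute rings can carry the workaround flag. */
	for (int i = 0; i < n; i++)
		rings[i].has_compute_vm_bug = rings[i].is_compute && bug;
}

int main(void)
{
	struct ring rings[2] = { { .is_compute = true },
				 { .is_compute = false } };

	check_compute_bug(rings, 2, 8, 600);	/* old MEC firmware */
	printf("compute ring bug: %d\n", rings[0].has_compute_vm_bug);
	return 0;
}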
@@ -599,8 +758,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		id->gws_size != job->gws_size ||
 		id->oa_base != job->oa_base ||
 		id->oa_size != job->oa_size);
-	bool vm_flush_needed = job->vm_needs_flush ||
-		amdgpu_vm_ring_has_compute_vm_bug(ring);
+	bool vm_flush_needed = job->vm_needs_flush;
 	unsigned patch_offset = 0;
 	int r;
 
@@ -615,15 +773,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-	if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
-		amdgpu_ring_emit_pipeline_sync(ring);
-
 	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
-		u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
 		struct dma_fence *fence;
 
-		trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
-		amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+		trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
 
 		r = amdgpu_fence_emit(ring, &fence);
 		if (r)
@@ -632,6 +786,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		mutex_lock(&id_mgr->lock);
 		dma_fence_put(id->last_flush);
 		id->last_flush = fence;
+		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 		mutex_unlock(&id_mgr->lock);
 	}
 
@@ -806,6 +961,53 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
+/**
+ * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: kmap addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write count number of PT/PD entries directly.
+ */
+static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
+				   uint64_t pe, uint64_t addr,
+				   unsigned count, uint32_t incr,
+				   uint64_t flags)
+{
+	unsigned int i;
+	uint64_t value;
+
+	for (i = 0; i < count; i++) {
+		value = params->pages_addr ?
+			amdgpu_vm_map_gart(params->pages_addr, addr) :
+			addr;
+		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+					i, value, flags);
+		addr += incr;
+	}
+
+	/* Flush HDP */
+	mb();
+	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
+}
+
+static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+	struct amdgpu_sync sync;
+	int r;
+
+	amdgpu_sync_create(&sync);
+	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_sync_wait(&sync, true);
+	amdgpu_sync_free(&sync);
+
+	return r;
+}
+
 /*
  * amdgpu_vm_update_level - update a single level in the hierarchy
  *
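With amdgpu_vm_cpu_set_ptes() added, the page-table writers share one signature and the caller picks a backend through the params->func pointer. A standalone sketch of that dispatch shape, with invented names:

#include <stdint.h>
#include <stdio.h>

struct pte_update_params;

/* Both backends share one signature so callers stay backend-agnostic. */
typedef void (*set_ptes_fn)(struct pte_update_params *p, uint64_t pe,
			    uint64_t addr, unsigned count, uint32_t incr,
			    uint64_t flags);

struct pte_update_params {
	set_ptes_fn func;
};

static void cpu_set_ptes(struct pte_update_params *p, uint64_t pe,
			 uint64_t addr, unsigned count, uint32_t incr,
			 uint64_t flags)
{
	printf("CPU: write %u PTEs at 0x%llx\n", count,
	       (unsigned long long)pe);
}

static void ib_set_ptes(struct pte_update_params *p, uint64_t pe,
			uint64_t addr, unsigned count, uint32_t incr,
			uint64_t flags)
{
	printf("IB: queue %u PTE writes at 0x%llx\n", count,
	       (unsigned long long)pe);
}

int main(void)
{
	struct pte_update_params params;
	int use_cpu_for_update = 1;	/* e.g. on large-BAR systems */

	params.func = use_cpu_for_update ? cpu_set_ptes : ib_set_ptes;
	params.func(&params, 0x1000, 0x200000, 8, 4096, 0);
	return 0;
}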
@@ -822,11 +1024,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 			      unsigned level)
 {
 	struct amdgpu_bo *shadow;
-	struct amdgpu_ring *ring;
-	uint64_t pd_addr, shadow_addr;
+	struct amdgpu_ring *ring = NULL;
+	uint64_t pd_addr, shadow_addr = 0;
 	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
 	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
-	unsigned count = 0, pt_idx, ndw;
+	unsigned count = 0, pt_idx, ndw = 0;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
 	struct dma_fence *fence = NULL;
@@ -835,7 +1037,30 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 	if (!parent->entries)
 		return 0;
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
+	memset(&params, 0, sizeof(params));
+	params.adev = adev;
+	shadow = parent->bo->shadow;
+
+	WARN_ON(vm->use_cpu_for_update && shadow);
+	if (vm->use_cpu_for_update && !shadow) {
+		r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr);
+		if (r)
+			return r;
+		r = amdgpu_vm_bo_wait(adev, parent->bo);
+		if (unlikely(r)) {
+			amdgpu_bo_kunmap(parent->bo);
+			return r;
+		}
+		params.func = amdgpu_vm_cpu_set_ptes;
+	} else {
+		if (shadow) {
+			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+				    sched);
 
 	/* padding, etc. */
 	ndw = 64;
@@ -845,11 +1070,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 	pd_addr = amdgpu_bo_gpu_offset(parent->bo);
 
-	shadow = parent->bo->shadow;
 	if (shadow) {
-		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-		if (r)
-			return r;
 		shadow_addr = amdgpu_bo_gpu_offset(shadow);
 		ndw *= 2;
 	} else {
@@ -860,9 +1081,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	memset(&params, 0, sizeof(params));
-	params.adev = adev;
 	params.ib = &job->ibs[0];
+	params.func = amdgpu_vm_do_set_ptes;
+	}
 
 
 	/* walk over the address space and update the directory */
 	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
@@ -882,6 +1104,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
+		pt = amdgpu_gart_get_vm_pde(adev, pt);
 		if (parent->entries[pt_idx].addr == pt)
 			continue;
 
@@ -893,18 +1116,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
 			if (count) {
-				uint64_t pt_addr =
-					amdgpu_vm_adjust_mc_addr(adev, last_pt);
-
 				if (shadow)
-					amdgpu_vm_do_set_ptes(&params,
+					params.func(&params,
 						    last_shadow,
-						    pt_addr, count,
+						    last_pt, count,
 						    incr,
 						    AMDGPU_PTE_VALID);
 
-				amdgpu_vm_do_set_ptes(&params, last_pde,
-					    pt_addr, count, incr,
+				params.func(&params, last_pde,
+					    last_pt, count, incr,
 					    AMDGPU_PTE_VALID);
 			}
 
@@ -918,17 +1138,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	}
 
 	if (count) {
-		uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
-
 		if (vm->root.bo->shadow)
-			amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
+			params.func(&params, last_shadow, last_pt,
 				    count, incr, AMDGPU_PTE_VALID);
 
-		amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
+		params.func(&params, last_pde, last_pt,
 			    count, incr, AMDGPU_PTE_VALID);
 	}
 
-	if (params.ib->length_dw == 0) {
+	if (params.func == amdgpu_vm_cpu_set_ptes)
+		amdgpu_bo_kunmap(parent->bo);
+	else if (params.ib->length_dw == 0) {
 		amdgpu_job_free(job);
 	} else {
 		amdgpu_ring_pad_ib(ring, params.ib);
@@ -971,6 +1191,32 @@ error_free:
 	return r;
 }
 
+/*
+ * amdgpu_vm_invalidate_level - mark all PD levels as invalid
+ *
+ * @parent: parent PD
+ *
+ * Mark all PD level as invalid after an error.
+ */
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+{
+	unsigned pt_idx;
+
+	/*
+	 * Recurse into the subdirectories. This recursion is harmless because
+	 * we only have a maximum of 5 layers.
+	 */
+	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+
+		if (!entry->bo)
+			continue;
+
+		entry->addr = ~0ULL;
+		amdgpu_vm_invalidate_level(entry);
+	}
+}
+
 /*
  * amdgpu_vm_update_directories - make sure that all directories are valid
  *
@@ -983,7 +1229,13 @@ error_free:
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm)
 {
-	return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
+	int r;
+
+	r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
+	if (r)
+		amdgpu_vm_invalidate_level(&vm->root);
+
+	return r;
 }
 
 /**
@@ -1023,58 +1275,37 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
  * @flags: mapping flags
  *
  * Update the page tables in the range @start - @end.
+ * Returns 0 for success, -EINVAL for failure.
  */
-static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint64_t flags)
 {
 	struct amdgpu_device *adev = params->adev;
 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
 
-	uint64_t cur_pe_start, cur_nptes, cur_dst;
-	uint64_t addr; /* next GPU address to be updated */
+	uint64_t addr, pe_start;
 	struct amdgpu_bo *pt;
-	unsigned nptes; /* next number of ptes to be updated */
-	uint64_t next_pe_start;
-
-	/* initialize the variables */
-	addr = start;
-	pt = amdgpu_vm_get_pt(params, addr);
-	if (!pt) {
-		pr_err("PT not found, aborting update_ptes\n");
-		return;
-	}
-
-	if (params->shadow) {
-		if (!pt->shadow)
-			return;
-		pt = pt->shadow;
-	}
-	if ((addr & ~mask) == (end & ~mask))
-		nptes = end - addr;
-	else
-		nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
-
-	cur_pe_start = amdgpu_bo_gpu_offset(pt);
-	cur_pe_start += (addr & mask) * 8;
-	cur_nptes = nptes;
-	cur_dst = dst;
-
-	/* for next ptb*/
-	addr += nptes;
-	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+	unsigned nptes;
+	int r;
+	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
 
 	/* walk over the address space and update the page tables */
-	while (addr < end) {
+	for (addr = start; addr < end; addr += nptes) {
 		pt = amdgpu_vm_get_pt(params, addr);
 		if (!pt) {
			pr_err("PT not found, aborting update_ptes\n");
-			return;
+			return -EINVAL;
 		}
 
 		if (params->shadow) {
+			if (WARN_ONCE(use_cpu_update,
+				      "CPU VM update doesn't support shadow pages"))
+				return 0;
+
 			if (!pt->shadow)
-				return;
+				return 0;
 			pt = pt->shadow;
 		}
@@ -1083,32 +1314,25 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 		else
 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
-		next_pe_start = amdgpu_bo_gpu_offset(pt);
-		next_pe_start += (addr & mask) * 8;
+		if (use_cpu_update) {
+			r = amdgpu_bo_kmap(pt, (void *)&pe_start);
+			if (r)
+				return r;
+		} else
+			pe_start = amdgpu_bo_gpu_offset(pt);
 
-		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
-		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
-			/* The next ptb is consecutive to current ptb.
-			 * Don't call the update function now.
-			 * Will update two ptbs together in future.
-			 */
-			cur_nptes += nptes;
-		} else {
-			params->func(params, cur_pe_start, cur_dst, cur_nptes,
-				     AMDGPU_GPU_PAGE_SIZE, flags);
+		pe_start += (addr & mask) * 8;
 
-			cur_pe_start = next_pe_start;
-			cur_nptes = nptes;
-			cur_dst = dst;
-		}
+		params->func(params, pe_start, dst, nptes,
+			     AMDGPU_GPU_PAGE_SIZE, flags);
 
-		/* for next ptb*/
-		addr += nptes;
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+
+		if (use_cpu_update)
+			amdgpu_bo_kunmap(pt);
 	}
 
-	params->func(params, cur_pe_start, cur_dst, cur_nptes,
-		     AMDGPU_GPU_PAGE_SIZE, flags);
+	return 0;
 }
 
 /*
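
The reworked loop never updates past the end of one page table per pass: a range crossing a table boundary is handled by several calls to params->func(). A standalone sketch of just that split arithmetic (AMDGPU_VM_PTE_COUNT stubbed to 512; values made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_COUNT 512ULL   /* assumed PTEs per page table */

    int main(void)
    {
    	const uint64_t mask = PTE_COUNT - 1;
    	uint64_t start = 500, end = 1030, addr;
    	unsigned nptes;

    	for (addr = start; addr < end; addr += nptes) {
    		if ((addr & ~mask) == (end & ~mask))
    			nptes = end - addr;                /* last, partial table */
    		else
    			nptes = PTE_COUNT - (addr & mask); /* up to table end */
    		printf("update %u PTEs starting at page %llu\n",
    		       nptes, (unsigned long long)addr);
    	}
    	return 0;  /* prints 12, 512 and 6 PTE batches for this range */
    }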
@@ -1120,11 +1344,14 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
  * @end: last PTE to handle
  * @dst: addr those PTEs should point to
  * @flags: hw mapping flags
+ * Returns 0 for success, -EINVAL for failure.
  */
-static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 				uint64_t start, uint64_t end,
 				uint64_t dst, uint64_t flags)
 {
+	int r;
+
 	/**
 	 * The MC L1 TLB supports variable sized pages, based on a fragment
 	 * field in the PTE. When this field is set to a non-zero value, page
@@ -1153,28 +1380,30 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 
 	/* system pages are non continuously */
 	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
-	    (frag_start >= frag_end)) {
-
-		amdgpu_vm_update_ptes(params, start, end, dst, flags);
-		return;
-	}
+	    (frag_start >= frag_end))
+		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
 
 	/* handle the 4K area at the beginning */
 	if (start != frag_start) {
-		amdgpu_vm_update_ptes(params, start, frag_start,
-				      dst, flags);
+		r = amdgpu_vm_update_ptes(params, start, frag_start,
+					  dst, flags);
+		if (r)
+			return r;
 		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
 	}
 
 	/* handle the area in the middle */
-	amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
-			      flags | frag_flags);
+	r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
+				  flags | frag_flags);
+	if (r)
+		return r;
 
 	/* handle the 4K area at the end */
 	if (frag_end != end) {
 		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
-		amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+		r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
 	}
+	return r;
 }
 
 /**
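
The head/middle/tail decomposition above is easy to see with concrete numbers. A standalone sketch (the 64-page fragment size is a made-up illustration, not the driver's actual choice):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

    int main(void)
    {
    	const uint64_t frag_align = 64;   /* pages per fragment (assumed) */
    	uint64_t start = 70, end = 300;
    	uint64_t frag_start = ALIGN_UP(start, frag_align);
    	uint64_t frag_end = ALIGN_DOWN(end, frag_align);

    	/* if frag_start >= frag_end the range is too small to fragment
    	 * and the whole thing is written with plain 4K PTEs instead */
    	printf("head:   [%llu, %llu)  plain 4K PTEs\n",
    	       (unsigned long long)start, (unsigned long long)frag_start);
    	printf("middle: [%llu, %llu)  PTEs with the fragment flag\n",
    	       (unsigned long long)frag_start, (unsigned long long)frag_end);
    	printf("tail:   [%llu, %llu)  plain 4K PTEs\n",
    	       (unsigned long long)frag_end, (unsigned long long)end);
    	return 0;
    }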
@@ -1216,6 +1445,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.src = src;
 
+	if (vm->use_cpu_for_update) {
+		/* params.src is used as flag to indicate system Memory */
+		if (pages_addr)
+			params.src = ~0;
+
+		/* Wait for PT BOs to be free. PTs share the same resv. object
+		 * as the root PD BO
+		 */
+		r = amdgpu_vm_bo_wait(adev, vm->root.bo);
+		if (unlikely(r))
+			return r;
+
+		params.func = amdgpu_vm_cpu_set_ptes;
+		params.pages_addr = pages_addr;
+		params.shadow = false;
+		return amdgpu_vm_frag_ptes(&params, start, last + 1,
+					   addr, flags);
+	}
+
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* sync to everything on unmapping */
@@ -1295,9 +1543,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		goto error_free;
 
 	params.shadow = true;
-	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
+	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
+	if (r)
+		goto error_free;
 	params.shadow = false;
-	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
+	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
+	if (r)
+		goto error_free;
 
 	amdgpu_ring_pad_ib(ring, params.ib);
 	WARN_ON(params.ib->length_dw > ndw);
@@ -2138,20 +2390,25 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @vm_context: Indicates if it is a GFX or Compute context
  *
  * Init @vm fields.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		   int vm_context)
 {
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 		AMDGPU_VM_PTE_COUNT(adev) * 8);
 	unsigned ring_instance;
 	struct amdgpu_ring *ring;
 	struct amd_sched_rq *rq;
-	int r;
+	int r, i;
+	u64 flags;
 
 	vm->va = RB_ROOT;
 	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
@@ -2168,14 +2425,29 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (r)
 		return r;
 
+	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
+	else
+		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+						AMDGPU_VM_USE_CPU_FOR_GFX);
+	DRM_DEBUG_DRIVER("VM update mode is %s\n",
+			 vm->use_cpu_for_update ? "CPU" : "SDMA");
+	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+		  "CPU update of VM recommended only for large BAR system\n");
 	vm->last_dir_update = NULL;
 
+	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+			AMDGPU_GEM_CREATE_VRAM_CLEARED;
+	if (vm->use_cpu_for_update)
+		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	else
+		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				AMDGPU_GEM_CREATE_SHADOW);
+
 	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-			     AMDGPU_GEM_CREATE_SHADOW |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
+			     flags,
 			     NULL, NULL, &vm->root.bo);
 	if (r)
 		goto error_free_sched_entity;
@@ -2236,6 +2508,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
@@ -2259,6 +2532,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_vm_free_levels(&vm->root);
 	dma_fence_put(vm->last_dir_update);
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+		amdgpu_vm_free_reserved_vmid(adev, vm, i);
 }
 
 /**
@@ -2278,6 +2553,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 
 		mutex_init(&id_mgr->lock);
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
+		atomic_set(&id_mgr->reserved_vmid_num, 0);
 
 		/* skip over VMID 0, since it is the system VM */
 		for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -2296,6 +2572,23 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
 	atomic_set(&adev->vm_manager.num_prt_users, 0);
+
+	/* If not overridden by the user, by default, only in large BAR systems
+	 * Compute VM tables will be updated by CPU
+	 */
+#ifdef CONFIG_X86_64
+	if (amdgpu_vm_update_mode == -1) {
+		if (amdgpu_vm_is_large_bar(adev))
+			adev->vm_manager.vm_update_mode =
+				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
+		else
+			adev->vm_manager.vm_update_mode = 0;
+	} else
+		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
+#else
+	adev->vm_manager.vm_update_mode = 0;
+#endif
+
 }
 
 /**
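
The vm_update_mode value chosen here is a two-bit field (see the AMDGPU_VM_USE_CPU_FOR_* defines added to amdgpu_vm.h further down). A standalone sketch of how the bits are decoded per context, mirroring the amdgpu_vm_init() logic:

    #include <stdbool.h>
    #include <stdio.h>

    #define USE_CPU_FOR_GFX     (1 << 0)
    #define USE_CPU_FOR_COMPUTE (1 << 1)

    static bool vm_uses_cpu(int vm_update_mode, bool is_compute)
    {
    	return vm_update_mode & (is_compute ? USE_CPU_FOR_COMPUTE
    					    : USE_CPU_FOR_GFX);
    }

    int main(void)
    {
    	int mode = USE_CPU_FOR_COMPUTE;  /* the large-BAR x86-64 default */

    	printf("gfx:     %s\n", vm_uses_cpu(mode, false) ? "CPU" : "SDMA");
    	printf("compute: %s\n", vm_uses_cpu(mode, true) ? "CPU" : "SDMA");
    	return 0;
    }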
@@ -2323,3 +2616,28 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 		}
 	}
 }
+
+int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	union drm_amdgpu_vm *args = data;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	int r;
+
+	switch (args->in.op) {
+	case AMDGPU_VM_OP_RESERVE_VMID:
+		/* currently we only need to reserve a vmid from the gfxhub */
+		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
+						  AMDGPU_GFXHUB);
+		if (r)
+			return r;
+		break;
+	case AMDGPU_VM_OP_UNRESERVE_VMID:
+		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
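
From userspace this is driven through the DRM command interface. A hedged sketch, assuming the matching uapi pieces (union drm_amdgpu_vm, the AMDGPU_VM_OP_* values and a DRM_IOCTL_AMDGPU_VM wrapper in amdgpu_drm.h) land together with this series:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>   /* assumed to carry the new VM ioctl uapi */

    /* Ask the kernel to dedicate (or release) a VMID for this DRM handle. */
    static int vmid_reserve(int drm_fd, int reserve)
    {
    	union drm_amdgpu_vm args;

    	memset(&args, 0, sizeof(args));
    	args.in.op = reserve ? AMDGPU_VM_OP_RESERVE_VMID
    			     : AMDGPU_VM_OP_UNRESERVE_VMID;
    	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_VM, &args);
    }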
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -84,6 +84,16 @@ struct amdgpu_bo_list_entry;
 
 /* hardcode that limit for now */
 #define AMDGPU_VA_RESERVED_SIZE	(8 << 20)
+/* max vmids dedicated for process */
+#define AMDGPU_VM_MAX_RESERVED_VMID	1
+
+#define AMDGPU_VM_CONTEXT_GFX 0
+#define AMDGPU_VM_CONTEXT_COMPUTE 1
+
+/* See vm_update_mode */
+#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
+#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+
 
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
@@ -123,8 +133,13 @@ struct amdgpu_vm {
 
 	/* client id */
 	u64                     client_id;
+	/* dedicated to vm */
+	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
 	/* each VM will map on CSA */
 	struct amdgpu_bo_va *csa_bo_va;
+
+	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
+	bool                    use_cpu_for_update;
 };
 
 struct amdgpu_vm_id {
@@ -152,6 +167,7 @@ struct amdgpu_vm_id_manager {
 	unsigned		num_ids;
 	struct list_head	ids_lru;
 	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
+	atomic_t		reserved_vmid_num;
 };
 
 struct amdgpu_vm_manager {
@@ -168,8 +184,6 @@ struct amdgpu_vm_manager {
 	uint32_t				block_size;
 	/* vram base address for page table entry */
 	u64					vram_base_offset;
-	/* is vm enabled? */
-	bool					enabled;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
 	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
@@ -181,11 +195,18 @@ struct amdgpu_vm_manager {
 	/* partial resident texture handling */
 	spinlock_t				prt_lock;
 	atomic_t				num_prt_users;
+
+	/* controls how VM page tables are updated for Graphics and Compute.
+	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
+	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
+	 */
+	int					vm_update_mode;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		   int vm_context);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
@@ -239,5 +260,9 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job);
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -964,62 +964,62 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
-	{mmGRBM_STATUS, false},
-	{mmGB_ADDR_CONFIG, false},
-	{mmMC_ARB_RAMCFG, false},
-	{mmGB_TILE_MODE0, false},
-	{mmGB_TILE_MODE1, false},
-	{mmGB_TILE_MODE2, false},
-	{mmGB_TILE_MODE3, false},
-	{mmGB_TILE_MODE4, false},
-	{mmGB_TILE_MODE5, false},
-	{mmGB_TILE_MODE6, false},
-	{mmGB_TILE_MODE7, false},
-	{mmGB_TILE_MODE8, false},
-	{mmGB_TILE_MODE9, false},
-	{mmGB_TILE_MODE10, false},
-	{mmGB_TILE_MODE11, false},
-	{mmGB_TILE_MODE12, false},
-	{mmGB_TILE_MODE13, false},
-	{mmGB_TILE_MODE14, false},
-	{mmGB_TILE_MODE15, false},
-	{mmGB_TILE_MODE16, false},
-	{mmGB_TILE_MODE17, false},
-	{mmGB_TILE_MODE18, false},
-	{mmGB_TILE_MODE19, false},
-	{mmGB_TILE_MODE20, false},
-	{mmGB_TILE_MODE21, false},
-	{mmGB_TILE_MODE22, false},
-	{mmGB_TILE_MODE23, false},
-	{mmGB_TILE_MODE24, false},
-	{mmGB_TILE_MODE25, false},
-	{mmGB_TILE_MODE26, false},
-	{mmGB_TILE_MODE27, false},
-	{mmGB_TILE_MODE28, false},
-	{mmGB_TILE_MODE29, false},
-	{mmGB_TILE_MODE30, false},
-	{mmGB_TILE_MODE31, false},
-	{mmGB_MACROTILE_MODE0, false},
-	{mmGB_MACROTILE_MODE1, false},
-	{mmGB_MACROTILE_MODE2, false},
-	{mmGB_MACROTILE_MODE3, false},
-	{mmGB_MACROTILE_MODE4, false},
-	{mmGB_MACROTILE_MODE5, false},
-	{mmGB_MACROTILE_MODE6, false},
-	{mmGB_MACROTILE_MODE7, false},
-	{mmGB_MACROTILE_MODE8, false},
-	{mmGB_MACROTILE_MODE9, false},
-	{mmGB_MACROTILE_MODE10, false},
-	{mmGB_MACROTILE_MODE11, false},
-	{mmGB_MACROTILE_MODE12, false},
-	{mmGB_MACROTILE_MODE13, false},
-	{mmGB_MACROTILE_MODE14, false},
-	{mmGB_MACROTILE_MODE15, false},
-	{mmCC_RB_BACKEND_DISABLE, false, true},
-	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
-	{mmGB_BACKEND_MAP, false, false},
-	{mmPA_SC_RASTER_CONFIG, false, true},
-	{mmPA_SC_RASTER_CONFIG_1, false, true},
+	{mmGRBM_STATUS},
+	{mmGB_ADDR_CONFIG},
+	{mmMC_ARB_RAMCFG},
+	{mmGB_TILE_MODE0},
+	{mmGB_TILE_MODE1},
+	{mmGB_TILE_MODE2},
+	{mmGB_TILE_MODE3},
+	{mmGB_TILE_MODE4},
+	{mmGB_TILE_MODE5},
+	{mmGB_TILE_MODE6},
+	{mmGB_TILE_MODE7},
+	{mmGB_TILE_MODE8},
+	{mmGB_TILE_MODE9},
+	{mmGB_TILE_MODE10},
+	{mmGB_TILE_MODE11},
+	{mmGB_TILE_MODE12},
+	{mmGB_TILE_MODE13},
+	{mmGB_TILE_MODE14},
+	{mmGB_TILE_MODE15},
+	{mmGB_TILE_MODE16},
+	{mmGB_TILE_MODE17},
+	{mmGB_TILE_MODE18},
+	{mmGB_TILE_MODE19},
+	{mmGB_TILE_MODE20},
+	{mmGB_TILE_MODE21},
+	{mmGB_TILE_MODE22},
+	{mmGB_TILE_MODE23},
+	{mmGB_TILE_MODE24},
+	{mmGB_TILE_MODE25},
+	{mmGB_TILE_MODE26},
+	{mmGB_TILE_MODE27},
+	{mmGB_TILE_MODE28},
+	{mmGB_TILE_MODE29},
+	{mmGB_TILE_MODE30},
+	{mmGB_TILE_MODE31},
+	{mmGB_MACROTILE_MODE0},
+	{mmGB_MACROTILE_MODE1},
+	{mmGB_MACROTILE_MODE2},
+	{mmGB_MACROTILE_MODE3},
+	{mmGB_MACROTILE_MODE4},
+	{mmGB_MACROTILE_MODE5},
+	{mmGB_MACROTILE_MODE6},
+	{mmGB_MACROTILE_MODE7},
+	{mmGB_MACROTILE_MODE8},
+	{mmGB_MACROTILE_MODE9},
+	{mmGB_MACROTILE_MODE10},
+	{mmGB_MACROTILE_MODE11},
+	{mmGB_MACROTILE_MODE12},
+	{mmGB_MACROTILE_MODE13},
+	{mmGB_MACROTILE_MODE14},
+	{mmGB_MACROTILE_MODE15},
+	{mmCC_RB_BACKEND_DISABLE, true},
+	{mmGC_USER_RB_BACKEND_DISABLE, true},
+	{mmGB_BACKEND_MAP, false},
+	{mmPA_SC_RASTER_CONFIG, true},
+	{mmPA_SC_RASTER_CONFIG_1, true},
 };
 
 static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
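
The table change drops an always-meaningless middle field, so most entries shrink to a single member and the remaining flag defaults to false through zero-initialization. A sketch of the slimmed entry (field names mirror the .reg_offset/.grbm_indexed accesses in this diff; the offsets below are made up):

    #include <stdbool.h>
    #include <stdint.h>

    struct allowed_register_entry {
    	uint32_t reg_offset;
    	bool grbm_indexed;
    };

    static const struct allowed_register_entry example_table[] = {
    	{ 0x2004 },        /* plain MMIO read; grbm_indexed defaults to false */
    	{ 0x263d, true },  /* read through the GRBM index window */
    };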
@@ -1050,7 +1050,6 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
 			continue;
 
-		if (!cik_allowed_read_registers[i].untouched)
 		*value = cik_allowed_read_registers[i].grbm_indexed ?
 			cik_read_indexed_register(adev, se_num,
 						  sh_num, reg_offset) :
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -118,14 +118,27 @@ static const struct {
 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
-	return 0;
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
+	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
+
+	return r;
 }
 
 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 				      u32 block_offset, u32 reg, u32 v)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
+	       reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
+	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 }
 
 static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
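
These accessors use the classic index/data register-pair idiom: one MMIO slot selects which internal register the second slot accesses, so the select and the access must be serialized under a lock. A toy userspace model of the same pattern (array standing in for the hardware register file):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t backing[256];   /* the indirectly addressed register file */
    static uint32_t index_sel;      /* the ..._ENDPOINT_INDEX analogue */

    static uint32_t endpt_rreg(uint32_t reg)
    {
    	uint32_t v;

    	pthread_mutex_lock(&idx_lock);
    	index_sel = reg % 256;      /* WREG32(INDEX, reg) */
    	v = backing[index_sel];     /* RREG32(DATA) */
    	pthread_mutex_unlock(&idx_lock);
    	return v;
    }

    static void endpt_wreg(uint32_t reg, uint32_t val)
    {
    	pthread_mutex_lock(&idx_lock);
    	index_sel = reg % 256;      /* WREG32(INDEX, reg | WRITE_EN) */
    	backing[index_sel] = val;   /* WREG32(DATA, val) */
    	pthread_mutex_unlock(&idx_lock);
    }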
@@ -501,21 +514,16 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 
 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
 {
-	int num_crtc = 0;
-
 	switch (adev->asic_type) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
-		num_crtc = 6;
-		break;
+		return 6;
 	case CHIP_OLAND:
-		num_crtc = 2;
-		break;
+		return 2;
 	default:
-		num_crtc = 0;
+		return 0;
 	}
-	return num_crtc;
 }
 
 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
@@ -1222,17 +1230,17 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
 			dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
 	}
 }
-/*
+
 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
 {
 	int i;
-	u32 offset, tmp;
+	u32 tmp;
 
 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
-		offset = adev->mode_info.audio.pin[i].offset;
-		tmp = RREG32_AUDIO_ENDPT(offset,
-			AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
-		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
+			ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+				PORT_CONNECTIVITY))
 			adev->mode_info.audio.pin[i].connected = false;
 		else
 			adev->mode_info.audio.pin[i].connected = true;
@@ -1254,45 +1262,206 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
 	return NULL;
 }
 
-static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
 {
 	struct amdgpu_device *adev = encoder->dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-	u32 offset;
 
 	if (!dig || !dig->afmt || !dig->afmt->pin)
 		return;
 
-	offset = dig->afmt->offset;
-
-	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
-
+	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
+			     dig->afmt->pin->id));
 }
 
 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 						struct drm_display_mode *mode)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector = NULL;
+	int interlace = 0;
+	u32 tmp;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			amdgpu_connector = to_amdgpu_connector(connector);
+			break;
+		}
+	}
+
+	if (!amdgpu_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		interlace = 1;
+
+	if (connector->latency_present[interlace]) {
+		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
+	} else {
+		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+				    VIDEO_LIPSYNC, 0);
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+				    AUDIO_LIPSYNC, 0);
+	}
+	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector = NULL;
+	u8 *sadb = NULL;
+	int sad_count;
+	u32 tmp;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			amdgpu_connector = to_amdgpu_connector(connector);
+			break;
+		}
+	}
+
+	if (!amdgpu_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+		sad_count = 0;
+	}
+
+	/* program the speaker allocation */
+	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			    HDMI_CONNECTION, 0);
+	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+			    DP_CONNECTION, 0);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+				    DP_CONNECTION, 1);
+	else
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+				    HDMI_CONNECTION, 1);
+
+	if (sad_count)
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+				    SPEAKER_ALLOCATION, sadb[0]);
+	else
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+				    SPEAKER_ALLOCATION, 5); /* stereo */
+
+	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+	kfree(sadb);
 }
 
 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector = NULL;
+	struct cea_sad *sads;
+	int i, sad_count;
+
+	static const u16 eld_reg_to_type[][2] = {
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			amdgpu_connector = to_amdgpu_connector(connector);
+			break;
+		}
+	}
+
+	if (!amdgpu_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
+	if (sad_count <= 0) {
+		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 tmp = 0;
+		u8 stereo_freqs = 0;
+		int max_channels = -1;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				if (sad->channels > max_channels) {
+					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+							    MAX_CHANNELS, sad->channels);
+					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+							    DESCRIPTOR_BYTE_2, sad->byte2);
+					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+							    SUPPORTED_FREQUENCIES, sad->freq);
+					max_channels = sad->channels;
+				}
+
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					stereo_freqs |= sad->freq;
+				else
+					break;
+			}
+		}
+
+		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
+		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+	}
+
+	kfree(sads);
 }
-*/
 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
 				  struct amdgpu_audio_pin *pin,
 				  bool enable)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+	if (!pin)
+		return;
+
+	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
 }
 
 static const u32 pin_offsets[7] =
@@ -1308,42 +1477,372 @@ static const u32 pin_offsets[7] =
 
 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
 {
+	int i;
+
+	if (!amdgpu_audio)
+		return 0;
+
+	adev->mode_info.audio.enabled = true;
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	default:
+		adev->mode_info.audio.num_pins = 6;
+		break;
+	case CHIP_OLAND:
+		adev->mode_info.audio.num_pins = 2;
+		break;
+	}
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+		adev->mode_info.audio.pin[i].channels = -1;
+		adev->mode_info.audio.pin[i].rate = -1;
+		adev->mode_info.audio.pin[i].bits_per_sample = -1;
+		adev->mode_info.audio.pin[i].status_bits = 0;
+		adev->mode_info.audio.pin[i].category_code = 0;
+		adev->mode_info.audio.pin[i].connected = false;
+		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
+		adev->mode_info.audio.pin[i].id = i;
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+	}
+
 	return 0;
 }
 
 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
 {
+	int i;
+
+	if (!amdgpu_audio)
+		return;
+
+	if (!adev->mode_info.audio.enabled)
+		return;
+
+	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
+		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+
+	adev->mode_info.audio.enabled = false;
 }
 
-/*
-static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
 {
-	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
+	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
+	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
+	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
 }
-*/
-/*
- * build a HDMI Video Info Frame
- */
-/*
-static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
-					       void *buffer, size_t size)
+
+static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
+				   uint32_t clock, int bpc)
 {
-	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
+			    bpc > 8 ? 0 : 1);
+	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
+	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
+	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
+	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
+	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
+	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
+	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
+	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
+	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
+	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
+}
+
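
The N/CTS pairs programmed above implement the HDMI audio clock regeneration relation 128 x fs = f_TMDS x N / CTS: the sink rebuilds the audio sample clock from the link clock and these two integers. A standalone check with the spec-recommended N for 48 kHz:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t f_tmds = 25200000;  /* Hz, example TMDS clock */
    	const uint64_t n = 6144;           /* recommended N for 48 kHz */
    	const uint64_t fs = 48000;         /* Hz */
    	uint64_t cts = f_tmds * n / (128 * fs);

    	/* prints CTS = 25200, the well-known value for this clock */
    	printf("N = %llu, CTS = %llu\n",
    	       (unsigned long long)n, (unsigned long long)cts);
    	return 0;
    }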
+static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
+					     struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct hdmi_avi_infoframe frame;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	uint8_t *payload = buffer + 3;
+	uint8_t *header = buffer;
+	ssize_t err;
+	u32 tmp;
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
+	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
+	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
+	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
+	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
+	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
+	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
+	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
+
+	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+	/* anything other than 0 */
+	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
+			    HDMI_AUDIO_INFO_LINE, 2);
+	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+}
+
 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
-	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
-}
-*/
-
-/*
- * update the info frames with the data from the current display mode
- */
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+	u32 tmp;
+
+	/*
+	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
+	 * Express [24MHz / target pixel clock] as an exact rational
+	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+	 */
+	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
+	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+			    DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
+	if (em == ATOM_ENCODER_MODE_HDMI) {
+		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+				    DCCG_AUDIO_DTO_SEL, 0);
+	} else if (ENCODER_MODE_IS_DP(em)) {
+		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+				    DCCG_AUDIO_DTO_SEL, 1);
+	}
+	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
+	if (em == ATOM_ENCODER_MODE_HDMI) {
+		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
+		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
+	} else if (ENCODER_MODE_IS_DP(em)) {
+		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
+		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
+	}
+}
+
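
Both PHASE and MODULE are in kHz here, so PHASE is fixed at 24000 (24 MHz) and MODULE is simply the pixel clock. A standalone check of the ratio for a common mode:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int phase = 24000;   /* 24 MHz expressed in kHz */
    	unsigned int clock = 148500;  /* e.g. a 1080p60 pixel clock in kHz */

    	/* the hardware divider runs at exactly 24 MHz / pixel clock */
    	printf("DTO ratio = %u/%u = %f\n", phase, clock,
    	       (double)phase / clock);   /* ~0.1616 */
    	return 0;
    }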
+static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
+	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
+	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
+	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
+	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
+	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
+	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+}
+
+static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
+	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
+	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
+}
+
+static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	if (enable) {
+		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
+		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
+		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+	} else {
+		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
+		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
+		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
+		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+	}
+}
+
+static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u32 tmp;
+
+	if (enable) {
+		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
+		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
+
+		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
+		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
+		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
+		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
+	} else {
+		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
+	}
+}
+
 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode)
 {
-	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector = NULL;
+	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+	int bpc = 8;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			amdgpu_connector = to_amdgpu_connector(connector);
+			break;
+		}
+	}
+
+	if (!amdgpu_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (!dig->afmt->enabled)
+		return;
+
+	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
+	if (!dig->afmt->pin)
+		return;
+
+	if (encoder->crtc) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+		bpc = amdgpu_crtc->bpc;
+	}
+
+	/* disable audio before setting up hw */
+	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+
+	dce_v6_0_audio_set_mute(encoder, true);
+	dce_v6_0_audio_write_speaker_allocation(encoder);
+	dce_v6_0_audio_write_sad_regs(encoder);
+	dce_v6_0_audio_write_latency_fields(encoder, mode);
+	if (em == ATOM_ENCODER_MODE_HDMI) {
+		dce_v6_0_audio_set_dto(encoder, mode->clock);
+		dce_v6_0_audio_set_vbi_packet(encoder);
+		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
+	} else if (ENCODER_MODE_IS_DP(em)) {
+		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
+	}
+	dce_v6_0_audio_set_packet(encoder);
+	dce_v6_0_audio_select_pin(encoder);
+	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
+	dce_v6_0_audio_set_mute(encoder, false);
+	if (em == ATOM_ENCODER_MODE_HDMI) {
+		dce_v6_0_audio_hdmi_enable(encoder, 1);
+	} else if (ENCODER_MODE_IS_DP(em)) {
+		dce_v6_0_audio_dp_enable(encoder, 1);
+	}
+
+	/* enable audio after setting up hw */
+	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
 }
 
 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
@@ -1359,6 +1858,7 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (enable && dig->afmt->enabled)
 		return;
+
 	if (!enable && !dig->afmt->enabled)
 		return;
 
@@ -2753,6 +3253,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
 {
 
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
 
 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
 
@@ -2762,7 +3263,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
|
|||||||
/* set scaler clears this on some chips */
|
/* set scaler clears this on some chips */
|
||||||
dce_v6_0_set_interleave(encoder->crtc, mode);
|
dce_v6_0_set_interleave(encoder->crtc, mode);
|
||||||
|
|
||||||
if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
|
if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
|
||||||
dce_v6_0_afmt_enable(encoder, true);
|
dce_v6_0_afmt_enable(encoder, true);
|
||||||
dce_v6_0_afmt_setmode(encoder, adjusted_mode);
|
dce_v6_0_afmt_setmode(encoder, adjusted_mode);
|
||||||
}
|
}
|
||||||
@@ -2824,11 +3325,12 @@ static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
 
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig;
+	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
 
 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
-		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
 			dce_v6_0_afmt_enable(encoder, false);
 		dig = amdgpu_encoder->enc_priv;
 		dig->dig_encoder = -1;
File diff suppressed because it is too large
@@ -27,6 +27,7 @@
 #include "amdgpu_gfx.h"
 #include "cikd.h"
 #include "cik.h"
+#include "cik_structs.h"
 #include "atom.h"
 #include "amdgpu_ucode.h"
 #include "clearstate_ci.h"
@@ -48,7 +49,7 @@
 #include "oss/oss_2_0_sh_mask.h"
 
 #define GFX7_NUM_GFX_RINGS     1
-#define GFX7_NUM_COMPUTE_RINGS 8
+#define GFX7_MEC_HPD_SIZE      2048
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1606,19 +1607,6 @@ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
 	WREG32(mmGRBM_GFX_INDEX, data);
 }
 
-/**
- * gfx_v7_0_create_bitmask - create a bitmask
- *
- * @bit_width: length of the mask
- *
- * create a variable length bit mask (CIK).
- * Returns the bitmask.
- */
-static u32 gfx_v7_0_create_bitmask(u32 bit_width)
-{
-	return (u32)((1ULL << bit_width) - 1);
-}
-
 /**
  * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
  *
@@ -1637,7 +1625,7 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
 
-	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
+	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
				       adev->gfx.config.max_sh_per_se);
 
 	return (~data) & mask;
@@ -1837,7 +1825,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 /**
  * gmc_v7_0_init_compute_vmid - gart enable
  *
- * @rdev: amdgpu_device pointer
+ * @adev: amdgpu_device pointer
  *
  * Initialize compute vmid sh_mem registers
  *
@@ -2821,26 +2809,23 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
 	}
 }
 
-#define MEC_HPD_SIZE 2048
-
 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 {
 	int r;
 	u32 *hpd;
+	size_t mec_hpd_size;
 
-	/*
-	 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
-	 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
-	 * Nonetheless, we assign only 1 pipe because all other pipes will
-	 * be handled by KFD
-	 */
-	adev->gfx.mec.num_mec = 1;
-	adev->gfx.mec.num_pipe = 1;
-	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
+	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+	/* take ownership of the relevant compute queues */
+	amdgpu_gfx_compute_queue_acquire(adev);
+
+	/* allocate space for ALL pipes (even the ones we don't own) */
+	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
+		* GFX7_MEC_HPD_SIZE * 2;
 
 	if (adev->gfx.mec.hpd_eop_obj == NULL) {
 		r = amdgpu_bo_create(adev,
-				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+				     mec_hpd_size,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
@@ -2870,7 +2855,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	}
 
 	/* clear memory.  Not sure if this is required or not */
-	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+	memset(hpd, 0, mec_hpd_size);
 
 	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
 	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
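Worked example for the allocation above (my arithmetic, not something the patch spells out): with the Kaveri topology set later in sw_init (2 MECs, 4 pipes per MEC) and GFX7_MEC_HPD_SIZE of 2048, the EOP buffer comes to 32 KiB, a 4 KiB slice per pipe:

    /* Sketch: the HPD/EOP sizing used above, with Kaveri numbers assumed. */
    size_t mec_hpd_size = 2 /* num_mec */ * 4 /* num_pipe_per_mec */
    		* 2048 /* GFX7_MEC_HPD_SIZE */ * 2;	/* = 32768 bytes */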
@@ -2917,33 +2902,256 @@ struct hqd_registers
 	u32 cp_mqd_control;
 };
 
-struct bonaire_mqd
+static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
+				       int mec, int pipe)
 {
-	u32 header;
-	u32 dispatch_initiator;
-	u32 dimensions[3];
-	u32 start_idx[3];
-	u32 num_threads[3];
-	u32 pipeline_stat_enable;
-	u32 perf_counter_enable;
-	u32 pgm[2];
-	u32 tba[2];
-	u32 tma[2];
-	u32 pgm_rsrc[2];
-	u32 vmid;
-	u32 resource_limits;
-	u32 static_thread_mgmt01[2];
-	u32 tmp_ring_size;
-	u32 static_thread_mgmt23[2];
-	u32 restart[3];
-	u32 thread_trace_enable;
-	u32 reserved1;
-	u32 user_data[16];
-	u32 vgtcs_invoke_count[2];
-	struct hqd_registers queue_state;
-	u32 dequeue_cntr;
-	u32 interrupt_queue[64];
-};
+	u64 eop_gpu_addr;
+	u32 tmp;
+	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
+			  * GFX7_MEC_HPD_SIZE * 2;
+
+	mutex_lock(&adev->srbm_mutex);
+	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
+
+	cik_srbm_select(adev, mec + 1, pipe, 0, 0);
+
+	/* write the EOP addr */
+	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
+	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
+
+	/* set the VMID assigned */
+	WREG32(mmCP_HPD_EOP_VMID, 0);
+
+	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
+	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
+	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
+	WREG32(mmCP_HPD_EOP_CONTROL, tmp);
+
+	cik_srbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
+{
+	int i;
+
+	/* disable the queue if it's active */
+	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+				break;
+			udelay(1);
+		}
+
+		if (i == adev->usec_timeout)
+			return -ETIMEDOUT;
+
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+		WREG32(mmCP_HQD_PQ_RPTR, 0);
+		WREG32(mmCP_HQD_PQ_WPTR, 0);
+	}
+
+	return 0;
+}
+
+static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
+			      struct cik_mqd *mqd,
+			      uint64_t mqd_gpu_addr,
+			      struct amdgpu_ring *ring)
+{
+	u64 hqd_gpu_addr;
+	u64 wb_gpu_addr;
+
+	/* init the mqd struct */
+	memset(mqd, 0, sizeof(struct cik_mqd));
+
+	mqd->header = 0xC0310800;
+	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+
+	/* enable doorbell? */
+	mqd->cp_hqd_pq_doorbell_control =
+		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+	if (ring->use_doorbell)
+		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+	else
+		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+
+	/* set the pointer to the MQD */
+	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
+	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+
+	/* set MQD vmid to 0 */
+	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
+	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
+
+	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
+	hqd_gpu_addr = ring->gpu_addr >> 8;
+	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+	/* set up the HQD, this is similar to CP_RB0_CNTL */
+	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
+	mqd->cp_hqd_pq_control &=
+		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
+		  CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
+
+	mqd->cp_hqd_pq_control |=
+		order_base_2(ring->ring_size / 8);
+	mqd->cp_hqd_pq_control |=
+		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
+#ifdef __BIG_ENDIAN
+	mqd->cp_hqd_pq_control |=
+		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
+#endif
+	mqd->cp_hqd_pq_control &=
+		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
+		  CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
+		  CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
+	mqd->cp_hqd_pq_control |=
+		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
+		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
+
+	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* set the wb address wether it's enabled or not */
+	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+	mqd->cp_hqd_pq_rptr_report_addr_hi =
+		upper_32_bits(wb_gpu_addr) & 0xffff;
+
+	/* enable the doorbell if requested */
+	if (ring->use_doorbell) {
+		mqd->cp_hqd_pq_doorbell_control =
+			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+		mqd->cp_hqd_pq_doorbell_control &=
+			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
+		mqd->cp_hqd_pq_doorbell_control |=
+			(ring->doorbell_index <<
+			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
+		mqd->cp_hqd_pq_doorbell_control |=
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
+		mqd->cp_hqd_pq_doorbell_control &=
+			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
+			  CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
+
+	} else {
+		mqd->cp_hqd_pq_doorbell_control = 0;
+	}
+
+	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+	ring->wptr = 0;
+	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
+	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
+	/* set the vmid for the queue */
+	mqd->cp_hqd_vmid = 0;
+
+	/* defaults */
+	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
+	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
+	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
+	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
+	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
+	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
+	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
+	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
+	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
+	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
+	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
+	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
+	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
+	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
+
+	/* activate the queue */
+	mqd->cp_hqd_active = 1;
+}
+
+int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
+{
+	uint32_t tmp;
+	uint32_t mqd_reg;
+	uint32_t *mqd_data;
+
+	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
+	mqd_data = &mqd->cp_mqd_base_addr_lo;
+
+	/* disable wptr polling */
+	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
+	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+
+	/* program all HQD registers */
+	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
+		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
+
+	/* activate the HQD */
+	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
+		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
+
+	return 0;
+}
+
+static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
+{
+	int r;
+	u64 mqd_gpu_addr;
+	struct cik_mqd *mqd;
+	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+
+	if (ring->mqd_obj == NULL) {
+		r = amdgpu_bo_create(adev,
+				     sizeof(struct cik_mqd),
+				     PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     &ring->mqd_obj);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+			return r;
+		}
+	}
+
+	r = amdgpu_bo_reserve(ring->mqd_obj, false);
+	if (unlikely(r != 0))
+		goto out;
+
+	r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
+			  &mqd_gpu_addr);
+	if (r) {
+		dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
+		goto out_unreserve;
+	}
+	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+	if (r) {
+		dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
+		goto out_unreserve;
+	}
+
+	mutex_lock(&adev->srbm_mutex);
+	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
+	gfx_v7_0_mqd_deactivate(adev);
+	gfx_v7_0_mqd_commit(adev, mqd);
+
+	cik_srbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+
+	amdgpu_bo_kunmap(ring->mqd_obj);
+out_unreserve:
+	amdgpu_bo_unreserve(ring->mqd_obj);
+out:
+	return 0;
+}
+
 /**
  * gfx_v7_0_cp_compute_resume - setup the compute queue registers
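Worked check of the EOP_SIZE programming in gfx_v7_0_compute_pipe_init above (my arithmetic, not part of the patch): as the comment says, the field encodes 2^(EOP_SIZE+1) dwords, so for a 2048-byte buffer:

    /* Sketch: GFX7_MEC_HPD_SIZE = 2048 bytes = 512 dwords.
     * order_base_2(2048 / 8) = order_base_2(256) = 8,
     * and the CP decodes 2^(8 + 1) = 512 dwords = 2048 bytes.
     */
    u32 eop_size_field = order_base_2(2048 / 8);	/* = 8 */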
@@ -2958,13 +3166,6 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 {
 	int r, i, j;
 	u32 tmp;
-	bool use_doorbell = true;
-	u64 hqd_gpu_addr;
-	u64 mqd_gpu_addr;
-	u64 eop_gpu_addr;
-	u64 wb_gpu_addr;
-	u32 *buf;
-	struct bonaire_mqd *mqd;
 	struct amdgpu_ring *ring;
 
 	/* fix up chicken bits */
@@ -2972,220 +3173,25 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 	tmp |= (1 << 23);
 	WREG32(mmCP_CPF_DEBUG, tmp);
 
-	/* init the pipes */
-	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
-		int me = (i < 4) ? 1 : 2;
-		int pipe = (i < 4) ? i : (i - 4);
-
-		eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
-
-		cik_srbm_select(adev, me, pipe, 0, 0);
-
-		/* write the EOP addr */
-		WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
-		WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
-
-		/* set the VMID assigned */
-		WREG32(mmCP_HPD_EOP_VMID, 0);
-
-		/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
-		tmp = RREG32(mmCP_HPD_EOP_CONTROL);
-		tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
-		tmp |= order_base_2(MEC_HPD_SIZE / 8);
-		WREG32(mmCP_HPD_EOP_CONTROL, tmp);
-	}
-	cik_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
+	/* init all pipes (even the ones we don't own) */
+	for (i = 0; i < adev->gfx.mec.num_mec; i++)
+		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
+			gfx_v7_0_compute_pipe_init(adev, i, j);
 
-	/* init the queues. Just two for now. */
+	/* init the queues */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		ring = &adev->gfx.compute_ring[i];
-
-		if (ring->mqd_obj == NULL) {
-			r = amdgpu_bo_create(adev,
-					     sizeof(struct bonaire_mqd),
-					     PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-					     &ring->mqd_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(ring->mqd_obj, false);
-		if (unlikely(r != 0)) {
+		r = gfx_v7_0_compute_queue_init(adev, i);
+		if (r) {
 			gfx_v7_0_cp_compute_fini(adev);
 			return r;
 		}
-		r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
-				  &mqd_gpu_addr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
-			gfx_v7_0_cp_compute_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
-			gfx_v7_0_cp_compute_fini(adev);
-			return r;
-		}
-
-		/* init the mqd struct */
-		memset(buf, 0, sizeof(struct bonaire_mqd));
-
-		mqd = (struct bonaire_mqd *)buf;
-		mqd->header = 0xC0310800;
-		mqd->static_thread_mgmt01[0] = 0xffffffff;
-		mqd->static_thread_mgmt01[1] = 0xffffffff;
-		mqd->static_thread_mgmt23[0] = 0xffffffff;
-		mqd->static_thread_mgmt23[1] = 0xffffffff;
-
-		mutex_lock(&adev->srbm_mutex);
-		cik_srbm_select(adev, ring->me,
-				ring->pipe,
-				ring->queue, 0);
-
-		/* disable wptr polling */
-		tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
-		tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
-		WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
-
-		/* enable doorbell? */
-		mqd->queue_state.cp_hqd_pq_doorbell_control =
-			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
-		if (use_doorbell)
-			mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
-		else
-			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
-		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
-		       mqd->queue_state.cp_hqd_pq_doorbell_control);
-
-		/* disable the queue if it's active */
-		mqd->queue_state.cp_hqd_dequeue_request = 0;
-		mqd->queue_state.cp_hqd_pq_rptr = 0;
-		mqd->queue_state.cp_hqd_pq_wptr= 0;
-		if (RREG32(mmCP_HQD_ACTIVE) & 1) {
-			WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
-					break;
-				udelay(1);
-			}
-			WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
-			WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
-			WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
-		}
-
-		/* set the pointer to the MQD */
-		mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
-		mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
-		WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
-		WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
-		/* set MQD vmid to 0 */
-		mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
-		mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
-		WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
-
-		/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
-		hqd_gpu_addr = ring->gpu_addr >> 8;
-		mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
-		mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
-		WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
-		WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
-
-		/* set up the HQD, this is similar to CP_RB0_CNTL */
-		mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
-		mqd->queue_state.cp_hqd_pq_control &=
-			~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
-			  CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
-
-		mqd->queue_state.cp_hqd_pq_control |=
-			order_base_2(ring->ring_size / 8);
-		mqd->queue_state.cp_hqd_pq_control |=
-			(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
-#ifdef __BIG_ENDIAN
-		mqd->queue_state.cp_hqd_pq_control |=
-			2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
-#endif
-		mqd->queue_state.cp_hqd_pq_control &=
-			~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
-			  CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
-			  CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
-		mqd->queue_state.cp_hqd_pq_control |=
-			CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
-			CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
-		WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
-
-		/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
-		wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-		mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
-		mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
-		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
-		WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
-		       mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
-
-		/* set the wb address wether it's enabled or not */
-		wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
-		mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
-		mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
-			upper_32_bits(wb_gpu_addr) & 0xffff;
-		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
-		       mqd->queue_state.cp_hqd_pq_rptr_report_addr);
-		WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
-		       mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
-
-		/* enable the doorbell if requested */
-		if (use_doorbell) {
-			mqd->queue_state.cp_hqd_pq_doorbell_control =
-				RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
-			mqd->queue_state.cp_hqd_pq_doorbell_control &=
-				~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
-			mqd->queue_state.cp_hqd_pq_doorbell_control |=
-				(ring->doorbell_index <<
-				 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
-			mqd->queue_state.cp_hqd_pq_doorbell_control |=
-				CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
-			mqd->queue_state.cp_hqd_pq_doorbell_control &=
-				~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
-				  CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
-
-		} else {
-			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
-		}
-		WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
-		       mqd->queue_state.cp_hqd_pq_doorbell_control);
-
-		/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
-		ring->wptr = 0;
-		mqd->queue_state.cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
-		WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
-		mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
-
-		/* set the vmid for the queue */
-		mqd->queue_state.cp_hqd_vmid = 0;
-		WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
-
-		/* activate the queue */
-		mqd->queue_state.cp_hqd_active = 1;
-		WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
-
-		cik_srbm_select(adev, 0, 0, 0, 0);
-		mutex_unlock(&adev->srbm_mutex);
-
-		amdgpu_bo_kunmap(ring->mqd_obj);
-		amdgpu_bo_unreserve(ring->mqd_obj);
-
-		ring->ready = true;
 	}
 
 	gfx_v7_0_cp_compute_enable(adev, true);
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
+		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r)
 			ring->ready = false;
@@ -3797,6 +3803,9 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 		gfx_v7_0_update_rlc(adev, tmp);
 
 		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+		if (orig != data)
+			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+
 	} else {
 		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
 
@@ -3806,11 +3815,11 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 		RREG32(mmCB_CGTT_SCLK_CTRL);
 
 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
-	}
 
-	if (orig != data)
-		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+		if (orig != data)
+			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
+
+		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
+	}
 }
 
 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
@@ -4089,7 +4098,7 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
+	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
 	return (~data) & mask;
 }
@@ -4470,7 +4479,7 @@ static int gfx_v7_0_early_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
-	adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
 	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
 	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
 	gfx_v7_0_set_ring_funcs(adev);
@@ -4662,11 +4671,57 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	adev->gfx.config.gb_addr_config = gb_addr_config;
 }
 
+static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+				      int mec, int pipe, int queue)
+{
+	int r;
+	unsigned irq_type;
+	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+
+	/* mec0 is me1 */
+	ring->me = mec + 1;
+	ring->pipe = pipe;
+	ring->queue = queue;
+
+	ring->ring_obj = NULL;
+	ring->use_doorbell = true;
+	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+
+	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+		+ ring->pipe;
+
+	/* type-2 packets are deprecated on MEC, use type-3 instead */
+	r = amdgpu_ring_init(adev, ring, 1024,
+			     &adev->gfx.eop_irq, irq_type);
+	if (r)
+		return r;
+
+
+	return 0;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i, r;
+	int i, j, k, r, ring_id;
+
+	switch (adev->asic_type) {
+	case CHIP_KAVERI:
+		adev->gfx.mec.num_mec = 2;
+		break;
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	default:
+		adev->gfx.mec.num_mec = 1;
+		break;
+	}
+	adev->gfx.mec.num_pipe_per_mec = 4;
+	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* EOP Event */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
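The irq_type computed in gfx_v7_0_compute_ring_init above assumes the compute EOP interrupt sources are laid out contiguously, MEC1 pipes first and then MEC2; that layout is my reading of the enum, not something this diff shows. A worked instance under that assumption:

    /* Sketch: EOP irq source for MEC2 (me == 2), pipe 1, with 4 pipes per MEC:
     * base + (2 - 1) * 4 + 1 = base + 5, i.e. the sixth compute EOP source.
     */
    unsigned irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + (2 - 1) * 4 + 1;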
@@ -4716,29 +4771,23 @@ static int gfx_v7_0_sw_init(void *handle)
 		return r;
 	}
 
-	/* set up the compute queues */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		unsigned irq_type;
-
-		/* max 32 queues per MEC */
-		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
-			DRM_ERROR("Too many (%d) compute rings!\n", i);
-			break;
-		}
-		ring = &adev->gfx.compute_ring[i];
-		ring->ring_obj = NULL;
-		ring->use_doorbell = true;
-		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
-		ring->me = 1; /* first MEC */
-		ring->pipe = i / 8;
-		ring->queue = i % 8;
-		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
-		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
-		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, irq_type);
-		if (r)
-			return r;
+	/* set up the compute queues - allocate horizontally across pipes */
+	ring_id = 0;
+	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+					continue;
+
+				r = gfx_v7_0_compute_ring_init(adev,
+							       ring_id,
+							       i, k, j);
+				if (r)
+					return r;
+
+				ring_id++;
+			}
+		}
 	}
 
 	/* reserve GDS, GWS and OA resource for gfx */
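The triple loop above iterates queue before pipe, so rings land breadth-first across pipes: assuming every queue is enabled in the bitmap, ring 0 maps to comp_1.0.0, ring 1 to comp_1.1.0, ring 2 to comp_1.2.0, ring 3 to comp_1.3.0, and only then does ring 4 reuse pipe 0 as comp_1.0.1. A standalone sketch of the same iteration order:

    #include <stdio.h>

    /* Sketch: the mec -> queue -> pipe nesting used above,
     * with all queues assumed enabled.
     */
    static void show_ring_layout(int num_mec, int num_queue_per_pipe,
    			     int num_pipe_per_mec)
    {
    	int ring_id = 0;

    	for (int mec = 0; mec < num_mec; ++mec)
    		for (int queue = 0; queue < num_queue_per_pipe; ++queue)
    			for (int pipe = 0; pipe < num_pipe_per_mec; ++pipe)
    				printf("ring %d -> comp_%d.%d.%d\n",
    				       ring_id++, mec + 1, pipe, queue);
    }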
@@ -4969,8 +5018,8 @@ static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
 	u32 mec_int_cntl, mec_int_cntl_reg;
 
 	/*
-	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
-	 * handles the setting of interrupts for this specific pipe. All other
+	 * amdgpu controls only the first MEC. That's why this function only
+	 * handles the setting of interrupts for this specific MEC. All other
 	 * pipes' interrupts are set by amdkfd.
 	 */
 
@@ -4979,6 +5028,15 @@ static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
 		case 0:
 			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
 			break;
+		case 1:
+			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
+			break;
+		case 2:
+			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
+			break;
+		case 3:
+			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
+			break;
 		default:
 			DRM_DEBUG("invalid pipe %d\n", pipe);
 			return;
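The switch added above is a straight pipe-to-register table; an equivalent lookup, with the array and helper name being mine rather than the driver's:

    /* Sketch: table form of the pipe -> INT_CNTL register mapping above. */
    static const u32 me1_pipe_int_cntl[] = {
    	mmCP_ME1_PIPE0_INT_CNTL,
    	mmCP_ME1_PIPE1_INT_CNTL,
    	mmCP_ME1_PIPE2_INT_CNTL,
    	mmCP_ME1_PIPE3_INT_CNTL,
    };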
@@ -5336,6 +5394,12 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
 	unsigned disable_masks[4 * 2];
+	u32 ao_cu_num;
+
+	if (adev->flags & AMD_IS_APU)
+		ao_cu_num = 2;
+	else
+		ao_cu_num = adev->gfx.config.max_cu_per_sh;
 
 	memset(cu_info, 0, sizeof(*cu_info));
 
@@ -5354,9 +5418,9 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
 			cu_info->bitmap[i][j] = bitmap;
 
-			for (k = 0; k < 16; k ++) {
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
 				if (bitmap & mask) {
-					if (counter < 2)
+					if (counter < ao_cu_num)
 						ao_bitmap |= mask;
 					counter ++;
 				}
@@ -29,4 +29,9 @@ extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
 extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
 extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
 
+struct amdgpu_device;
+struct cik_mqd;
+
+int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd);
+
 #endif
File diff suppressed because it is too large
@@ -27,4 +27,9 @@
 extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
 extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
 
+struct amdgpu_device;
+struct vi_mqd;
+
+int gfx_v8_0_mqd_commit(struct amdgpu_device *adev, struct vi_mqd *mqd);
+
 #endif
File diff suppressed because it is too large
@@ -31,128 +31,14 @@
 
 #include "soc15_common.h"
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
+u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
 {
-	u32 tmp;
-	u64 value;
-	u32 i;
-
-	/* Program MC. */
-	/* Update configuration */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
-	       adev->mc.vram_start >> 18);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
-	       adev->mc.vram_end >> 18);
-
-	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
-		+ adev->vm_manager.vram_base_offset;
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
-	       (u32)(value >> 12));
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
-	       (u32)(value >> 44));
-
-	if (amdgpu_sriov_vf(adev)) {
-		/* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are VF copy registers so
-		   vbios post doesn't program them, for SRIOV driver need to program them */
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_BASE),
-		       adev->mc.vram_start >> 24);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_TOP),
-		       adev->mc.vram_end >> 24);
+	return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
 }
 
-	/* Disable AGP. */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BASE), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_TOP), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BOT), 0xFFFFFFFF);
-
-	/* GART Enable. */
-
-	/* Setup TLB control */
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
-	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    SYSTEM_ACCESS_MODE,
-			    3);
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    ENABLE_ADVANCED_DRIVER_MODEL,
-			    1);
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    SYSTEM_APERTURE_UNMAPPED_ACCESS,
-			    0);
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    ECO_BITS,
-			    0);
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    MTYPE,
-			    MTYPE_UC);/* XXX for emulation. */
-	tmp = REG_SET_FIELD(tmp,
-			    MC_VM_MX_L1_TLB_CNTL,
-			    ATC_EN,
-			    1);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);
-
-	/* Setup L2 cache */
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL,
-			    ENABLE_L2_FRAGMENT_PROCESSING,
-			    0);
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL,
-			    L2_PDE0_CACHE_TAG_GENERATION_MODE,
-			    0);/* XXX for emulation, Refer to closed source code.*/
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL,
-			    CONTEXT1_IDENTITY_ACCESS_MODE,
-			    1);
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL,
-			    IDENTITY_MODE_FRAGMENT_SIZE,
-			    0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);
-
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2));
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2), tmp);
-
-	tmp = mmVM_L2_CNTL3_DEFAULT;
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), tmp);
-
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4));
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL4,
-			    VMC_TAP_PDE_REQUEST_PHYSICAL,
-			    0);
-	tmp = REG_SET_FIELD(tmp,
-			    VM_L2_CNTL4,
-			    VMC_TAP_PTE_REQUEST_PHYSICAL,
-			    0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4), tmp);
-
-	/* setup context0 */
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
-	       (u32)(adev->mc.gtt_start >> 12));
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
-	       (u32)(adev->mc.gtt_start >> 44));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
-	       (u32)(adev->mc.gtt_end >> 12));
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
-	       (u32)(adev->mc.gtt_end >> 44));
+static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+{
+	uint64_t value;
 
 	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
 	value = adev->gart.table_addr - adev->mc.vram_start
@@ -160,49 +46,146 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /*valid bit*/
 
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
-	       (u32)value);
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
-	       (u32)(value >> 32));
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+		     lower_32_bits(value));
 
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+		     upper_32_bits(value));
+}
+
+static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+	gfxhub_v1_0_init_gart_pt_regs(adev);
+
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+		     (u32)(adev->mc.gtt_start >> 12));
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+		     (u32)(adev->mc.gtt_start >> 44));
+
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+		     (u32)(adev->mc.gtt_end >> 12));
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+		     (u32)(adev->mc.gtt_end >> 44));
+}
+
+static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+	uint64_t value;
+
+	/* Disable AGP. */
+	WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0);
+	WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+	WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFFFF);
+
+	/* Program the system aperture low logical page number. */
+	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+		     adev->mc.vram_start >> 18);
+	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+		     adev->mc.vram_end >> 18);
+
+	/* Set default page address. */
+	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
+		+ adev->vm_manager.vram_base_offset;
+	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+		     (u32)(value >> 12));
+	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+		     (u32)(value >> 44));
+
+	/* Program "protection fault". */
+	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
 	       (u32)(adev->dummy_page.addr >> 12));
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
+	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
 	       (u32)((u64)adev->dummy_page.addr >> 44));
 
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
-	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
-			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
-			    1);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);
+	WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
+		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+}
 
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL));
+static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	/* Setup TLB control */
+	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
+
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+			    MTYPE, MTYPE_UC);/* XXX for emulation. */
+	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+	WREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	/* Setup L2 cache */
+	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+	/* XXX for emulation, Refer to closed source code.*/
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+			    0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
+
+	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+	WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
+
+	tmp = mmVM_L2_CNTL3_DEFAULT;
+	WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
+
+	tmp = mmVM_L2_CNTL4_DEFAULT;
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CNTL4, tmp);
+}
+
+static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL), tmp);
+	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
+}
 
-	/* Disable identity aperture.*/
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);
+static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+		     0XFFFFFFFF);
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+		     0x0000000F);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+		     0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+		     0);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0,
-		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+}
 
+static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t tmp;
+
 	for (i = 0; i <= 14; i++) {
-		tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i);
+		tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
 				    adev->vm_manager.num_level);
@@ -223,15 +206,52 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 				    PAGE_TABLE_BLOCK_SIZE,
 				    adev->vm_manager.block_size - 9);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
 			lower_32_bits(adev->vm_manager.max_pfn - 1));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
			upper_32_bits(adev->vm_manager.max_pfn - 1));
 	}
+}
+
+static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	for (i = 0 ; i < 18; ++i) {
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+				    2 * i, 0xffffffff);
+		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+				    2 * i, 0x1f);
+	}
+}
+
+int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev)) {
+		/*
+		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
+		 * VF copy registers so vbios post doesn't program them, for
+		 * SRIOV driver need to program them
+		 */
+		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
+			     adev->mc.vram_start >> 24);
+		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
+			     adev->mc.vram_end >> 24);
+	}
+
+	/* GART Enable. */
+	gfxhub_v1_0_init_gart_aperture_regs(adev);
+	gfxhub_v1_0_init_system_aperture_regs(adev);
+	gfxhub_v1_0_init_tlb_regs(adev);
+	gfxhub_v1_0_init_cache_regs(adev);
+
+	gfxhub_v1_0_enable_system_domain(adev);
+	gfxhub_v1_0_disable_identity_aperture(adev);
+	gfxhub_v1_0_setup_vmid_config(adev);
+	gfxhub_v1_0_program_invalidation(adev);
 
 	return 0;
 }
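The conversion in this file is mechanical: every open-coded WREG32(SOC15_REG_OFFSET(...), v) becomes a WREG32_SOC15 style helper. Presumably the helpers just compose the same offset lookup; a sketch of what such wrappers would look like, offered as an assumption for illustration rather than the kernel's actual definitions:

    /* Sketch: plausible shapes for the SOC15 register helpers used above. */
    #define WREG32_SOC15(ip, inst, reg, value) \
    	WREG32(SOC15_REG_OFFSET(ip, inst, reg), (value))
    #define RREG32_SOC15(ip, inst, reg) \
    	RREG32(SOC15_REG_OFFSET(ip, inst, reg))
    #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
    	WREG32(SOC15_REG_OFFSET(ip, inst, reg) + (offset), (value))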
@@ -243,22 +263,20 @@ void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
 
     /* Disable all tables */
     for (i = 0; i < 16; i++)
-        WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL) + i, 0);
+        WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i, 0);
 
     /* Setup TLB control */
-    tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
+    tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
     tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
     tmp = REG_SET_FIELD(tmp,
                         MC_VM_MX_L1_TLB_CNTL,
                         ENABLE_ADVANCED_DRIVER_MODEL,
                         0);
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);
+    WREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
 
     /* Setup L2 cache */
-    tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
-    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), 0);
+    WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+    WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, 0);
 }
 
 /**
@@ -271,7 +289,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                           bool value)
 {
     u32 tmp;
-    tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
+    tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
             RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
@@ -296,22 +314,11 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
             WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
             EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-    WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
+    WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
-static int gfxhub_v1_0_early_init(void *handle)
+void gfxhub_v1_0_init(struct amdgpu_device *adev)
 {
-    return 0;
-}
-
-static int gfxhub_v1_0_late_init(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_sw_init(void *handle)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];
 
     hub->ctx0_ptb_addr_lo32 =
@@ -330,96 +337,4 @@ static int gfxhub_v1_0_sw_init(void *handle)
         SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
     hub->vm_l2_pro_fault_cntl =
         SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
 
-    return 0;
 }
-
-static int gfxhub_v1_0_sw_fini(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_hw_init(void *handle)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    unsigned i;
-
-    for (i = 0 ; i < 18; ++i) {
-        WREG32(SOC15_REG_OFFSET(GC, 0,
-                                mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
-               2 * i, 0xffffffff);
-        WREG32(SOC15_REG_OFFSET(GC, 0,
-                                mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
-               2 * i, 0x1f);
-    }
-
-    return 0;
-}
-
-static int gfxhub_v1_0_hw_fini(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_suspend(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_resume(void *handle)
-{
-    return 0;
-}
-
-static bool gfxhub_v1_0_is_idle(void *handle)
-{
-    return true;
-}
-
-static int gfxhub_v1_0_wait_for_idle(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_soft_reset(void *handle)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_set_clockgating_state(void *handle,
-                                             enum amd_clockgating_state state)
-{
-    return 0;
-}
-
-static int gfxhub_v1_0_set_powergating_state(void *handle,
-                                             enum amd_powergating_state state)
-{
-    return 0;
-}
-
-const struct amd_ip_funcs gfxhub_v1_0_ip_funcs = {
-    .name = "gfxhub_v1_0",
-    .early_init = gfxhub_v1_0_early_init,
-    .late_init = gfxhub_v1_0_late_init,
-    .sw_init = gfxhub_v1_0_sw_init,
-    .sw_fini = gfxhub_v1_0_sw_fini,
-    .hw_init = gfxhub_v1_0_hw_init,
-    .hw_fini = gfxhub_v1_0_hw_fini,
-    .suspend = gfxhub_v1_0_suspend,
-    .resume = gfxhub_v1_0_resume,
-    .is_idle = gfxhub_v1_0_is_idle,
-    .wait_for_idle = gfxhub_v1_0_wait_for_idle,
-    .soft_reset = gfxhub_v1_0_soft_reset,
-    .set_clockgating_state = gfxhub_v1_0_set_clockgating_state,
-    .set_powergating_state = gfxhub_v1_0_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block =
-{
-    .type = AMD_IP_BLOCK_TYPE_GFXHUB,
-    .major = 1,
-    .minor = 0,
-    .rev = 0,
-    .funcs = &gfxhub_v1_0_ip_funcs,
-};
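gfxhub_v1_0_gart_disable above also collapses a read-modify-write pair into a single WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0) call. A hedged sketch of that helper, assuming the usual REG_FIELD_MASK/REG_FIELD_SHIFT macros; again the exact definition lives in soc15_common.h:

    /* Sketch only -- assumed shape of the field-update helper. */
    #define WREG32_FIELD15(ip, idx, reg, field, val)                 \
        WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg),                   \
               (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg))           \
                & ~REG_FIELD_MASK(reg, field)) |                     \
               ((val) << REG_FIELD_SHIFT(reg, field)))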
@@ -28,7 +28,8 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
 void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
 void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
                                           bool value);
+void gfxhub_v1_0_init(struct amdgpu_device *adev);
+u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
 extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block;
 
@@ -395,6 +395,12 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
     return pte_flag;
 }
 
+static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+    BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+    return addr;
+}
+
 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
                                               bool value)
 {
@@ -614,33 +620,6 @@ static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
     amdgpu_gart_fini(adev);
 }
 
-static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
-{
-    /*
-     * number of VMs
-     * VMID 0 is reserved for System
-     * amdgpu graphics/compute will use VMIDs 1-7
-     * amdkfd will use VMIDs 8-15
-     */
-    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
-    adev->vm_manager.num_level = 1;
-    amdgpu_vm_manager_init(adev);
-
-    /* base offset of vram pages */
-    if (adev->flags & AMD_IS_APU) {
-        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
-        tmp <<= 22;
-        adev->vm_manager.vram_base_offset = tmp;
-    } else
-        adev->vm_manager.vram_base_offset = 0;
-
-    return 0;
-}
-
-static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
-{
-}
-
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
                                      u32 status, u32 addr, u32 mc_client)
 {
@@ -855,6 +834,8 @@ static int gmc_v6_0_sw_init(void *handle)
 
     adev->mc.mc_mask = 0xffffffffffULL;
 
+    adev->mc.stolen_size = 256 * 1024;
+
     adev->need_dma32 = false;
     dma_bits = adev->need_dma32 ? 32 : 40;
     r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
@@ -887,26 +868,34 @@ static int gmc_v6_0_sw_init(void *handle)
     if (r)
         return r;
 
-    if (!adev->vm_manager.enabled) {
-        r = gmc_v6_0_vm_init(adev);
-        if (r) {
-            dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-            return r;
-        }
-        adev->vm_manager.enabled = true;
+    /*
+     * number of VMs
+     * VMID 0 is reserved for System
+     * amdgpu graphics/compute will use VMIDs 1-7
+     * amdkfd will use VMIDs 8-15
+     */
+    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+    adev->vm_manager.num_level = 1;
+    amdgpu_vm_manager_init(adev);
+
+    /* base offset of vram pages */
+    if (adev->flags & AMD_IS_APU) {
+        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
+
+        tmp <<= 22;
+        adev->vm_manager.vram_base_offset = tmp;
+    } else {
+        adev->vm_manager.vram_base_offset = 0;
     }
 
-    return r;
+    return 0;
 }
 
 static int gmc_v6_0_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (adev->vm_manager.enabled) {
-        amdgpu_vm_manager_fini(adev);
-        gmc_v6_0_vm_fini(adev);
-        adev->vm_manager.enabled = false;
-    }
+    amdgpu_vm_manager_fini(adev);
     gmc_v6_0_gart_fini(adev);
     amdgpu_gem_force_release(adev);
     amdgpu_bo_fini(adev);
@@ -984,16 +973,10 @@ static bool gmc_v6_0_is_idle(void *handle)
 static int gmc_v6_0_wait_for_idle(void *handle)
 {
     unsigned i;
-    u32 tmp;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
     for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
-                                       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
-                                       SRBM_STATUS__MCC_BUSY_MASK |
-                                       SRBM_STATUS__MCD_BUSY_MASK |
-                                       SRBM_STATUS__VMC_BUSY_MASK);
-        if (!tmp)
+        if (gmc_v6_0_is_idle(handle))
             return 0;
         udelay(1);
     }
@@ -1146,6 +1129,7 @@ static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
     .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
     .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
     .set_prt = gmc_v6_0_set_prt,
+    .get_vm_pde = gmc_v6_0_get_vm_pde,
     .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
 };
 
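The new gmc_v6_0_get_vm_pde callback above is an identity transform plus a sanity check. Reading its BUG_ON mask bit by bit (my annotation of the constant, not part of the commit):

    /*
     * 0xFFFFFF0000000FFFULL
     *   bits 63:40 set   -> address must fit the 40-bit MC space
     *   bits 39:12 clear -> these bits carry the page-directory address
     *   bits 11:0  set   -> address must be 4 KiB aligned
     */
    BUG_ON(addr & 0xFFFFFF0000000FFFULL);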
@@ -472,6 +472,12 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
     return pte_flag;
 }
 
+static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+    BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+    return addr;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -724,55 +730,6 @@ static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
     amdgpu_gart_fini(adev);
 }
 
-/*
- * vm
- * VMID 0 is the physical GPU addresses as used by the kernel.
- * VMIDs 1-15 are used for userspace clients and are handled
- * by the amdgpu vm/hsa code.
- */
-/**
- * gmc_v7_0_vm_init - cik vm init callback
- *
- * @adev: amdgpu_device pointer
- *
- * Inits cik specific vm parameters (number of VMs, base of vram for
- * VMIDs 1-15) (CIK).
- * Returns 0 for success.
- */
-static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
-{
-    /*
-     * number of VMs
-     * VMID 0 is reserved for System
-     * amdgpu graphics/compute will use VMIDs 1-7
-     * amdkfd will use VMIDs 8-15
-     */
-    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
-    adev->vm_manager.num_level = 1;
-    amdgpu_vm_manager_init(adev);
-
-    /* base offset of vram pages */
-    if (adev->flags & AMD_IS_APU) {
-        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
-        tmp <<= 22;
-        adev->vm_manager.vram_base_offset = tmp;
-    } else
-        adev->vm_manager.vram_base_offset = 0;
-
-    return 0;
-}
-
-/**
- * gmc_v7_0_vm_fini - cik vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down any asic specific VM setup (CIK).
- */
-static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
-{
-}
-
 /**
  * gmc_v7_0_vm_decode_fault - print human readable fault info
  *
@@ -1013,6 +970,8 @@ static int gmc_v7_0_sw_init(void *handle)
      */
     adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
+    adev->mc.stolen_size = 256 * 1024;
+
     /* set DMA mask + need_dma32 flags.
      * PCIE - can handle 40-bits.
      * IGP - can handle 40-bits
@@ -1051,27 +1010,34 @@ static int gmc_v7_0_sw_init(void *handle)
     if (r)
         return r;
 
-    if (!adev->vm_manager.enabled) {
-        r = gmc_v7_0_vm_init(adev);
-        if (r) {
-            dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-            return r;
-        }
-        adev->vm_manager.enabled = true;
+    /*
+     * number of VMs
+     * VMID 0 is reserved for System
+     * amdgpu graphics/compute will use VMIDs 1-7
+     * amdkfd will use VMIDs 8-15
+     */
+    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+    adev->vm_manager.num_level = 1;
+    amdgpu_vm_manager_init(adev);
+
+    /* base offset of vram pages */
+    if (adev->flags & AMD_IS_APU) {
+        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
+
+        tmp <<= 22;
+        adev->vm_manager.vram_base_offset = tmp;
+    } else {
+        adev->vm_manager.vram_base_offset = 0;
     }
 
-    return r;
+    return 0;
 }
 
 static int gmc_v7_0_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (adev->vm_manager.enabled) {
-        amdgpu_vm_manager_fini(adev);
-        gmc_v7_0_vm_fini(adev);
-        adev->vm_manager.enabled = false;
-    }
+    amdgpu_vm_manager_fini(adev);
     gmc_v7_0_gart_fini(adev);
     amdgpu_gem_force_release(adev);
     amdgpu_bo_fini(adev);
@@ -1335,7 +1301,8 @@ static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
     .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
     .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
     .set_prt = gmc_v7_0_set_prt,
-    .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags
+    .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
+    .get_vm_pde = gmc_v7_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
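The APU branch that moves into sw_init derives vram_base_offset from MC_VM_FB_OFFSET, which evidently counts in 4 MiB units, hence the 22-bit shift. A worked example with an assumed register value:

    u64 tmp = 0x300;    /* hypothetical MC_VM_FB_OFFSET readback */
    tmp <<= 22;         /* one unit = 4 MiB */
    /* tmp == 0xC0000000: the stolen-VRAM carve-out would start at
     * 3 GiB in the MC address space. */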
@@ -656,6 +656,12 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
     return pte_flag;
 }
 
+static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+{
+    BUG_ON(addr & 0xFFFFFF0000000FFFULL);
+    return addr;
+}
+
 /**
  * gmc_v8_0_set_fault_enable_default - update VM fault handling
  *
@@ -927,55 +933,6 @@ static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
     amdgpu_gart_fini(adev);
 }
 
-/*
- * vm
- * VMID 0 is the physical GPU addresses as used by the kernel.
- * VMIDs 1-15 are used for userspace clients and are handled
- * by the amdgpu vm/hsa code.
- */
-/**
- * gmc_v8_0_vm_init - cik vm init callback
- *
- * @adev: amdgpu_device pointer
- *
- * Inits cik specific vm parameters (number of VMs, base of vram for
- * VMIDs 1-15) (CIK).
- * Returns 0 for success.
- */
-static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
-{
-    /*
-     * number of VMs
-     * VMID 0 is reserved for System
-     * amdgpu graphics/compute will use VMIDs 1-7
-     * amdkfd will use VMIDs 8-15
-     */
-    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
-    adev->vm_manager.num_level = 1;
-    amdgpu_vm_manager_init(adev);
-
-    /* base offset of vram pages */
-    if (adev->flags & AMD_IS_APU) {
-        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
-        tmp <<= 22;
-        adev->vm_manager.vram_base_offset = tmp;
-    } else
-        adev->vm_manager.vram_base_offset = 0;
-
-    return 0;
-}
-
-/**
- * gmc_v8_0_vm_fini - cik vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down any asic specific VM setup (CIK).
- */
-static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
-{
-}
-
 /**
  * gmc_v8_0_vm_decode_fault - print human readable fault info
  *
@@ -1097,6 +1054,8 @@ static int gmc_v8_0_sw_init(void *handle)
      */
     adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
+    adev->mc.stolen_size = 256 * 1024;
+
     /* set DMA mask + need_dma32 flags.
      * PCIE - can handle 40-bits.
      * IGP - can handle 40-bits
@@ -1135,27 +1094,34 @@ static int gmc_v8_0_sw_init(void *handle)
     if (r)
         return r;
 
-    if (!adev->vm_manager.enabled) {
-        r = gmc_v8_0_vm_init(adev);
-        if (r) {
-            dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-            return r;
-        }
-        adev->vm_manager.enabled = true;
+    /*
+     * number of VMs
+     * VMID 0 is reserved for System
+     * amdgpu graphics/compute will use VMIDs 1-7
+     * amdkfd will use VMIDs 8-15
+     */
+    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
+    adev->vm_manager.num_level = 1;
+    amdgpu_vm_manager_init(adev);
+
+    /* base offset of vram pages */
+    if (adev->flags & AMD_IS_APU) {
+        u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
+
+        tmp <<= 22;
+        adev->vm_manager.vram_base_offset = tmp;
+    } else {
+        adev->vm_manager.vram_base_offset = 0;
     }
 
-    return r;
+    return 0;
 }
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (adev->vm_manager.enabled) {
-        amdgpu_vm_manager_fini(adev);
-        gmc_v8_0_vm_fini(adev);
-        adev->vm_manager.enabled = false;
-    }
+    amdgpu_vm_manager_fini(adev);
     gmc_v8_0_gart_fini(adev);
     amdgpu_gem_force_release(adev);
     amdgpu_bo_fini(adev);
@@ -1654,7 +1620,8 @@ static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
     .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
    .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
     .set_prt = gmc_v8_0_set_prt,
-    .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
+    .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
+    .get_vm_pde = gmc_v8_0_get_vm_pde
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
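gmc_v6_0, gmc_v7_0 and gmc_v8_0 all receive the same treatment: the vm_init/vm_fini callbacks and the vm_manager.enabled bookkeeping disappear, and sw_init/sw_fini drive the VM manager directly. The shared shape, condensed; AMDGPU_NUM_OF_VMIDS is 8 in this tree, so VMIDs 1-7 go to graphics/compute and 8-15 to amdkfd:

    /* sw_init, once the GART is up: */
    adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
    adev->vm_manager.num_level = 1;    /* single-level page tables pre-GFX9 */
    amdgpu_vm_manager_init(adev);

    /* sw_fini, now unconditional: */
    amdgpu_vm_manager_fini(adev);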
@@ -33,6 +33,7 @@
 #include "soc15_common.h"
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
@@ -215,6 +216,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
     unsigned i, j;
 
     /* flush hdp cache */
+    if (adev->flags & AMD_IS_APU)
+        nbio_v7_0_hdp_flush(adev);
+    else
         nbio_v6_1_hdp_flush(adev);
 
     spin_lock(&adev->mc.invalidate_lock);
@@ -354,17 +358,19 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
     return pte_flag;
 }
 
-static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
+static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
 {
-    return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
+    addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
+    BUG_ON(addr & 0xFFFF00000000003FULL);
+    return addr;
 }
 
 static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
     .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
     .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
-    .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
-    .adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
     .get_invalidate_req = gmc_v9_0_get_invalidate_req,
+    .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
+    .get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
 static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
@@ -415,6 +421,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
     amdgpu_vram_location(adev, &adev->mc, base);
     adev->mc.gtt_base_align = 0;
     amdgpu_gtt_location(adev, mc);
+    /* base offset of vram pages */
+    if (adev->flags & AMD_IS_APU)
+        adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+    else
+        adev->vm_manager.vram_base_offset = 0;
 }
 
 /**
@@ -434,7 +445,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
     /* hbm memory channel size */
     chansize = 128;
 
-    tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
+    tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
     tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
     tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
     switch (tmp) {
@@ -474,7 +485,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
     adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
     /* size in MB on si */
     adev->mc.mc_vram_size =
-        nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
+        ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
+         nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
     adev->mc.real_vram_size = adev->mc.mc_vram_size;
     adev->mc.visible_vram_size = adev->mc.aper_size;
 
@@ -514,64 +526,15 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
     return amdgpu_gart_table_vram_alloc(adev);
 }
 
-/*
- * vm
- * VMID 0 is the physical GPU addresses as used by the kernel.
- * VMIDs 1-15 are used for userspace clients and are handled
- * by the amdgpu vm/hsa code.
- */
-/**
- * gmc_v9_0_vm_init - vm init callback
- *
- * @adev: amdgpu_device pointer
- *
- * Inits vega10 specific vm parameters (number of VMs, base of vram for
- * VMIDs 1-15) (vega10).
- * Returns 0 for success.
- */
-static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
-{
-    /*
-     * number of VMs
-     * VMID 0 is reserved for System
-     * amdgpu graphics/compute will use VMIDs 1-7
-     * amdkfd will use VMIDs 8-15
-     */
-    adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
-    adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
-
-    /* TODO: fix num_level for APU when updating vm size and block size */
-    if (adev->flags & AMD_IS_APU)
-        adev->vm_manager.num_level = 1;
-    else
-        adev->vm_manager.num_level = 3;
-    amdgpu_vm_manager_init(adev);
-
-    /* base offset of vram pages */
-    /*XXX This value is not zero for APU*/
-    adev->vm_manager.vram_base_offset = 0;
-
-    return 0;
-}
-
-/**
- * gmc_v9_0_vm_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down any asic specific VM setup.
- */
-static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
-{
-    return;
-}
-
 static int gmc_v9_0_sw_init(void *handle)
 {
     int r;
     int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    gfxhub_v1_0_init(adev);
+    mmhub_v1_0_init(adev);
+
     spin_lock_init(&adev->mc.invalidate_lock);
 
     if (adev->flags & AMD_IS_APU) {
@@ -609,6 +572,12 @@ static int gmc_v9_0_sw_init(void *handle)
      */
     adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
+    /*
+     * It needs to reserve 8M stolen memory for vega10
+     * TODO: Figure out how to avoid that...
+     */
+    adev->mc.stolen_size = 8 * 1024 * 1024;
+
     /* set DMA mask + need_dma32 flags.
      * PCIE - can handle 44-bits.
      * IGP - can handle 44-bits
@@ -641,15 +610,23 @@ static int gmc_v9_0_sw_init(void *handle)
     if (r)
         return r;
 
-    if (!adev->vm_manager.enabled) {
-        r = gmc_v9_0_vm_init(adev);
-        if (r) {
-            dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-            return r;
-        }
-        adev->vm_manager.enabled = true;
-    }
-    return r;
+    /*
+     * number of VMs
+     * VMID 0 is reserved for System
+     * amdgpu graphics/compute will use VMIDs 1-7
+     * amdkfd will use VMIDs 8-15
+     */
+    adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+    adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+    /* TODO: fix num_level for APU when updating vm size and block size */
+    if (adev->flags & AMD_IS_APU)
+        adev->vm_manager.num_level = 1;
+    else
+        adev->vm_manager.num_level = 3;
+    amdgpu_vm_manager_init(adev);
+
+    return 0;
 }
 
 /**
@@ -669,11 +646,7 @@ static int gmc_v9_0_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    if (adev->vm_manager.enabled) {
-        amdgpu_vm_manager_fini(adev);
-        gmc_v9_0_vm_fini(adev);
-        adev->vm_manager.enabled = false;
-    }
+    amdgpu_vm_manager_fini(adev);
     gmc_v9_0_gart_fini(adev);
     amdgpu_gem_force_release(adev);
     amdgpu_bo_fini(adev);
@@ -686,6 +659,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
     switch (adev->asic_type) {
     case CHIP_VEGA10:
         break;
+    case CHIP_RAVEN:
+        break;
     default:
         break;
     }
@@ -715,6 +690,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
         return r;
 
     /* After HDP is initialized, flush HDP.*/
+    if (adev->flags & AMD_IS_APU)
+        nbio_v7_0_hdp_flush(adev);
+    else
         nbio_v6_1_hdp_flush(adev);
 
     r = gfxhub_v1_0_gart_enable(adev);
@@ -725,12 +703,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
     if (r)
         return r;
 
-    tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
+    tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
     tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
-    WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);
+    WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
 
-    tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
-    WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);
+    tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
+    WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
 
     if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
@@ -781,6 +759,12 @@ static int gmc_v9_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    if (amdgpu_sriov_vf(adev)) {
+        /* full access mode, so don't touch any GMC register */
+        DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+        return 0;
+    }
+
     amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
     gmc_v9_0_gart_disable(adev);
 
@@ -831,7 +815,16 @@ static int gmc_v9_0_soft_reset(void *handle)
 static int gmc_v9_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
 {
-    return 0;
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+    return mmhub_v1_0_set_clockgating(adev, state);
+}
+
+static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+    mmhub_v1_0_get_clockgating(adev, flags);
 }
 
 static int gmc_v9_0_set_powergating_state(void *handle,
@@ -855,6 +848,7 @@ const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
     .soft_reset = gmc_v9_0_soft_reset,
     .set_clockgating_state = gmc_v9_0_set_clockgating_state,
     .set_powergating_state = gmc_v9_0_set_powergating_state,
+    .get_clockgating_state = gmc_v9_0_get_clockgating_state,
 };
 
 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
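Unlike the identity version on SI/CIK/VI, gmc_v9_0_get_vm_pde also relocates the address, folding in the work of the removed adjust_mc_addr hook, and the alignment check tightens to the GFX9 PDE format. Annotated below; the bit-range reading of the mask is my interpretation, not text from the commit:

    /* MC address -> GPU physical address, then validate: */
    addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
    /*
     * 0xFFFF00000000003FULL
     *   bits 63:48 set   -> must fit the 48-bit address space
     *   bits 47:6  clear -> carry the page-directory address
     *   bits 5:0   set   -> must be 64-byte aligned
     */
    BUG_ON(addr & 0xFFFF00000000003FULL);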
@@ -34,9 +34,12 @@
 
 #include "soc15_common.h"
 
+#define mmDAGB0_CNTL_MISC2_RV 0x008f
+#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
+
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
-    u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));
+    u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
 
     base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
     base <<= 24;
@@ -44,129 +47,9 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
     return base;
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
-    u32 tmp;
-    u64 value;
-    uint64_t addr;
-    u32 i;
-
-    /* Program MC. */
-    /* Update configuration */
-    DRM_INFO("%s -- in\n", __func__);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
-        adev->mc.vram_start >> 18);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
-        adev->mc.vram_end >> 18);
-    value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
-        adev->vm_manager.vram_base_offset;
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
-        (u32)(value >> 12));
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
-        (u32)(value >> 44));
-
-    if (amdgpu_sriov_vf(adev)) {
-        /* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are VF copy registers so
-        vbios post doesn't program them, for SRIOV driver need to program them */
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
-            adev->mc.vram_start >> 24);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
-            adev->mc.vram_end >> 24);
-    }
-
-    /* Disable AGP. */
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);
-
-    /* GART Enable. */
-
-    /* Setup TLB control */
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
-    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                SYSTEM_ACCESS_MODE,
-                3);
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                ENABLE_ADVANCED_DRIVER_MODEL,
-                1);
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                SYSTEM_APERTURE_UNMAPPED_ACCESS,
-                0);
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                ECO_BITS,
-                0);
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                MTYPE,
-                MTYPE_UC);/* XXX for emulation. */
-    tmp = REG_SET_FIELD(tmp,
-                MC_VM_MX_L1_TLB_CNTL,
-                ATC_EN,
-                1);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);
-
-    /* Setup L2 cache */
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
-    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL,
-                ENABLE_L2_FRAGMENT_PROCESSING,
-                0);
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL,
-                L2_PDE0_CACHE_TAG_GENERATION_MODE,
-                0);/* XXX for emulation, Refer to closed source code.*/
-    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL,
-                CONTEXT1_IDENTITY_ACCESS_MODE,
-                1);
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL,
-                IDENTITY_MODE_FRAGMENT_SIZE,
-                0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
-
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
-    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
-    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);
-
-    tmp = mmVM_L2_CNTL3_DEFAULT;
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);
-
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL4,
-                VMC_TAP_PDE_REQUEST_PHYSICAL,
-                0);
-    tmp = REG_SET_FIELD(tmp,
-                VM_L2_CNTL4,
-                VMC_TAP_PTE_REQUEST_PHYSICAL,
-                0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);
-
-    /* setup context0 */
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
-        (u32)(adev->mc.gtt_start >> 12));
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
-        (u32)(adev->mc.gtt_start >> 44));
-
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
-        (u32)(adev->mc.gtt_end >> 12));
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
-        (u32)(adev->mc.gtt_end >> 44));
+    uint64_t value;
 
     BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
     value = adev->gart.table_addr - adev->mc.vram_start +
@@ -174,54 +57,150 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
     value &= 0x0000FFFFFFFFF000ULL;
     value |= 0x1; /* valid bit */
 
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
-        (u32)value);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
-        (u32)(value >> 32));
-
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+                 lower_32_bits(value));
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+                 upper_32_bits(value));
+}
+
+static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+    mmhub_v1_0_init_gart_pt_regs(adev);
+
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+                 (u32)(adev->mc.gtt_start >> 12));
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+                 (u32)(adev->mc.gtt_start >> 44));
+
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+                 (u32)(adev->mc.gtt_end >> 12));
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+                 (u32)(adev->mc.gtt_end >> 44));
+}
+
+static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+    uint64_t value;
+    uint32_t tmp;
+
+    /* Disable AGP. */
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF);
+
+    /* Program the system aperture low logical page number. */
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                 adev->mc.vram_start >> 18);
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                 adev->mc.vram_end >> 18);
+
+    /* Set default page address. */
+    value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
+        adev->vm_manager.vram_base_offset;
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+                 (u32)(value >> 12));
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+                 (u32)(value >> 44));
+
+    /* Program "protection fault". */
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
         (u32)(adev->dummy_page.addr >> 12));
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
         (u32)((u64)adev->dummy_page.addr >> 44));
 
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
-                ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
-                1);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);
+                ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+}
 
-    addr = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
-    tmp = RREG32(addr);
+static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
+{
+    uint32_t tmp;
+
+    /* Setup TLB control */
+    tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
+
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+                ENABLE_ADVANCED_DRIVER_MODEL, 1);
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+                SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+                MTYPE, MTYPE_UC);/* XXX for emulation. */
+    tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
+{
+    uint32_t tmp;
+
+    /* Setup L2 cache */
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+    /* XXX for emulation, Refer to closed source code.*/
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+                0);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
+
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+
+    tmp = mmVM_L2_CNTL3_DEFAULT;
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
+
+    tmp = mmVM_L2_CNTL4_DEFAULT;
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+    tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
+}
+
+static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
+{
+    uint32_t tmp;
+
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
     tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);
+    WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
+}
 
-    tmp = RREG32(addr);
+static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
+{
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+                 0XFFFFFFFF);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+                 0x0000000F);
 
-    /* Disable identity aperture.*/
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);
+    WREG32_SOC15(MMHUB, 0,
+                 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
+    WREG32_SOC15(MMHUB, 0,
+                 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
 
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
+                 0);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
+                 0);
+}
 
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-        mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);
+static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
+{
+    int i;
+    uint32_t tmp;
 
     for (i = 0; i <= 14; i++) {
-        tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL)
-                + i);
+        tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                             ENABLE_CONTEXT, 1);
         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
@@ -243,14 +222,52 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
         tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                             PAGE_TABLE_BLOCK_SIZE,
                             adev->vm_manager.block_size - 9);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
             lower_32_bits(adev->vm_manager.max_pfn - 1));
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
             upper_32_bits(adev->vm_manager.max_pfn - 1));
     }
+}
+
+static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
+{
+    unsigned i;
+
+    for (i = 0; i < 18; ++i) {
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+                            2 * i, 0xffffffff);
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+                            2 * i, 0x1f);
+    }
+}
+
+int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+{
+    if (amdgpu_sriov_vf(adev)) {
+        /*
+         * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+         * VF copy registers so vbios post doesn't program them, for
+         * SRIOV driver need to program them
+         */
+        WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
+                     adev->mc.vram_start >> 24);
+        WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
+                     adev->mc.vram_end >> 24);
+    }
+
+    /* GART Enable. */
+    mmhub_v1_0_init_gart_aperture_regs(adev);
+    mmhub_v1_0_init_system_aperture_regs(adev);
+    mmhub_v1_0_init_tlb_regs(adev);
+    mmhub_v1_0_init_cache_regs(adev);
+
+    mmhub_v1_0_enable_system_domain(adev);
+    mmhub_v1_0_disable_identity_aperture(adev);
+    mmhub_v1_0_setup_vmid_config(adev);
+    mmhub_v1_0_program_invalidation(adev);
+
     return 0;
 }
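mmhub_v1_0_init_gart_pt_regs encodes the context0 page-table base the same way the gfxhub side does: relocate the MC address, mask it to bits 47:12, and set bit 0 as the valid bit. A compact restatement of the lines above:

    u64 pt = adev->gart.table_addr - adev->mc.vram_start +
             adev->vm_manager.vram_base_offset; /* MC -> GPU physical */
    pt &= 0x0000FFFFFFFFF000ULL;  /* bits 47:12: 4 KiB-aligned base */
    pt |= 0x1;                    /* bit 0: entry valid */
    /* lower_32_bits(pt) / upper_32_bits(pt) then land in the
     * VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_{LO32,HI32} pair. */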
@@ -262,22 +279,22 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 
     /* Disable all tables */
     for (i = 0; i < 16; i++)
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);
+        WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);
 
     /* Setup TLB control */
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
+    tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
     tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
     tmp = REG_SET_FIELD(tmp,
                         MC_VM_MX_L1_TLB_CNTL,
                         ENABLE_ADVANCED_DRIVER_MODEL,
                         0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);
+    WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
 
     /* Setup L2 cache */
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
 }
 
 /**
@@ -289,7 +306,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
     u32 tmp;
-    tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
+    tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
             RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
@@ -314,22 +331,11 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
             WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
     tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
             EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-    WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
+    WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
-static int mmhub_v1_0_early_init(void *handle)
+void mmhub_v1_0_init(struct amdgpu_device *adev)
 {
-    return 0;
-}
-
-static int mmhub_v1_0_late_init(void *handle)
-{
-    return 0;
-}
-
-static int mmhub_v1_0_sw_init(void *handle)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
 
     hub->ctx0_ptb_addr_lo32 =
@@ -349,69 +355,20 @@ static int mmhub_v1_0_sw_init(void *handle)
     hub->vm_l2_pro_fault_cntl =
         SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
 
-    return 0;
-}
-
-static int mmhub_v1_0_sw_fini(void *handle)
-{
-    return 0;
-}
-
-static int mmhub_v1_0_hw_init(void *handle)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    unsigned i;
-
-    for (i = 0; i < 18; ++i) {
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                                mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
-               2 * i, 0xffffffff);
-        WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-                                mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
-               2 * i, 0x1f);
-    }
-
-    return 0;
-}
-
-static int mmhub_v1_0_hw_fini(void *handle)
-{
-    return 0;
-}
-
-static int mmhub_v1_0_suspend(void *handle)
-{
-    return 0;
-}
-
-static int mmhub_v1_0_resume(void *handle)
-{
-    return 0;
-}
-
-static bool mmhub_v1_0_is_idle(void *handle)
-{
-    return true;
-}
-
-static int mmhub_v1_0_wait_for_idle(void *handle)
-{
-    return 0;
-}
-
-static int mmhub_v1_0_soft_reset(void *handle)
-{
-    return 0;
 }
 
 static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                         bool enable)
 {
-    uint32_t def, data, def1, data1, def2, data2;
+    uint32_t def, data, def1, data1, def2 = 0, data2 = 0;
 
-    def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
-    def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
-    def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));
+    def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+    if (adev->asic_type != CHIP_RAVEN) {
+        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+        def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
    } else
+        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);
 
     if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
         data |= ATC_L2_MISC_CG__ENABLE_MASK;
@@ -423,6 +380,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
                DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 
+        if (adev->asic_type != CHIP_RAVEN)
             data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                        DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                        DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -439,6 +397,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
                DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
|||||||
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
|
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
|
||||||
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
|
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
|
||||||
|
|
||||||
|
if (adev->asic_type != CHIP_RAVEN)
|
||||||
data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
|
data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
|
||||||
DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
|
DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
|
||||||
DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
|
DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
|
||||||
@@ -448,13 +407,17 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (def != data)
|
if (def != data)
|
||||||
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
|
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
|
||||||
|
|
||||||
if (def1 != data1)
|
if (def1 != data1) {
|
||||||
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);
|
if (adev->asic_type != CHIP_RAVEN)
|
||||||
|
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
|
||||||
|
else
|
||||||
|
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
|
||||||
|
}
|
||||||
|
|
||||||
if (def2 != data2)
|
if (adev->asic_type != CHIP_RAVEN && def2 != data2)
|
||||||
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
|
WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
||||||
@@ -462,7 +425,7 @@ static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
|||||||
{
|
{
|
||||||
uint32_t def, data;
|
uint32_t def, data;
|
||||||
|
|
||||||
def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
|
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
|
||||||
|
|
||||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
|
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
|
||||||
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
|
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
|
||||||
@@ -470,7 +433,7 @@ static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
|||||||
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
|
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
|
||||||
|
|
||||||
if (def != data)
|
if (def != data)
|
||||||
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
|
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
||||||
@@ -478,7 +441,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
|
|||||||
{
|
{
|
||||||
uint32_t def, data;
|
uint32_t def, data;
|
||||||
|
|
||||||
def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
|
def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
|
||||||
|
|
||||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
|
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
|
||||||
data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
|
data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
|
||||||
@@ -486,7 +449,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
|
|||||||
data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
|
data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
|
||||||
|
|
||||||
if (def != data)
|
if (def != data)
|
||||||
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
|
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
||||||
@@ -494,7 +457,7 @@ static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
|||||||
{
|
{
|
||||||
uint32_t def, data;
|
uint32_t def, data;
|
||||||
|
|
||||||
def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
|
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
|
||||||
|
|
||||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
|
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
|
||||||
(adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
|
(adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
|
||||||
@@ -503,19 +466,18 @@ static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
|
|||||||
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
|
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
|
||||||
|
|
||||||
if(def != data)
|
if(def != data)
|
||||||
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
|
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mmhub_v1_0_set_clockgating_state(void *handle,
|
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
|
||||||
enum amd_clockgating_state state)
|
enum amd_clockgating_state state)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev))
|
if (amdgpu_sriov_vf(adev))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
switch (adev->asic_type) {
|
switch (adev->asic_type) {
|
||||||
case CHIP_VEGA10:
|
case CHIP_VEGA10:
|
||||||
|
case CHIP_RAVEN:
|
||||||
mmhub_v1_0_update_medium_grain_clock_gating(adev,
|
mmhub_v1_0_update_medium_grain_clock_gating(adev,
|
||||||
state == AMD_CG_STATE_GATE ? true : false);
|
state == AMD_CG_STATE_GATE ? true : false);
|
||||||
athub_update_medium_grain_clock_gating(adev,
|
athub_update_medium_grain_clock_gating(adev,
|
||||||
@@ -532,54 +494,20 @@ static int mmhub_v1_0_set_clockgating_state(void *handle,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
|
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
|
||||||
int data;
|
int data;
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev))
|
if (amdgpu_sriov_vf(adev))
|
||||||
*flags = 0;
|
*flags = 0;
|
||||||
|
|
||||||
/* AMD_CG_SUPPORT_MC_MGCG */
|
/* AMD_CG_SUPPORT_MC_MGCG */
|
||||||
data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
|
data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
|
||||||
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
|
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
|
||||||
*flags |= AMD_CG_SUPPORT_MC_MGCG;
|
*flags |= AMD_CG_SUPPORT_MC_MGCG;
|
||||||
|
|
||||||
/* AMD_CG_SUPPORT_MC_LS */
|
/* AMD_CG_SUPPORT_MC_LS */
|
||||||
data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
|
data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
|
||||||
if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
|
if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
|
||||||
*flags |= AMD_CG_SUPPORT_MC_LS;
|
*flags |= AMD_CG_SUPPORT_MC_LS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mmhub_v1_0_set_powergating_state(void *handle,
|
|
||||||
enum amd_powergating_state state)
|
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
|
|
||||||
.name = "mmhub_v1_0",
|
|
||||||
.early_init = mmhub_v1_0_early_init,
|
|
||||||
.late_init = mmhub_v1_0_late_init,
|
|
||||||
.sw_init = mmhub_v1_0_sw_init,
|
|
||||||
.sw_fini = mmhub_v1_0_sw_fini,
|
|
||||||
.hw_init = mmhub_v1_0_hw_init,
|
|
||||||
.hw_fini = mmhub_v1_0_hw_fini,
|
|
||||||
.suspend = mmhub_v1_0_suspend,
|
|
||||||
.resume = mmhub_v1_0_resume,
|
|
||||||
.is_idle = mmhub_v1_0_is_idle,
|
|
||||||
.wait_for_idle = mmhub_v1_0_wait_for_idle,
|
|
||||||
.soft_reset = mmhub_v1_0_soft_reset,
|
|
||||||
.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
|
|
||||||
.set_powergating_state = mmhub_v1_0_set_powergating_state,
|
|
||||||
.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
|
|
||||||
};
|
|
||||||
|
|
||||||
const struct amdgpu_ip_block_version mmhub_v1_0_ip_block =
|
|
||||||
{
|
|
||||||
.type = AMD_IP_BLOCK_TYPE_MMHUB,
|
|
||||||
.major = 1,
|
|
||||||
.minor = 0,
|
|
||||||
.rev = 0,
|
|
||||||
.funcs = &mmhub_v1_0_ip_funcs,
|
|
||||||
};
|
|
||||||
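Every hunk above follows the same conversion, so it is worth sketching what the SOC15 helpers roughly expand to. This is a simplified sketch, not the verbatim definitions from soc15_common.h, and GET_REG_BASE() is a hypothetical stand-in for the per-IP, per-instance base lookup the real macros perform:

/* Rough shape of the SOC15 register helpers used throughout this series.
 * GET_REG_BASE() is an illustrative stand-in for the generated per-IP,
 * per-instance base-address tables. */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(GET_REG_BASE(ip, inst) + (reg))

#define RREG32_SOC15(ip, inst, reg) \
	RREG32(SOC15_REG_OFFSET(ip, inst, reg))

#define WREG32_SOC15(ip, inst, reg, value) \
	WREG32(SOC15_REG_OFFSET(ip, inst, reg), (value))

/* The _OFFSET variants take a runtime index, replacing the open-coded
 * "SOC15_REG_OFFSET(...) + idx" arithmetic seen on the removed lines. */
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
	WREG32(SOC15_REG_OFFSET(ip, inst, reg) + (offset), (value))

The point of the indirection is that register offsets stop being compile-time constants shared across ASICs and can come from per-ASIC tables instead, which is what lets one driver binary cover both Vega10 and Raven.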
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h:
@@ -28,6 +28,10 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
 void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					 bool value);
+void mmhub_v1_0_init(struct amdgpu_device *adev);
+int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+			       enum amd_clockgating_state state);
+void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
 
 extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
 extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;
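With the standalone IP block removed, MMHUB is now driven directly by whichever GMC code owns it through the entry points exported above. A hedged sketch of what a caller could look like; the gmc_v9_0 names and call sites below are assumptions for illustration, not part of this diff:

/* Hypothetical caller of the newly exported MMHUB entry points.  The
 * gmc_v9_0_* names are illustrative only. */
static int gmc_v9_0_sw_init_sketch(struct amdgpu_device *adev)
{
	mmhub_v1_0_init(adev);	/* fills adev->vmhub[AMDGPU_MMHUB] */
	return 0;
}

static int gmc_v9_0_set_clockgating_sketch(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* MMHUB clock gating now rides on the GMC IP block */
	return mmhub_v1_0_set_clockgating(adev, state);
}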
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c:
@@ -124,8 +124,8 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
 			r = -ETIME;
 			break;
 		}
-		msleep(1);
-		timeout -= 1;
+		mdelay(5);
+		timeout -= 5;
 
 		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 						     mmBIF_BX_PF0_MAILBOX_CONTROL));
@@ -141,12 +141,12 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 	r = xgpu_ai_mailbox_rcv_msg(adev, event);
 	while (r) {
 		if (timeout <= 0) {
-			pr_err("Doesn't get ack from pf.\n");
+			pr_err("Doesn't get msg:%d from pf.\n", event);
 			r = -ETIME;
 			break;
 		}
-		msleep(1);
-		timeout -= 1;
+		mdelay(5);
+		timeout -= 5;
 
 		r = xgpu_ai_mailbox_rcv_msg(adev, event);
 	}
@@ -165,7 +165,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 	/* start to poll ack */
 	r = xgpu_ai_poll_ack(adev);
 	if (r)
-		return r;
+		pr_err("Doesn't get ack from pf, continue\n");
 
 	xgpu_ai_mailbox_set_valid(adev, false);
 
@@ -174,9 +174,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 	    req == IDH_REQ_GPU_FINI_ACCESS ||
 	    req == IDH_REQ_GPU_RESET_ACCESS) {
 		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
-		if (r)
+		if (r) {
+			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
 			return r;
+		}
 	}
 
 	return 0;
 }
@@ -241,7 +243,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	}
 
 	/* Trigger recovery due to world switch failure */
-	amdgpu_sriov_gpu_reset(adev, false);
+	amdgpu_sriov_gpu_reset(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -264,12 +266,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 {
 	int r;
 
+	/* trigger gpu-reset by hypervisor only if TDR disabled */
+	if (amdgpu_lockup_timeout == 0) {
 	/* see what event we get */
 	r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
 
 	/* only handle FLR_NOTIFY now */
 	if (!r)
 		schedule_work(&adev->virt.flr_work);
+	}
 
 	return 0;
 }
@@ -296,11 +301,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
 {
 	int r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
 	if (r) {
 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 		return r;
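Both mailbox pollers switch from msleep(1) to mdelay(5). msleep() sleeps, so it cannot run in atomic context, and a 1 ms msleep typically oversleeps to a jiffy or more anyway; the busy-wait is both safer and closer to the intended granularity, and the timeout bookkeeping has to burn the budget in the same 5 ms steps. A condensed sketch of the resulting loop shape, where MAX_WAIT_MS and poll_hw_ack() are illustrative stand-ins rather than driver symbols:

/* Condensed sketch of the polling pattern after this change. */
static int poll_ack_sketch(struct amdgpu_device *adev)
{
	int timeout = MAX_WAIT_MS;		/* budget in milliseconds */

	while (!poll_hw_ack(adev)) {		/* stand-in for the mailbox read */
		if (timeout <= 0)
			return -ETIME;		/* pf never answered */
		mdelay(5);			/* busy-wait, atomic-safe */
		timeout -= 5;			/* consume budget in 5 ms steps */
	}
	return 0;
}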
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c:
@@ -398,8 +398,8 @@ static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
 			r = -ETIME;
 			break;
 		}
-		msleep(1);
-		timeout -= 1;
+		mdelay(5);
+		timeout -= 5;
 
 		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
 	}
@@ -418,8 +418,8 @@ static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 			r = -ETIME;
 			break;
 		}
-		msleep(1);
-		timeout -= 1;
+		mdelay(5);
+		timeout -= 5;
 
 		r = xgpu_vi_mailbox_rcv_msg(adev, event);
 	}
@@ -447,7 +447,7 @@ static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
 	    request == IDH_REQ_GPU_RESET_ACCESS) {
 		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
 		if (r)
-			return r;
+			pr_err("Doesn't get ack from pf, continue\n");
 	}
 
 	return 0;
@@ -514,7 +514,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
 	}
 
 	/* Trigger recovery due to world switch failure */
-	amdgpu_sriov_gpu_reset(adev, false);
+	amdgpu_sriov_gpu_reset(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -537,12 +537,15 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
 {
 	int r;
 
+	/* trigger gpu-reset by hypervisor only if TDR disabled */
+	if (amdgpu_lockup_timeout == 0) {
 	/* see what event we get */
 	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
 
 	/* only handle FLR_NOTIFY now */
 	if (!r)
 		schedule_work(&adev->virt.flr_work);
+	}
 
 	return 0;
 }
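The xgpu_vi hunks above mirror the xgpu_ai changes: the 5 ms busy-wait, the softer ack-timeout handling, and the new reset call. On that last point, amdgpu_sriov_gpu_reset() appears to have moved from a bool parameter to a pointer, with NULL meaning no specific hung job to blame; the signature below is inferred from these call sites, so treat it as an assumption:

/* Assumed new signature behind the (adev, false) -> (adev, NULL) change
 * at both flr_work call sites; inferred, not quoted from this diff. */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev,
			   struct amdgpu_job *job);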
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c:
@@ -35,7 +35,7 @@
 
 u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0));
+	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
 	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
 	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -46,32 +46,33 @@ u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
 					uint32_t idx)
 {
-	return RREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0) + idx);
+	return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
 }
 
 void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
 					 uint32_t idx, uint32_t val)
 {
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0) + idx, val);
+	WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
 }
 
 void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 {
 	if (enable)
-		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_FB_EN),
-		       BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
+		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
+			     BIF_FB_EN__FB_READ_EN_MASK |
+			     BIF_FB_EN__FB_WRITE_EN_MASK);
 	else
-		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_FB_EN), 0);
+		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
 void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
 {
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 }
 
 u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 {
-	return RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE));
+	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
 }
 
 static const u32 nbio_sdma_doorbell_range_reg[] =
@@ -97,15 +98,7 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
 					bool enable)
 {
-	u32 tmp;
-
-	tmp = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_DOORBELL_APER_EN));
-	if (enable)
-		tmp = REG_SET_FIELD(tmp, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
-	else
-		tmp = REG_SET_FIELD(tmp, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
-
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_DOORBELL_APER_EN), tmp);
+	WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
 }
 
 void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
@@ -118,20 +111,20 @@ void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 			REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
 			REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
 
-		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW),
-		       lower_32_bits(adev->doorbell.base));
-		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH),
-		       upper_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     lower_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     upper_32_bits(adev->doorbell.base));
 	}
 
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL), tmp);
+	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 
 void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
 				 bool use_doorbell, int doorbell_index)
 {
-	u32 ih_doorbell_range = RREG32(SOC15_REG_OFFSET(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE));
+	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
 
 	if (use_doorbell) {
 		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -139,7 +132,7 @@ void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
 	} else
 		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
 
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_IH_DOORBELL_RANGE), ih_doorbell_range);
+	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
 }
 
 void nbio_v6_1_ih_control(struct amdgpu_device *adev)
@@ -147,15 +140,15 @@ void nbio_v6_1_ih_control(struct amdgpu_device *adev)
 	u32 interrupt_cntl;
 
 	/* setup interrupt control */
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL2), adev->dummy_page.addr >> 8);
-	interrupt_cntl = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL));
+	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
 	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
 	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
 	 */
 	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
 	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
 	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL), interrupt_cntl);
+	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
 }
 
 void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -251,8 +244,7 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
 {
 	uint32_t reg;
 
-	reg = RREG32(SOC15_REG_OFFSET(NBIO, 0,
-				      mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER));
+	reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
 	if (reg & 1)
 		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
 
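nbio_v6_1_enable_doorbell_aperture collapses an open-coded read/REG_SET_FIELD/write sequence into one WREG32_FIELD15() call. Roughly, and only as a sketch (the real macro lives in the common SOC15 headers and builds its mask and shift from the generated sh_mask definitions):

/* Approximate shape of WREG32_FIELD15: a read-modify-write of a single
 * named field in a SOC15 register.  Sketch, not the verbatim macro. */
#define WREG32_FIELD15(ip, idx, reg, field, val) \
	WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), \
	       (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & \
		~REG_FIELD_MASK(reg, field)) | \
	       ((val) << REG_FIELD_SHIFT(reg, field)))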
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c (new file, 212 lines)
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "nbio_v7_0.h"
+
+#include "vega10/soc15ip.h"
+#include "raven1/NBIO/nbio_7_0_default.h"
+#include "raven1/NBIO/nbio_7_0_offset.h"
+#include "raven1/NBIO/nbio_7_0_sh_mask.h"
+#include "vega10/vega10_enum.h"
+
+#define smnNBIF_MGCG_CTRL_LCLK	0x1013a05c
+
+u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+
+	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
+	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
+
+	return tmp;
+}
+
+u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
+					uint32_t idx)
+{
+	return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
+}
+
+void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
+					 uint32_t idx, uint32_t val)
+{
+	WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
+}
+
+void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
+{
+	if (enable)
+		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
+			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
+	else
+		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
+}
+
+void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+}
+
+u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
+{
+	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
+}
+
+static const u32 nbio_sdma_doorbell_range_reg[] =
+{
+	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
+	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
+};
+
+void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+				   bool use_doorbell, int doorbell_index)
+{
+	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+
+	if (use_doorbell) {
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+	} else
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
+
+	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+}
+
+void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+					bool enable)
+{
+	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
+}
+
+void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+				 bool use_doorbell, int doorbell_index)
+{
+	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
+
+	if (use_doorbell) {
+		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
+		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+	} else
+		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
+
+	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
+}
+
+static uint32_t nbio_7_0_read_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset)
+{
+	uint32_t data;
+
+	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
+	data = RREG32_SOC15(NBIO, 0, mmSYSHUB_DATA);
+
+	return data;
+}
+
+static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset,
+					  uint32_t data)
+{
+	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
+	WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
+}
+
+void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						bool enable)
+{
+	uint32_t def, data;
+
+	/* NBIF_MGCG_CTRL_LCLK */
+	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+		data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;
+	else
+		data &= ~NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;
+
+	if (def != data)
+		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+	/* SYSHUB_MGCG_CTRL_SOCCLK */
+	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;
+	else
+		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;
+
+	if (def != data)
+		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK, data);
+
+	/* SYSHUB_MGCG_CTRL_SHUBCLK */
+	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;
+	else
+		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;
+
+	if (def != data)
+		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
+}
+
+void nbio_v7_0_ih_control(struct amdgpu_device *adev)
+{
+	u32 interrupt_cntl;
+
+	/* setup interrupt control */
+	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
+	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
+}
+
+struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+
+int nbio_v7_0_init(struct amdgpu_device *adev)
+{
+	nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
+	nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
+	nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
+
+	nbio_v7_0_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+	nbio_v7_0_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+
+	return 0;
+}
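One detail worth noting in the new Raven NBIO code: the SYSHUB MGCG registers are reached through an index/data pair (mmSYSHUB_INDEX/mmSYSHUB_DATA) rather than a direct MMIO mapping. The helper pair above is an instance of the classic indirect-register pattern, sketched generically here:

/* Generic index/data indirect register access, the pattern behind
 * nbio_7_0_read/write_syshub_ind_mmr above.  Illustrative sketch; note
 * that callers racing on the same index/data pair would need a lock,
 * which the NBIO code gets away without because of its call pattern. */
static u32 ind_rreg_sketch(struct amdgpu_device *adev,
			   u32 index_reg, u32 data_reg, u32 offset)
{
	WREG32(index_reg, offset);	/* select the target register */
	return RREG32(data_reg);	/* read it through the window */
}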
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h (new file, 49 lines)
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NBIO_V7_0_H__
+#define __NBIO_V7_0_H__
+
+#include "soc15_common.h"
+
+extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+extern struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+int nbio_v7_0_init(struct amdgpu_device *adev);
+u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
+					uint32_t idx);
+void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
+					 uint32_t idx, uint32_t val);
+void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable);
+void nbio_v7_0_hdp_flush(struct amdgpu_device *adev);
+u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev);
+void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+				   bool use_doorbell, int doorbell_index);
+void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+					bool enable);
+void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+				 bool use_doorbell, int doorbell_index);
+void nbio_v7_0_ih_control(struct amdgpu_device *adev);
+u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev);
+void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						bool enable);
+#endif
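The header keeps nbio_v7_0 interface-compatible with nbio_v6_1, so SOC15 code can pick a backend per ASIC. A hedged sketch of the dispatch; the soc15.c call site and its exact shape are assumptions here:

/* Hypothetical backend selection: Raven (an APU) carries NBIO 7.0,
 * Vega10 uses NBIO 6.1.  Illustrative only. */
static int soc15_nbio_init_sketch(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_init(adev);
	return nbio_v6_1_init(adev);
}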
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c (new file, 308 lines)
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v10_0.h"
+
+#include "vega10/soc15ip.h"
+#include "raven1/MP/mp_10_0_offset.h"
+#include "raven1/GC/gc_9_1_offset.h"
+#include "raven1/SDMA0/sdma0_4_1_offset.h"
+
+static int
+psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
+{
+	switch(ucode->ucode_id) {
+	case AMDGPU_UCODE_ID_SDMA0:
+		*type = GFX_FW_TYPE_SDMA0;
+		break;
+	case AMDGPU_UCODE_ID_SDMA1:
+		*type = GFX_FW_TYPE_SDMA1;
+		break;
+	case AMDGPU_UCODE_ID_CP_CE:
+		*type = GFX_FW_TYPE_CP_CE;
+		break;
+	case AMDGPU_UCODE_ID_CP_PFP:
+		*type = GFX_FW_TYPE_CP_PFP;
+		break;
+	case AMDGPU_UCODE_ID_CP_ME:
+		*type = GFX_FW_TYPE_CP_ME;
+		break;
+	case AMDGPU_UCODE_ID_CP_MEC1:
+		*type = GFX_FW_TYPE_CP_MEC;
+		break;
+	case AMDGPU_UCODE_ID_CP_MEC1_JT:
+		*type = GFX_FW_TYPE_CP_MEC_ME1;
+		break;
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		*type = GFX_FW_TYPE_CP_MEC;
+		break;
+	case AMDGPU_UCODE_ID_CP_MEC2_JT:
+		*type = GFX_FW_TYPE_CP_MEC_ME2;
+		break;
+	case AMDGPU_UCODE_ID_RLC_G:
+		*type = GFX_FW_TYPE_RLC_G;
+		break;
+	case AMDGPU_UCODE_ID_SMC:
+		*type = GFX_FW_TYPE_SMU;
+		break;
+	case AMDGPU_UCODE_ID_UVD:
+		*type = GFX_FW_TYPE_UVD;
+		break;
+	case AMDGPU_UCODE_ID_VCE:
+		*type = GFX_FW_TYPE_VCE;
+		break;
+	case AMDGPU_UCODE_ID_MAXIMUM:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
+{
+	int ret;
+	uint64_t fw_mem_mc_addr = ucode->mc_addr;
+	struct common_firmware_header *header;
+
+	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+	header = (struct common_firmware_header *)ucode->fw;
+
+	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = (uint32_t)fw_mem_mc_addr;
+	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = (uint32_t)((uint64_t)fw_mem_mc_addr >> 32);
+	cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
+
+	ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
+	if (ret)
+		DRM_ERROR("Unknown firmware type\n");
+
+	return ret;
+}
+
+int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+	int ret = 0;
+	unsigned int psp_ring_reg = 0;
+	struct psp_ring *ring;
+	struct amdgpu_device *adev = psp->adev;
+
+	ring = &psp->km_ring;
+
+	ring->ring_type = ring_type;
+
+	/* allocate 4k Page of Local Frame Buffer memory for ring */
+	ring->ring_size = 0x1000;
+	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &adev->firmware.rbuf,
+				      &ring->ring_mem_mc_addr,
+				      (void **)&ring->ring_mem);
+	if (ret) {
+		ring->ring_size = 0;
+		return ret;
+	}
+
+	/* Write low address of the ring to C2PMSG_69 */
+	psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+	/* Write high address of the ring to C2PMSG_70 */
+	psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+	/* Write size of ring to C2PMSG_71 */
+	psp_ring_reg = ring->ring_size;
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+	/* Write the ring initialization command to C2PMSG_64 */
+	psp_ring_reg = ring_type;
+	psp_ring_reg = psp_ring_reg << 16;
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+	/* Wait for response flag (bit 31) in C2PMSG_64 */
+	psp_ring_reg = 0;
+	while ((psp_ring_reg & 0x80000000) == 0) {
+		psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64);
+	}
+
+	return 0;
+}
+
+int psp_v10_0_cmd_submit(struct psp_context *psp,
+			 struct amdgpu_firmware_info *ucode,
+			 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
+			 int index)
+{
+	unsigned int psp_write_ptr_reg = 0;
+	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
+	struct psp_ring *ring = &psp->km_ring;
+	struct amdgpu_device *adev = psp->adev;
+
+	/* KM (GPCOM) prepare write pointer */
+	psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+
+	/* Update KM RB frame pointer to new frame */
+	if ((psp_write_ptr_reg % ring->ring_size) == 0)
+		write_frame = ring->ring_mem;
+	else
+		write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
+
+	/* Update KM RB frame */
+	write_frame->cmd_buf_addr_hi = (unsigned int)(cmd_buf_mc_addr >> 32);
+	write_frame->cmd_buf_addr_lo = (unsigned int)(cmd_buf_mc_addr);
+	write_frame->fence_addr_hi = (unsigned int)(fence_mc_addr >> 32);
+	write_frame->fence_addr_lo = (unsigned int)(fence_mc_addr);
+	write_frame->fence_value = index;
+
+	/* Update the write Pointer in DWORDs */
+	psp_write_ptr_reg += sizeof(struct psp_gfx_rb_frame) / 4;
+	psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg;
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+
+	return 0;
+}
+
+static int
+psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+		   unsigned int *sram_data_reg_offset,
+		   enum AMDGPU_UCODE_ID ucode_id)
+{
+	int ret = 0;
+
+	switch(ucode_id) {
+/* TODO: needs to confirm */
+#if 0
+	case AMDGPU_UCODE_ID_SMC:
+		*sram_offset = 0;
+		*sram_addr_reg_offset = 0;
+		*sram_data_reg_offset = 0;
+		break;
+#endif
+
+	case AMDGPU_UCODE_ID_CP_CE:
+		*sram_offset = 0x0;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_CP_PFP:
+		*sram_offset = 0x0;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_CP_ME:
+		*sram_offset = 0x0;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_CP_MEC1:
+		*sram_offset = 0x10000;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		*sram_offset = 0x10000;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_RLC_G:
+		*sram_offset = 0x2000;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
+		break;
+
+	case AMDGPU_UCODE_ID_SDMA0:
+		*sram_offset = 0x0;
+		*sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
+		*sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
+		break;
+
+/* TODO: needs to confirm */
+#if 0
+	case AMDGPU_UCODE_ID_SDMA1:
+		*sram_offset = ;
+		*sram_addr_reg_offset = ;
+		break;
+
+	case AMDGPU_UCODE_ID_UVD:
+		*sram_offset = ;
+		*sram_addr_reg_offset = ;
+		break;
+
+	case AMDGPU_UCODE_ID_VCE:
+		*sram_offset = ;
+		*sram_addr_reg_offset = ;
+		break;
+#endif
+
+	case AMDGPU_UCODE_ID_MAXIMUM:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+bool psp_v10_0_compare_sram_data(struct psp_context *psp,
+				 struct amdgpu_firmware_info *ucode,
+				 enum AMDGPU_UCODE_ID ucode_type)
+{
+	int err = 0;
+	unsigned int fw_sram_reg_val = 0;
+	unsigned int fw_sram_addr_reg_offset = 0;
+	unsigned int fw_sram_data_reg_offset = 0;
+	unsigned int ucode_size;
+	uint32_t *ucode_mem = NULL;
+	struct amdgpu_device *adev = psp->adev;
+
+	err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+				 &fw_sram_data_reg_offset, ucode_type);
+	if (err)
+		return false;
+
+	WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
+
+	ucode_size = ucode->ucode_size;
+	ucode_mem = (uint32_t *)ucode->kaddr;
+	while (ucode_size) {
+		fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
+
+		if (*ucode_mem != fw_sram_reg_val)
+			return false;
+
+		ucode_mem++;
+		/* 4 bytes */
+		ucode_size -= 4;
+	}
+
+	return true;
+}
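One sharp edge in psp_v10_0_ring_init above: the wait for the response flag in C2PMSG_64 is an unbounded busy loop, so a PSP that never sets bit 31 would hang the CPU. A bounded variant could reuse the driver's psp_wait_for() helper, whose (psp, reg_index, reg_val, mask, check_changed) shape is taken from the psp_v3_1 hunks below; this is a sketch of an alternative, not what the commit does:

/* Bounded replacement for the open-coded response poll; sketch only. */
static int psp_v10_0_ring_wait_sketch(struct psp_context *psp)
{
	/* wait for bit 31 (the response flag) of C2PMSG_64 to latch */
	return psp_wait_for(psp,
			    SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
			    0x80000000, 0x80000000, false);
}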
drivers/gpu/drm/amd/amdgpu/psp_v10_0.h (new file, 41 lines)
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui
+ *
+ */
+#ifndef __PSP_V10_0_H__
+#define __PSP_V10_0_H__
+
+#include "amdgpu_psp.h"
+
+extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
+				  struct psp_gfx_cmd_resp *cmd);
+extern int psp_v10_0_ring_init(struct psp_context *psp,
+			       enum psp_ring_type ring_type);
+extern int psp_v10_0_cmd_submit(struct psp_context *psp,
+				struct amdgpu_firmware_info *ucode,
+				uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
+				int index);
+extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
+					struct amdgpu_firmware_info *ucode,
+					enum AMDGPU_UCODE_ID ucode_type);
+#endif
@@ -172,7 +172,7 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	/* Check sOS sign of life register to confirm sys driver and sOS
 	 * are already been loaded.
 	 */
-	sol_reg = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81));
+	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
 	if (sol_reg)
 		return 0;
 
@@ -188,10 +188,10 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
 
 	/* Provide the sys driver to bootrom */
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 1 << 16;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
 	       psp_gfxdrv_command_reg);
 
 	/* there might be handshake issue with hardware which needs delay */
@@ -213,7 +213,7 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	/* Check sOS sign of life register to confirm sys driver and sOS
 	 * are already been loaded.
 	 */
-	sol_reg = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81));
+	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
 	if (sol_reg)
 		return 0;
 
@@ -229,17 +229,17 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
 
 	/* Provide the PSP secure OS to bootrom */
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
 	       (uint32_t)(psp->fw_pri_mc_addr >> 20));
 	psp_gfxdrv_command_reg = 2 << 16;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
 	       psp_gfxdrv_command_reg);
 
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
 #if 0
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
-			   RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81)),
+			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
 			   0, true);
 #endif
 
@@ -299,17 +299,17 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
 
 	/* Write low address of the ring to C2PMSG_69 */
 	psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_69), psp_ring_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
 	/* Write high address of the ring to C2PMSG_70 */
 	psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_70), psp_ring_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
 	/* Write size of ring to C2PMSG_71 */
 	psp_ring_reg = ring->ring_size;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_71), psp_ring_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
 	/* Write the ring initialization command to C2PMSG_64 */
 	psp_ring_reg = ring_type;
 	psp_ring_reg = psp_ring_reg << 16;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), psp_ring_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
@@ -332,7 +332,7 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
 
 	/* Write the ring destroy command to C2PMSG_64 */
 	psp_ring_reg = 3 << 16;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), psp_ring_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
@@ -361,7 +361,7 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
 
 	/* KM (GPCOM) prepare write pointer */
-	psp_write_ptr_reg = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_67));
+	psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
 	/* Update KM RB frame pointer to new frame */
 	/* write_frame ptr increments by size of rb_frame in bytes */
@@ -383,7 +383,7 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 
 	/* Update the write Pointer in DWORDs */
 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-	WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_67), psp_write_ptr_reg);
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
 
 	return 0;
 }
@@ -515,7 +515,7 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 	uint32_t reg;
 
 	reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
-	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
+	WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
-	reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
+	reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
 	return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
 }
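Every change in the psp_v3_1 hunks above is the same mechanical rewrite: RREG32(SOC15_REG_OFFSET(ip, inst, reg)) becomes RREG32_SOC15(ip, inst, reg), and likewise for writes. A self-contained sketch of what the wrappers buy, with a stub register file and a single-segment base — the real SOC15_REG_OFFSET selects among five segments via reg##_BASE_IDX, and the offsets here are invented for the example:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4096];                 /* stand-in for MMIO space */
#define RREG32(off)     (regs[(off)])
#define WREG32(off, v)  (regs[(off)] = (v))

/* Hypothetical base/offset values; the real ones come from the
 * generated vega10 headers. */
#define MP0_BASE__INST0_SEG0   0x100
#define mmMP0_SMN_C2PMSG_81    0x51

#define SOC15_REG_OFFSET(ip, inst, reg) \
        (ip##_BASE__INST##inst##_SEG0 + (reg))
#define RREG32_SOC15(ip, inst, reg) \
        RREG32(SOC15_REG_OFFSET(ip, inst, reg))
#define WREG32_SOC15(ip, inst, reg, v) \
        WREG32(SOC15_REG_OFFSET(ip, inst, reg), (v))

int main(void)
{
        /* both spellings hit the same register; the short form is what
         * the series converts call sites to */
        WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), 0x1);
        printf("sol_reg = 0x%x\n", RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
        return 0;
}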
@@ -643,8 +643,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 
 		/* Initialize the ring buffer's read and write pointers */
+		ring->wptr = 0;
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
-		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		sdma_v3_0_ring_set_wptr(ring);
 		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
@@ -659,9 +660,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 
-		ring->wptr = 0;
-		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
-
 		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
 
 		if (ring->use_doorbell) {
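The sdma_v3_0 fix above is about ordering and routing: the software copy of the write pointer is cleared before any hardware state is touched, and the hardware update goes through sdma_v3_0_ring_set_wptr() so the doorbell path is used when the ring is doorbell-driven instead of always poking the WPTR register. A sketch of that dispatch shape (types and both sinks are stubbed; only the control flow mirrors the driver):

#include <stdint.h>

struct ring {
        uint64_t wptr;
        int      use_doorbell;
        uint32_t doorbell_index;
};

static uint32_t doorbells[64];   /* stand-in for the doorbell BAR */
static uint32_t wptr_reg;        /* stand-in for mmSDMA0_GFX_RB_WPTR */

/* One entry point for all wptr updates: ring the doorbell when the
 * ring uses one, otherwise fall back to the MMIO register. */
static void ring_set_wptr(struct ring *ring)
{
        uint32_t v = (uint32_t)ring->wptr << 2;  /* dwords -> bytes */

        if (ring->use_doorbell)
                doorbells[ring->doorbell_index] = v;
        else
                wptr_reg = v;
}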
@@ -35,6 +35,7 @@
 #include "vega10/MMHUB/mmhub_1_0_offset.h"
 #include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
 #include "vega10/HDP/hdp_4_0_offset.h"
+#include "raven1/SDMA0/sdma0_4_1_default.h"
 
 #include "soc15_common.h"
 #include "soc15.h"
@@ -42,6 +43,10 @@
 
 MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
+
+#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
+#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
 
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
@@ -82,6 +87,26 @@ static const u32 golden_settings_sdma_vg10[] = {
 	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
 };
 
+static const u32 golden_settings_sdma_4_1[] =
+{
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
+};
+
+static const u32 golden_settings_sdma_rv1[] =
+{
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
+	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
+};
+
 static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
 {
 	u32 base = 0;
@@ -112,25 +137,19 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
 						 golden_settings_sdma_vg10,
 						 (const u32)ARRAY_SIZE(golden_settings_sdma_vg10));
 		break;
+	case CHIP_RAVEN:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_sdma_4_1,
+						 (const u32)ARRAY_SIZE(golden_settings_sdma_4_1));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_sdma_rv1,
+						 (const u32)ARRAY_SIZE(golden_settings_sdma_rv1));
+		break;
 	default:
 		break;
 	}
 }
 
-static void sdma_v4_0_print_ucode_regs(void *handle)
-{
-	int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VEGA10 SDMA ucode registers\n");
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		dev_info(adev->dev, " SDMA%d_UCODE_ADDR=0x%08X\n",
-			 i, RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR)));
-		dev_info(adev->dev, " SDMA%d_UCODE_CHECKSUM=0x%08X\n",
-			 i, RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_CHECKSUM)));
-	}
-}
-
 /**
  * sdma_v4_0_init_microcode - load ucode images from disk
  *
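The golden-settings arrays added above are flat triplets of (register offset, and_mask, or_value), consumed three at a time by amdgpu_program_register_sequence(). A self-contained sketch of that application loop — the read-modify-write convention shown matches how the driver uses these tables, but treat the helper as an approximation, not the exact amdgpu function:

#include <stdint.h>

static uint32_t regs[0x10000];              /* stub MMIO space */
#define RREG32(off)     (regs[(off)])
#define WREG32(off, v)  (regs[(off)] = (v))

static void program_register_sequence(const uint32_t *tbl, uint32_t n)
{
        uint32_t i, reg, and_mask, or_mask, tmp;

        for (i = 0; i < n; i += 3) {
                reg      = tbl[i + 0];
                and_mask = tbl[i + 1];
                or_mask  = tbl[i + 2];
                if (and_mask == 0xffffffff) {
                        tmp = or_mask;   /* full mask: plain overwrite */
                } else {
                        tmp  = RREG32(reg);
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}

int main(void)
{
        /* hypothetical triplet: clear the low nibble of reg 0x10, set bit 1 */
        const uint32_t golden[] = { 0x10, 0x0000000f, 0x00000002 };

        regs[0x10] = 0xdeadbeef;
        program_register_sequence(golden, 3);
        /* regs[0x10] is now 0xdeadbee2 */
        return 0;
}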
@@ -158,6 +177,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 		chip_name = "vega10";
 		break;
+	case CHIP_RAVEN:
+		chip_name = "raven";
+		break;
 	default:
 		BUG();
 	}
@@ -350,7 +372,9 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask = 0;
 	struct nbio_hdp_flush_reg *nbio_hf_reg;
 
-	if (ring->adev->asic_type == CHIP_VEGA10)
+	if (ring->adev->flags & AMD_IS_APU)
+		nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+	else
 		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
 
 	if (ring == &ring->adev->sdma.instance[0].ring)
@@ -581,6 +605,9 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
 		}
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+		if (adev->flags & AMD_IS_APU)
+			nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+		else
 			nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
 
 		if (amdgpu_sriov_vf(adev))
@@ -633,6 +660,69 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void
+sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
+{
+	uint32_t def, data;
+
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
+		/* disable idle interrupt */
+		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
+		data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+
+		if (data != def)
+			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
+	} else {
+		/* disable idle interrupt */
+		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
+		data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+		if (data != def)
+			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
+	}
+}
+
+static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	/* Enable HW based PG. */
+	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
+	data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
+	if (data != def)
+		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
+
+	/* enable interrupt */
+	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
+	data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+	if (data != def)
+		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
+
+	/* Configure hold time to filter in-valid power on/off request. Use default right now */
+	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
+	data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
+	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
+	/* Configure switch time for hysteresis purpose. Use default right now */
+	data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
+	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
+	if(data != def)
+		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
+}
+
+static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
+{
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
+		return;
+
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		sdma_v4_1_init_power_gating(adev);
+		sdma_v4_1_update_power_gating(adev, true);
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * sdma_v4_0_rlc_resume - setup and start the async dma engines
  *
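All of the new power-gating helpers above lean on the same idiom: snapshot the register into def and data, edit the local copy, and only issue the MMIO write when something actually changed, since register writes on these paths are not free. Distilled into a compilable sketch (the offset and the mask value are illustrative):

#include <stdint.h>
#include <stdbool.h>

static uint32_t regs[256];
#define RREG32(off)     (regs[(off)])
#define WREG32(off, v)  (regs[(off)] = (v))

#define SDMA0_CNTL                            0x10        /* made-up offset */
#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK  0x00000008

static void set_ctxempty_irq(bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SDMA0_CNTL);
        if (enable)
                data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
        else
                data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
        if (data != def)                /* skip the write when it's a no-op */
                WREG32(SDMA0_CNTL, data);
}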
@@ -643,7 +733,8 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
  */
 static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
 {
-	/* XXX todo */
+	sdma_v4_0_init_pg(adev);
+
 	return 0;
 }
 
@@ -699,8 +790,6 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
 	}
 
-	sdma_v4_0_print_ucode_regs(adev);
-
 	return 0;
 }
 
@@ -726,7 +815,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 	}
 
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-		DRM_INFO("Loading via direct write\n");
 		r = sdma_v4_0_load_microcode(adev);
 		if (r)
 			return r;
@@ -764,8 +852,6 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
 	u32 tmp;
 	u64 gpu_addr;
 
-	DRM_INFO("In Ring test func\n");
-
 	r = amdgpu_wb_get(adev, &index);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
@@ -1038,9 +1124,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = pd_addr | 0x1; /* valid bit */
-	/* now only use physical base address of PDE and valid */
-	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
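The vm-flush change above swaps an open-coded valid bit and BUG_ON for amdgpu_gart_get_vm_pde(), which lets each GMC generation massage the page-directory address into its PDE format before the valid bit is tagged on. A hedged sketch of the shape — the address masking below is a placeholder, not the gmc v9 rule:

#include <stdint.h>

#define AMDGPU_PTE_VALID  (1ull << 0)

/* Stands in for the per-ASIC hook; here it just keeps plausible
 * physical-address bits as an illustration. */
static uint64_t gart_get_vm_pde(uint64_t pd_addr)
{
        return pd_addr & 0x0000fffffffff000ull;
}

static uint64_t make_pde(uint64_t pd_addr)
{
        return gart_get_vm_pde(pd_addr) | AMDGPU_PTE_VALID;
}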
@@ -1074,6 +1159,9 @@ static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->asic_type == CHIP_RAVEN)
+		adev->sdma.num_instances = 1;
+	else
 		adev->sdma.num_instances = 2;
 
 	sdma_v4_0_set_ring_funcs(adev);
@@ -1406,6 +1494,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+	case CHIP_RAVEN:
 		sdma_v4_0_update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		sdma_v4_0_update_medium_grain_light_sleep(adev,
@@ -1420,6 +1509,17 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
 static int sdma_v4_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		sdma_v4_1_update_power_gating(adev,
+				state == AMD_PG_STATE_GATE ? true : false);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -971,44 +971,44 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 }
 
 static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
-	{GRBM_STATUS, false},
+	{GRBM_STATUS},
-	{GB_ADDR_CONFIG, false},
+	{GB_ADDR_CONFIG},
-	{MC_ARB_RAMCFG, false},
+	{MC_ARB_RAMCFG},
-	{GB_TILE_MODE0, false},
+	{GB_TILE_MODE0},
-	{GB_TILE_MODE1, false},
+	{GB_TILE_MODE1},
-	{GB_TILE_MODE2, false},
+	{GB_TILE_MODE2},
-	{GB_TILE_MODE3, false},
+	{GB_TILE_MODE3},
-	{GB_TILE_MODE4, false},
+	{GB_TILE_MODE4},
-	{GB_TILE_MODE5, false},
+	{GB_TILE_MODE5},
-	{GB_TILE_MODE6, false},
+	{GB_TILE_MODE6},
-	{GB_TILE_MODE7, false},
+	{GB_TILE_MODE7},
-	{GB_TILE_MODE8, false},
+	{GB_TILE_MODE8},
-	{GB_TILE_MODE9, false},
+	{GB_TILE_MODE9},
-	{GB_TILE_MODE10, false},
+	{GB_TILE_MODE10},
-	{GB_TILE_MODE11, false},
+	{GB_TILE_MODE11},
-	{GB_TILE_MODE12, false},
+	{GB_TILE_MODE12},
-	{GB_TILE_MODE13, false},
+	{GB_TILE_MODE13},
-	{GB_TILE_MODE14, false},
+	{GB_TILE_MODE14},
-	{GB_TILE_MODE15, false},
+	{GB_TILE_MODE15},
-	{GB_TILE_MODE16, false},
+	{GB_TILE_MODE16},
-	{GB_TILE_MODE17, false},
+	{GB_TILE_MODE17},
-	{GB_TILE_MODE18, false},
+	{GB_TILE_MODE18},
-	{GB_TILE_MODE19, false},
+	{GB_TILE_MODE19},
-	{GB_TILE_MODE20, false},
+	{GB_TILE_MODE20},
-	{GB_TILE_MODE21, false},
+	{GB_TILE_MODE21},
-	{GB_TILE_MODE22, false},
+	{GB_TILE_MODE22},
-	{GB_TILE_MODE23, false},
+	{GB_TILE_MODE23},
-	{GB_TILE_MODE24, false},
+	{GB_TILE_MODE24},
-	{GB_TILE_MODE25, false},
+	{GB_TILE_MODE25},
-	{GB_TILE_MODE26, false},
+	{GB_TILE_MODE26},
-	{GB_TILE_MODE27, false},
+	{GB_TILE_MODE27},
-	{GB_TILE_MODE28, false},
+	{GB_TILE_MODE28},
-	{GB_TILE_MODE29, false},
+	{GB_TILE_MODE29},
-	{GB_TILE_MODE30, false},
+	{GB_TILE_MODE30},
-	{GB_TILE_MODE31, false},
+	{GB_TILE_MODE31},
-	{CC_RB_BACKEND_DISABLE, false, true},
+	{CC_RB_BACKEND_DISABLE, true},
-	{GC_USER_RB_BACKEND_DISABLE, false, true},
+	{GC_USER_RB_BACKEND_DISABLE, true},
-	{PA_SC_RASTER_CONFIG, false, true},
+	{PA_SC_RASTER_CONFIG, true},
 };
 
 static uint32_t si_get_register_value(struct amdgpu_device *adev,
@@ -1093,13 +1093,13 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
 
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+		bool indexed = si_allowed_read_registers[i].grbm_indexed;
+
 		if (reg_offset != si_allowed_read_registers[i].reg_offset)
 			continue;
 
-		if (!si_allowed_read_registers[i].untouched)
-			*value = si_get_register_value(adev,
-					si_allowed_read_registers[i].grbm_indexed,
-					se_num, sh_num, reg_offset);
+		*value = si_get_register_value(adev, indexed, se_num, sh_num,
+					       reg_offset);
 		return 0;
 	}
 	return -EINVAL;
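The table rewrite above works because of C initializer semantics: with the untouched member gone from struct amdgpu_allowed_register_entry, every entry can drop its explicit false, since trailing members of a braced initializer are zero-initialized. A sketch of the resulting shape — field names per the driver, the exact struct layout is inferred from the initializers:

#include <stdbool.h>
#include <stdint.h>

struct allowed_register_entry {
        uint32_t reg_offset;
        bool     grbm_indexed;
};

/* "{REG}" leaves grbm_indexed == false; "{REG, true}" opts a register
 * into GRBM-indexed reads.  Offsets below are hypothetical. */
static const struct allowed_register_entry table[] = {
        {0x2004},        /* e.g. GRBM_STATUS: plain read           */
        {0x22a8, true},  /* e.g. PA_SC_RASTER_CONFIG: indexed read */
};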
@@ -57,6 +57,7 @@
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
 #include "vce_v4_0.h"
+#include "vcn_v1_0.h"
 #include "amdgpu_powerplay.h"
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
@@ -104,10 +105,10 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
+	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -125,10 +126,10 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags, address, data;
 	struct nbio_pcie_index_data *nbio_pcie_id;
 
-	if (adev->asic_type == CHIP_VEGA10)
+	if (adev->flags & AMD_IS_APU)
-		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 	else
-		BUG();
+		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 
 	address = nbio_pcie_id->index_offset;
 	data = nbio_pcie_id->data_offset;
@@ -199,6 +200,9 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_memsize(adev);
+	else
 		return nbio_v6_1_get_memsize(adev);
 }
 
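From here on the soc15 hunks repeat one dispatch rule: Raven is an APU, so paths that used to hard-code the Vega10 NBIO v6.1 helpers (or BUG() on anything else) now branch on adev->flags & AMD_IS_APU and route APUs to the NBIO v7.0 helpers. A compilable sketch of the rule with stubbed helpers — the flag bit and return values are made up for the example:

#include <stdint.h>

#define AMD_IS_APU  (1u << 0)    /* illustrative flag bit */

struct amdgpu_device { uint32_t flags; };

static uint32_t nbio_v6_1_get_memsize(struct amdgpu_device *adev) { (void)adev; return 8192; }
static uint32_t nbio_v7_0_get_memsize(struct amdgpu_device *adev) { (void)adev; return 4096; }

static uint32_t get_config_memsize(struct amdgpu_device *adev)
{
        if (adev->flags & AMD_IS_APU)           /* Raven */
                return nbio_v7_0_get_memsize(adev);
        return nbio_v6_1_get_memsize(adev);     /* Vega10 */
}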
@@ -206,6 +210,10 @@ static const u32 vega10_golden_init[] =
 {
 };
 
+static const u32 raven_golden_init[] =
+{
+};
+
 static void soc15_init_golden_registers(struct amdgpu_device *adev)
 {
 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
@@ -217,6 +225,11 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
 						 vega10_golden_init,
 						 (const u32)ARRAY_SIZE(vega10_golden_init));
 		break;
+	case CHIP_RAVEN:
+		amdgpu_program_register_sequence(adev,
+						 raven_golden_init,
+						 (const u32)ARRAY_SIZE(raven_golden_init));
+		break;
 	default:
 		break;
 	}
@@ -280,29 +293,25 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
-static struct amdgpu_allowed_register_entry vega10_allowed_read_registers[] = {
-	/* todo */
-};
-
 static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
-	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG), false},
+	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
-	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG), false},
+	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
-	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
-	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), false},
+	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
 };
 
 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -341,38 +350,13 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 			       u32 sh_num, u32 reg_offset, u32 *value)
 {
-	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
-	struct amdgpu_allowed_register_entry *asic_register_entry;
-	uint32_t size, i;
+	uint32_t i;
 
 	*value = 0;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-		asic_register_table = vega10_allowed_read_registers;
-		size = ARRAY_SIZE(vega10_allowed_read_registers);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (asic_register_table) {
-		for (i = 0; i < size; i++) {
-			asic_register_entry = asic_register_table + i;
-			if (reg_offset != asic_register_entry->reg_offset)
-				continue;
-			if (!asic_register_entry->untouched)
-				*value = soc15_get_register_value(adev,
-								  asic_register_entry->grbm_indexed,
-								  se_num, sh_num, reg_offset);
-			return 0;
-		}
-	}
-
 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
 		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
 			continue;
 
-		if (!soc15_allowed_read_registers[i].untouched)
 		*value = soc15_get_register_value(adev,
 					soc15_allowed_read_registers[i].grbm_indexed,
 					se_num, sh_num, reg_offset);
@@ -396,7 +380,10 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
+		u32 memsize = (adev->flags & AMD_IS_APU) ?
+			nbio_v7_0_get_memsize(adev) :
+			nbio_v6_1_get_memsize(adev);
+		if (memsize != 0xffffffff)
 			break;
 		udelay(1);
 	}
@@ -470,9 +457,13 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
 					   bool enable)
 {
+	if (adev->flags & AMD_IS_APU) {
+		nbio_v7_0_enable_doorbell_aperture(adev, enable);
+	} else {
 		nbio_v6_1_enable_doorbell_aperture(adev, enable);
 		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
 	}
+}
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
 {
@@ -493,8 +484,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
-		amdgpu_ip_block_add(adev, &gfxhub_v1_0_ip_block);
-		amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
 		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
 		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
@@ -508,6 +497,18 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
 		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
 		break;
+	case CHIP_RAVEN:
+		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
+		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
+		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
+		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -517,6 +518,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
+	if (adev->flags & AMD_IS_APU)
+		return nbio_v7_0_get_rev_id(adev);
+	else
 		return nbio_v6_1_get_rev_id(adev);
 }
 
@@ -560,11 +564,6 @@ static int soc15_common_early_init(void *handle)
 	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
 		psp_enabled = true;
 
-	if (amdgpu_sriov_vf(adev)) {
-		amdgpu_virt_init_setting(adev);
-		xgpu_ai_mailbox_set_irq_funcs(adev);
-	}
-
 	/*
 	 * nbio need be used for both sdma and gfx9, but only
 	 * initializes once
@@ -573,6 +572,9 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_VEGA10:
 		nbio_v6_1_init(adev);
 		break;
+	case CHIP_RAVEN:
+		nbio_v7_0_init(adev);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -603,11 +605,39 @@ static int soc15_common_early_init(void *handle)
 		adev->pg_flags = 0;
 		adev->external_rev_id = 0x1;
 		break;
+	case CHIP_RAVEN:
+		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_3D_CGCG |
+			AMD_CG_SUPPORT_GFX_3D_CGLS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_BIF_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_DRM_MGCG |
+			AMD_CG_SUPPORT_DRM_LS |
+			AMD_CG_SUPPORT_ROM_MGCG |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS;
+		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+		adev->external_rev_id = 0x1;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_virt_init_setting(adev);
+		xgpu_ai_mailbox_set_irq_funcs(adev);
+	}
+
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
 	amdgpu_get_pcie_info(adev);
@@ -825,6 +855,20 @@ static int soc15_common_set_clockgating_state(void *handle,
 		soc15_update_df_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
+	case CHIP_RAVEN:
+		nbio_v7_0_update_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		nbio_v6_1_update_medium_grain_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		soc15_update_hdp_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		soc15_update_drm_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		soc15_update_drm_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		soc15_update_rom_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		break;
 	default:
 		break;
 	}
@@ -25,6 +25,7 @@
 #define __SOC15_H__
 
 #include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
 
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
 
@@ -63,6 +63,13 @@ struct nbio_pcie_index_data {
 		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
 		(ip##_BASE__INST##inst##_SEG4 + reg))))))
 
+#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
+	RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+		(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+		(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+		(ip##_BASE__INST##inst##_SEG4 + reg))))) + offset)
+
 #define WREG32_SOC15(ip, inst, reg, value) \
 	WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
 		(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
@@ -70,6 +77,13 @@ struct nbio_pcie_index_data {
 		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
 		(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
 
+#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
+	WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+		(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+		(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+		(ip##_BASE__INST##inst##_SEG4 + reg))))) + offset, value)
+
 #endif
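The two _OFFSET macro variants added above let callers index into a bank of identically laid-out registers without re-deriving the segment base each time: the extra offset is simply added after the segment selection, so RREG32_SOC15_OFFSET(ip, inst, reg, i) is equivalent to RREG32(SOC15_REG_OFFSET(ip, inst, reg) + i). A sketch with the same simplified single-segment stand-ins used earlier — the register name and offsets are invented:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4096];
#define RREG32(off)  (regs[(off)])

#define UVD_BASE__INST0_SEG0  0x800
#define mmUVD_CTX_DATA        0x28   /* hypothetical banked register */

#define SOC15_REG_OFFSET(ip, inst, reg) (ip##_BASE__INST##inst##_SEG0 + (reg))
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
        RREG32(SOC15_REG_OFFSET(ip, inst, reg) + (offset))

int main(void)
{
        /* read the third register of the hypothetical bank */
        printf("0x%x\n", RREG32_SOC15_OFFSET(UVD, 0, mmUVD_CTX_DATA, 2));
        return 0;
}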
|||||||
@@ -132,6 +132,7 @@
|
|||||||
* 1 - pfp
|
* 1 - pfp
|
||||||
*/
|
*/
|
||||||
#define PACKET3_INDIRECT_BUFFER 0x3F
|
#define PACKET3_INDIRECT_BUFFER 0x3F
|
||||||
|
#define INDIRECT_BUFFER_VALID (1 << 23)
|
||||||
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
|
#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28)
|
||||||
/* 0 - LRU
|
/* 0 - LRU
|
||||||
* 1 - Stream
|
* 1 - Stream
|
||||||
@@ -259,8 +260,97 @@
|
|||||||
#define PACKET3_WAIT_ON_CE_COUNTER 0x86
|
#define PACKET3_WAIT_ON_CE_COUNTER 0x86
|
||||||
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
|
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
|
||||||
#define PACKET3_SWITCH_BUFFER 0x8B
|
#define PACKET3_SWITCH_BUFFER 0x8B
|
||||||
|
#define PACKET3_FRAME_CONTROL 0x90
|
||||||
|
# define FRAME_CMD(x) ((x) << 28)
|
||||||
|
/*
|
||||||
|
* x=0: tmz_begin
|
||||||
|
* x=1: tmz_end
|
||||||
|
*/
|
||||||
|
|
||||||
#define PACKET3_SET_RESOURCES 0xA0
|
#define PACKET3_SET_RESOURCES 0xA0
|
||||||
|
/* 1. header
|
||||||
|
* 2. CONTROL
|
||||||
|
* 3. QUEUE_MASK_LO [31:0]
|
||||||
|
* 4. QUEUE_MASK_HI [31:0]
|
||||||
|
* 5. GWS_MASK_LO [31:0]
|
||||||
|
* 6. GWS_MASK_HI [31:0]
|
||||||
|
* 7. OAC_MASK [15:0]
|
||||||
|
* 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
|
||||||
|
*/
|
||||||
|
# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
|
||||||
|
# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
|
||||||
|
# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
|
||||||
#define PACKET3_MAP_QUEUES 0xA2
|
#define PACKET3_MAP_QUEUES 0xA2
|
||||||
|
/* 1. header
|
||||||
|
* 2. CONTROL
|
||||||
|
* 3. CONTROL2
|
||||||
|
* 4. MQD_ADDR_LO [31:0]
|
||||||
|
* 5. MQD_ADDR_HI [31:0]
|
||||||
|
* 6. WPTR_ADDR_LO [31:0]
|
||||||
|
* 7. WPTR_ADDR_HI [31:0]
|
||||||
|
*/
|
||||||
|
/* CONTROL */
|
||||||
|
# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
|
||||||
|
# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
|
||||||
|
# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 13)
|
||||||
|
# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 16)
|
||||||
|
# define PACKET3_MAP_QUEUES_ME(x) ((x) << 18)
|
||||||
|
# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
|
||||||
|
# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
|
||||||
|
# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
|
||||||
|
# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
|
||||||
|
/* CONTROL2 */
|
||||||
|
# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
|
||||||
|
# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
|
||||||
|
#define PACKET3_UNMAP_QUEUES 0xA3
|
||||||
|
/* 1. header
|
||||||
|
* 2. CONTROL
|
||||||
|
* 3. CONTROL2
|
||||||
|
* 4. CONTROL3
|
||||||
|
* 5. CONTROL4
|
||||||
|
* 6. CONTROL5
|
||||||
|
*/
|
||||||
|
/* CONTROL */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
|
||||||
|
/* 0 - PREEMPT_QUEUES
|
||||||
|
* 1 - RESET_QUEUES
|
||||||
|
* 2 - DISABLE_PROCESS_QUEUES
|
||||||
|
* 3 - PREEMPT_QUEUES_NO_UNMAP
|
||||||
|
*/
|
||||||
|
# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
|
||||||
|
# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
|
||||||
|
# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
|
||||||
|
/* CONTROL2a */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
|
||||||
|
/* CONTROL2b */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
|
||||||
|
/* CONTROL3a */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
|
||||||
|
/* CONTROL3b */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
|
||||||
|
/* CONTROL4 */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
|
||||||
|
/* CONTROL5 */
|
||||||
|
# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
|
||||||
|
#define PACKET3_QUERY_STATUS 0xA4
|
||||||
|
/* 1. header
|
||||||
|
* 2. CONTROL
|
||||||
|
* 3. CONTROL2
|
||||||
|
* 4. ADDR_LO [31:0]
|
||||||
|
* 5. ADDR_HI [31:0]
|
||||||
|
* 6. DATA_LO [31:0]
|
||||||
|
* 7. DATA_HI [31:0]
|
||||||
|
*/
|
||||||
|
/* CONTROL */
|
||||||
|
# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
|
||||||
|
# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
|
||||||
|
# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
|
||||||
|
/* CONTROL2a */
|
||||||
|
# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
|
||||||
|
/* CONTROL2b */
|
||||||
|
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
|
||||||
|
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
|
||||||
|
|
||||||
|
|
||||||
#define VCE_CMD_NO_OP 0x00000000
|
#define VCE_CMD_NO_OP 0x00000000
|
||||||
#define VCE_CMD_END 0x00000001
|
#define VCE_CMD_END 0x00000001
|
||||||
|
|||||||
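The MAP_QUEUES/UNMAP_QUEUES/QUERY_STATUS families above are the packets the KIQ uses to manage compute queues. Each field packer shifts its value into place within a CONTROL dword; a compilable sketch using the shift positions from the hunk — the field values themselves are examples, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define PACKET3_MAP_QUEUES_QUEUE_SEL(x)   ((x) << 4)
#define PACKET3_MAP_QUEUES_VMID(x)        ((x) << 8)
#define PACKET3_MAP_QUEUES_QUEUE(x)       ((x) << 13)
#define PACKET3_MAP_QUEUES_PIPE(x)        ((x) << 16)
#define PACKET3_MAP_QUEUES_ME(x)          ((x) << 18)
#define PACKET3_MAP_QUEUES_QUEUE_TYPE(x)  ((x) << 21)
#define PACKET3_MAP_QUEUES_ENGINE_SEL(x)  ((x) << 26)
#define PACKET3_MAP_QUEUES_NUM_QUEUES(x)  ((x) << 29)

int main(void)
{
        /* CONTROL dword for mapping one queue on ME1/pipe0/queue0 --
         * purely illustrative selector values */
        uint32_t control = PACKET3_MAP_QUEUES_QUEUE_SEL(0) |
                           PACKET3_MAP_QUEUES_VMID(0) |
                           PACKET3_MAP_QUEUES_QUEUE(0) |
                           PACKET3_MAP_QUEUES_PIPE(0) |
                           PACKET3_MAP_QUEUES_ME(1) |
                           PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                           PACKET3_MAP_QUEUES_ENGINE_SEL(0) |
                           PACKET3_MAP_QUEUES_NUM_QUEUES(1);

        printf("MAP_QUEUES CONTROL = 0x%08x\n", control);
        return 0;
}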
@@ -58,7 +58,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
+	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
 }
 
 /**
@@ -73,9 +73,9 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring == &adev->uvd.ring_enc[0])
-		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR));
+		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
 	else
-		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2));
+		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
 }
 
 /**
@@ -89,7 +89,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
+	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
 }
 
 /**
@@ -107,9 +107,9 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 		return adev->wb.wb[ring->wptr_offs];
 
 	if (ring == &adev->uvd.ring_enc[0])
-		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
+		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
 	else
-		return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2));
+		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
 }
 
 /**
@@ -123,7 +123,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
+	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 }
 
 /**
@@ -145,10 +145,10 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 	}
 
 	if (ring == &adev->uvd.ring_enc[0])
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
+		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
 			lower_32_bits(ring->wptr));
 	else
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2),
+		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
 			lower_32_bits(ring->wptr));
 }
 
@@ -562,7 +562,13 @@ static int uvd_v7_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 
+	if (!amdgpu_sriov_vf(adev))
 		uvd_v7_0_stop(adev);
+	else {
+		/* full access mode, so don't touch any UVD register */
+		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+	}
+
 	ring->ready = false;
 
 	return 0;
@@ -611,46 +617,46 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 	uint32_t offset;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
 		offset = 0;
 	} else {
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
-		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));
 		offset = size;
 	}
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
		AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->uvd.gpu_addr + offset));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->uvd.gpu_addr + offset));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
		AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
+	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
+	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
+	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
 
-	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
+	WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
@@ -664,29 +670,29 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 
 	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 
 	/* 2, update vmid of descriptor */
-	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
+	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 
 	/* 3, notify mmsch about the size of this descriptor */
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 
 	/* 4, set resp to zero */
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
 	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
-	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
+	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 
-	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
+	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 	loop = 1000;
 	while ((data & 0x10000002) != 0x10000002) {
 		udelay(10);
-		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
+		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 		loop--;
 		if (!loop)
 			break;
@@ -696,6 +702,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 		return -EBUSY;
 	}
+	WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
 
 	return 0;
 }
@@ -928,7 +935,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
mdelay(1);
|
mdelay(1);
|
||||||
|
|
||||||
/* put LMI, VCPU, RBC etc... into reset */
|
/* put LMI, VCPU, RBC etc... into reset */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
|
WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
|
||||||
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
|
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
|
||||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
|
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
|
||||||
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
|
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
|
||||||
@@ -940,7 +947,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
mdelay(5);
|
mdelay(5);
|
||||||
|
|
||||||
/* initialize UVD memory controller */
|
/* initialize UVD memory controller */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
|
WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
|
||||||
(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
|
(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
|
||||||
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
|
||||||
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
|
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
|
||||||
@@ -953,23 +960,23 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
lmi_swap_cntl = 0xa;
|
lmi_swap_cntl = 0xa;
|
||||||
mp_swap_cntl = 0;
|
mp_swap_cntl = 0;
|
||||||
#endif
|
#endif
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
|
WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), mp_swap_cntl);
|
WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
|
||||||
|
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
|
WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
|
||||||
|
|
||||||
/* take all subblocks out of reset, except VCPU */
|
/* take all subblocks out of reset, except VCPU */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
|
WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
|
||||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||||
mdelay(5);
|
mdelay(5);
|
||||||
|
|
||||||
/* enable VCPU clock */
|
/* enable VCPU clock */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
|
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
|
||||||
UVD_VCPU_CNTL__CLK_EN_MASK);
|
UVD_VCPU_CNTL__CLK_EN_MASK);
|
||||||
|
|
||||||
/* enable UMC */
|
/* enable UMC */
|
||||||
@@ -977,14 +984,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
|
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
|
||||||
|
|
||||||
/* boot up the VCPU */
|
/* boot up the VCPU */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
|
WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
|
||||||
mdelay(10);
|
mdelay(10);
|
||||||
|
|
||||||
for (i = 0; i < 10; ++i) {
|
for (i = 0; i < 10; ++i) {
|
||||||
uint32_t status;
|
uint32_t status;
|
||||||
|
|
||||||
for (j = 0; j < 100; ++j) {
|
for (j = 0; j < 100; ++j) {
|
||||||
status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
|
status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
|
||||||
if (status & 2)
|
if (status & 2)
|
||||||
break;
|
break;
|
||||||
mdelay(10);
|
mdelay(10);
|
||||||
@@ -1025,44 +1032,44 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
|
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
|
||||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
|
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
|
||||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
|
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
|
||||||
|
|
||||||
/* set the write pointer delay */
|
/* set the write pointer delay */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
|
||||||
|
|
||||||
/* set the wb address */
|
/* set the wb address */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
|
||||||
(upper_32_bits(ring->gpu_addr) >> 2));
|
(upper_32_bits(ring->gpu_addr) >> 2));
|
||||||
|
|
||||||
/* programm the RB_BASE for ring buffer */
|
/* programm the RB_BASE for ring buffer */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
|
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
|
||||||
lower_32_bits(ring->gpu_addr));
|
lower_32_bits(ring->gpu_addr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
|
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
|
||||||
upper_32_bits(ring->gpu_addr));
|
upper_32_bits(ring->gpu_addr));
|
||||||
|
|
||||||
/* Initialize the ring buffer's read and write pointers */
|
/* Initialize the ring buffer's read and write pointers */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
|
||||||
|
|
||||||
ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
|
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
|
||||||
lower_32_bits(ring->wptr));
|
lower_32_bits(ring->wptr));
|
||||||
|
|
||||||
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
|
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
|
||||||
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
|
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
|
||||||
|
|
||||||
ring = &adev->uvd.ring_enc[0];
|
ring = &adev->uvd.ring_enc[0];
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR), lower_32_bits(ring->wptr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR), lower_32_bits(ring->wptr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
|
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
|
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
|
||||||
|
|
||||||
ring = &adev->uvd.ring_enc[1];
|
ring = &adev->uvd.ring_enc[1];
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_RPTR2), lower_32_bits(ring->wptr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR2), lower_32_bits(ring->wptr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO2), ring->gpu_addr);
|
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
|
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE2), ring->ring_size / 4);
|
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -1077,7 +1084,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
|
|||||||
static void uvd_v7_0_stop(struct amdgpu_device *adev)
|
static void uvd_v7_0_stop(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
/* force RBC into idle state */
|
/* force RBC into idle state */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);
|
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
|
||||||
|
|
||||||
/* Stall UMC and register bus before resetting VCPU */
|
/* Stall UMC and register bus before resetting VCPU */
|
||||||
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
|
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
|
||||||
@@ -1086,12 +1093,12 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
|
|||||||
mdelay(1);
|
mdelay(1);
|
||||||
|
|
||||||
/* put VCPU into reset */
|
/* put VCPU into reset */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
|
WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
|
||||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||||
mdelay(5);
|
mdelay(5);
|
||||||
|
|
||||||
/* disable VCPU clock */
|
/* disable VCPU clock */
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);
|
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
|
||||||
|
|
||||||
/* Unstall UMC and register bus */
|
/* Unstall UMC and register bus */
|
||||||
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
|
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
|
||||||
@@ -1196,7 +1203,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
|
|||||||
unsigned i;
|
unsigned i;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
|
WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
|
||||||
r = amdgpu_ring_alloc(ring, 3);
|
r = amdgpu_ring_alloc(ring, 3);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
|
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
|
||||||
@@ -1208,7 +1215,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
|
|||||||
amdgpu_ring_write(ring, 0xDEADBEEF);
|
amdgpu_ring_write(ring, 0xDEADBEEF);
|
||||||
amdgpu_ring_commit(ring);
|
amdgpu_ring_commit(ring);
|
||||||
for (i = 0; i < adev->usec_timeout; i++) {
|
for (i = 0; i < adev->usec_timeout; i++) {
|
||||||
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
|
tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID);
|
||||||
if (tmp == 0xDEADBEEF)
|
if (tmp == 0xDEADBEEF)
|
||||||
break;
|
break;
|
||||||
DRM_UDELAY(1);
|
DRM_UDELAY(1);
|
||||||
@@ -1309,9 +1316,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||||||
uint32_t data0, data1, mask;
|
uint32_t data0, data1, mask;
|
||||||
unsigned eng = ring->vm_inv_eng;
|
unsigned eng = ring->vm_inv_eng;
|
||||||
|
|
||||||
pd_addr = pd_addr | 0x1; /* valid bit */
|
pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
|
||||||
/* now only use physical base address of PDE and valid */
|
pd_addr |= AMDGPU_PTE_VALID;
|
||||||
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
|
|
||||||
|
|
||||||
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
|
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
|
||||||
data1 = upper_32_bits(pd_addr);
|
data1 = upper_32_bits(pd_addr);
|
||||||
@@ -1350,9 +1356,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||||||
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
|
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
|
||||||
unsigned eng = ring->vm_inv_eng;
|
unsigned eng = ring->vm_inv_eng;
|
||||||
|
|
||||||
pd_addr = pd_addr | 0x1; /* valid bit */
|
pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
|
||||||
/* now only use physical base address of PDE and valid */
|
pd_addr |= AMDGPU_PTE_VALID;
|
||||||
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
|
|
||||||
|
|
||||||
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
|
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
|
||||||
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
|
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
|
||||||
@@ -1408,8 +1413,8 @@ static bool uvd_v7_0_check_soft_reset(void *handle)
|
|||||||
|
|
||||||
if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
|
if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
|
||||||
REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
|
REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
|
||||||
(RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS) &
|
(RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
|
||||||
AMDGPU_UVD_STATUS_BUSY_MASK)))
|
AMDGPU_UVD_STATUS_BUSY_MASK))
|
||||||
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
|
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
|
||||||
SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
|
SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
|
||||||
|
|
||||||
@@ -1516,9 +1521,9 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
|||||||
{
|
{
|
||||||
uint32_t data, data1, data2, suvd_flags;
|
uint32_t data, data1, data2, suvd_flags;
|
||||||
|
|
||||||
data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL));
|
data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
|
||||||
data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
|
data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
|
||||||
data2 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL));
|
data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
|
||||||
|
|
||||||
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
|
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
|
||||||
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
|
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
|
||||||
@@ -1562,18 +1567,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
|
|||||||
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
|
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
|
||||||
data1 |= suvd_flags;
|
data1 |= suvd_flags;
|
||||||
|
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), data);
|
WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), 0);
|
WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
|
WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_CTRL), data2);
|
WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
|
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
uint32_t data, data1, cgc_flags, suvd_flags;
|
uint32_t data, data1, cgc_flags, suvd_flags;
|
||||||
|
|
||||||
data = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE));
|
data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
|
||||||
data1 = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE));
|
data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
|
||||||
|
|
||||||
cgc_flags = UVD_CGC_GATE__SYS_MASK |
|
cgc_flags = UVD_CGC_GATE__SYS_MASK |
|
||||||
UVD_CGC_GATE__UDEC_MASK |
|
UVD_CGC_GATE__UDEC_MASK |
|
||||||
@@ -1605,8 +1610,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
|
|||||||
data |= cgc_flags;
|
data |= cgc_flags;
|
||||||
data1 |= suvd_flags;
|
data1 |= suvd_flags;
|
||||||
|
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_GATE), data);
|
WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SUVD_CGC_GATE), data1);
|
WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
||||||
@@ -1665,7 +1670,7 @@ static int uvd_v7_0_set_powergating_state(void *handle,
|
|||||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
|
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_PG_EN_MASK);
|
WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
|
||||||
|
|
||||||
if (state == AMD_PG_STATE_GATE) {
|
if (state == AMD_PG_STATE_GATE) {
|
||||||
uvd_v7_0_stop(adev);
|
uvd_v7_0_stop(adev);
|
||||||
|
|||||||
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_RPTR);
+		v = RREG32(mmVCE_RB_RPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_RPTR2);
+		v = RREG32(mmVCE_RB_RPTR2);
 	else
-		return RREG32(mmVCE_RB_RPTR3);
+		v = RREG32(mmVCE_RB_RPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_WPTR);
+		v = RREG32(mmVCE_RB_WPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_WPTR2);
+		v = RREG32(mmVCE_RB_WPTR2);
 	else
-		return RREG32(mmVCE_RB_WPTR3);
+		v = RREG32(mmVCE_RB_WPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
 	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
 
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,6 +267,16 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+		/* Program instance 0 reg space for two instances or instance 0 case
+		program instance 1 reg space for only instance 1 available case */
+		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
 			ring = &adev->vce.ring[0];
 			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
 			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
@@ -251,13 +297,8 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
 			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
 			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+		}
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (idx = 0; idx < 2; ++idx) {
-		if (adev->vce.harvest_config & (1 << idx))
-			continue;
-
-		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -190,6 +190,7 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
 		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 		return -EBUSY;
 	}
+	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
 
 	return 0;
 }
@@ -418,15 +419,19 @@ static int vce_v4_0_sw_init(void *handle)
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		const struct common_firmware_header *hdr;
+		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+
+		adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+		if (!adev->vce.saved_bo)
+			return -ENOMEM;
 
 		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
 		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 		DRM_INFO("PSP loading VCE firmware\n");
-	}
+	} else {
 
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 		r = amdgpu_vce_resume(adev);
 		if (r)
 			return r;
@@ -465,6 +470,11 @@ static int vce_v4_0_sw_fini(void *handle)
 	/* free MM table */
 	amdgpu_virt_free_mm_table(adev);
 
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		kfree(adev->vce.saved_bo);
+		adev->vce.saved_bo = NULL;
+	}
+
 	r = amdgpu_vce_suspend(adev);
 	if (r)
 		return r;
@@ -505,8 +515,14 @@ static int vce_v4_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
+	if (!amdgpu_sriov_vf(adev)) {
 	/* vce_v4_0_wait_for_idle(handle); */
 	vce_v4_0_stop(adev);
+	} else {
+		/* full access mode, so don't touch any VCE register */
+		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+	}
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
 
@@ -515,8 +531,18 @@ static int vce_v4_0_hw_fini(void *handle)
 
 static int vce_v4_0_suspend(void *handle)
 {
-	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	if (adev->vce.vcpu_bo == NULL)
+		return 0;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+		void *ptr = adev->vce.cpu_addr;
+
+		memcpy_fromio(adev->vce.saved_bo, ptr, size);
+	}
 
 	r = vce_v4_0_hw_fini(adev);
 	if (r)
@@ -527,12 +553,22 @@ static int vce_v4_0_suspend(void *handle)
 
 static int vce_v4_0_resume(void *handle)
 {
-	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	if (adev->vce.vcpu_bo == NULL)
+		return -EINVAL;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+		void *ptr = adev->vce.cpu_addr;
+
+		memcpy_toio(ptr, adev->vce.saved_bo, size);
+	} else {
 		r = amdgpu_vce_resume(adev);
 		if (r)
 			return r;
+	}
 
 	return vce_v4_0_hw_init(adev);
 }
@@ -919,9 +955,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = pd_addr | 0x1; /* valid bit */
-	/* now only use physical base address of PDE and valid */
-	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
+	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
 	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);

drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c (new file, 1189 lines; diff suppressed because it is too large)

drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h (new file, 29 lines)
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_V1_0_H__
+#define __VCN_V1_0_H__
+
+extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
+
+#endif
@@ -97,6 +97,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	vega10_ih_disable_interrupts(adev);
 
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_control(adev);
+	else
 	nbio_v6_1_ih_control(adev);
 
 	ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
@@ -148,6 +151,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 					   ENABLE, 0);
 	}
 	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+	else
 	nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
 
 	tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
@@ -463,89 +463,83 @@ static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
 	}
 }
 
-static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
-};
-
-static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
-};
-
 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
-	{mmGRBM_STATUS, false},
+	{mmGRBM_STATUS},
-	{mmGRBM_STATUS2, false},
+	{mmGRBM_STATUS2},
-	{mmGRBM_STATUS_SE0, false},
+	{mmGRBM_STATUS_SE0},
-	{mmGRBM_STATUS_SE1, false},
+	{mmGRBM_STATUS_SE1},
-	{mmGRBM_STATUS_SE2, false},
+	{mmGRBM_STATUS_SE2},
-	{mmGRBM_STATUS_SE3, false},
+	{mmGRBM_STATUS_SE3},
-	{mmSRBM_STATUS, false},
+	{mmSRBM_STATUS},
-	{mmSRBM_STATUS2, false},
+	{mmSRBM_STATUS2},
-	{mmSRBM_STATUS3, false},
+	{mmSRBM_STATUS3},
-	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
+	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
-	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
+	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
-	{mmCP_STAT, false},
+	{mmCP_STAT},
-	{mmCP_STALLED_STAT1, false},
+	{mmCP_STALLED_STAT1},
-	{mmCP_STALLED_STAT2, false},
+	{mmCP_STALLED_STAT2},
-	{mmCP_STALLED_STAT3, false},
+	{mmCP_STALLED_STAT3},
-	{mmCP_CPF_BUSY_STAT, false},
+	{mmCP_CPF_BUSY_STAT},
-	{mmCP_CPF_STALLED_STAT1, false},
+	{mmCP_CPF_STALLED_STAT1},
-	{mmCP_CPF_STATUS, false},
+	{mmCP_CPF_STATUS},
-	{mmCP_CPC_BUSY_STAT, false},
+	{mmCP_CPC_BUSY_STAT},
-	{mmCP_CPC_STALLED_STAT1, false},
+	{mmCP_CPC_STALLED_STAT1},
-	{mmCP_CPC_STATUS, false},
+	{mmCP_CPC_STATUS},
-	{mmGB_ADDR_CONFIG, false},
+	{mmGB_ADDR_CONFIG},
-	{mmMC_ARB_RAMCFG, false},
+	{mmMC_ARB_RAMCFG},
-	{mmGB_TILE_MODE0, false},
+	{mmGB_TILE_MODE0},
-	{mmGB_TILE_MODE1, false},
+	{mmGB_TILE_MODE1},
-	{mmGB_TILE_MODE2, false},
+	{mmGB_TILE_MODE2},
-	{mmGB_TILE_MODE3, false},
+	{mmGB_TILE_MODE3},
-	{mmGB_TILE_MODE4, false},
+	{mmGB_TILE_MODE4},
-	{mmGB_TILE_MODE5, false},
+	{mmGB_TILE_MODE5},
-	{mmGB_TILE_MODE6, false},
+	{mmGB_TILE_MODE6},
-	{mmGB_TILE_MODE7, false},
+	{mmGB_TILE_MODE7},
-	{mmGB_TILE_MODE8, false},
+	{mmGB_TILE_MODE8},
-	{mmGB_TILE_MODE9, false},
+	{mmGB_TILE_MODE9},
-	{mmGB_TILE_MODE10, false},
+	{mmGB_TILE_MODE10},
-	{mmGB_TILE_MODE11, false},
+	{mmGB_TILE_MODE11},
-	{mmGB_TILE_MODE12, false},
+	{mmGB_TILE_MODE12},
-	{mmGB_TILE_MODE13, false},
+	{mmGB_TILE_MODE13},
-	{mmGB_TILE_MODE14, false},
+	{mmGB_TILE_MODE14},
-	{mmGB_TILE_MODE15, false},
+	{mmGB_TILE_MODE15},
-	{mmGB_TILE_MODE16, false},
+	{mmGB_TILE_MODE16},
-	{mmGB_TILE_MODE17, false},
+	{mmGB_TILE_MODE17},
-	{mmGB_TILE_MODE18, false},
+	{mmGB_TILE_MODE18},
-	{mmGB_TILE_MODE19, false},
+	{mmGB_TILE_MODE19},
-	{mmGB_TILE_MODE20, false},
+	{mmGB_TILE_MODE20},
-	{mmGB_TILE_MODE21, false},
+	{mmGB_TILE_MODE21},
-	{mmGB_TILE_MODE22, false},
+	{mmGB_TILE_MODE22},
-	{mmGB_TILE_MODE23, false},
+	{mmGB_TILE_MODE23},
-	{mmGB_TILE_MODE24, false},
+	{mmGB_TILE_MODE24},
-	{mmGB_TILE_MODE25, false},
+	{mmGB_TILE_MODE25},
-	{mmGB_TILE_MODE26, false},
+	{mmGB_TILE_MODE26},
-	{mmGB_TILE_MODE27, false},
+	{mmGB_TILE_MODE27},
-	{mmGB_TILE_MODE28, false},
+	{mmGB_TILE_MODE28},
-	{mmGB_TILE_MODE29, false},
+	{mmGB_TILE_MODE29},
-	{mmGB_TILE_MODE30, false},
+	{mmGB_TILE_MODE30},
-	{mmGB_TILE_MODE31, false},
+	{mmGB_TILE_MODE31},
-	{mmGB_MACROTILE_MODE0, false},
+	{mmGB_MACROTILE_MODE0},
-	{mmGB_MACROTILE_MODE1, false},
+	{mmGB_MACROTILE_MODE1},
-	{mmGB_MACROTILE_MODE2, false},
+	{mmGB_MACROTILE_MODE2},
-	{mmGB_MACROTILE_MODE3, false},
+	{mmGB_MACROTILE_MODE3},
-	{mmGB_MACROTILE_MODE4, false},
+	{mmGB_MACROTILE_MODE4},
-	{mmGB_MACROTILE_MODE5, false},
+	{mmGB_MACROTILE_MODE5},
-	{mmGB_MACROTILE_MODE6, false},
+	{mmGB_MACROTILE_MODE6},
-	{mmGB_MACROTILE_MODE7, false},
+	{mmGB_MACROTILE_MODE7},
-	{mmGB_MACROTILE_MODE8, false},
+	{mmGB_MACROTILE_MODE8},
-	{mmGB_MACROTILE_MODE9, false},
+	{mmGB_MACROTILE_MODE9},
-	{mmGB_MACROTILE_MODE10, false},
+	{mmGB_MACROTILE_MODE10},
-	{mmGB_MACROTILE_MODE11, false},
+	{mmGB_MACROTILE_MODE11},
-	{mmGB_MACROTILE_MODE12, false},
+	{mmGB_MACROTILE_MODE12},
-	{mmGB_MACROTILE_MODE13, false},
+	{mmGB_MACROTILE_MODE13},
-	{mmGB_MACROTILE_MODE14, false},
+	{mmGB_MACROTILE_MODE14},
-	{mmGB_MACROTILE_MODE15, false},
+	{mmGB_MACROTILE_MODE15},
-	{mmCC_RB_BACKEND_DISABLE, false, true},
+	{mmCC_RB_BACKEND_DISABLE, true},
-	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
+	{mmGC_USER_RB_BACKEND_DISABLE, true},
-	{mmGB_BACKEND_MAP, false, false},
+	{mmGB_BACKEND_MAP, false},
-	{mmPA_SC_RASTER_CONFIG, false, true},
+	{mmPA_SC_RASTER_CONFIG, true},
-	{mmPA_SC_RASTER_CONFIG_1, false, true},
+	{mmPA_SC_RASTER_CONFIG_1, true},
 };
 
 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
@@ -647,51 +641,17 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev,
 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			    u32 sh_num, u32 reg_offset, u32 *value)
 {
-	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
-	const struct amdgpu_allowed_register_entry *asic_register_entry;
-	uint32_t size, i;
+	uint32_t i;
 
 	*value = 0;
-	switch (adev->asic_type) {
-	case CHIP_TOPAZ:
-		asic_register_table = tonga_allowed_read_registers;
-		size = ARRAY_SIZE(tonga_allowed_read_registers);
-		break;
-	case CHIP_FIJI:
-	case CHIP_TONGA:
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS10:
-	case CHIP_POLARIS12:
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-		asic_register_table = cz_allowed_read_registers;
-		size = ARRAY_SIZE(cz_allowed_read_registers);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (asic_register_table) {
-		for (i = 0; i < size; i++) {
-			asic_register_entry = asic_register_table + i;
-			if (reg_offset != asic_register_entry->reg_offset)
-				continue;
-			if (!asic_register_entry->untouched)
-				*value = vi_get_register_value(adev,
-					asic_register_entry->grbm_indexed,
-					se_num, sh_num, reg_offset);
-			return 0;
-		}
-	}
-
 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
+		bool indexed = vi_allowed_read_registers[i].grbm_indexed;
+
 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
 			continue;
 
-		if (!vi_allowed_read_registers[i].untouched)
-			*value = vi_get_register_value(adev,
-				vi_allowed_read_registers[i].grbm_indexed,
-				se_num, sh_num, reg_offset);
+		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
+					       reg_offset);
 		return 0;
 	}
 	return -EINVAL;
@@ -934,11 +894,6 @@ static int vi_common_early_init(void *handle)
 	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
 		smc_enabled = true;
 
-	if (amdgpu_sriov_vf(adev)) {
-		amdgpu_virt_init_setting(adev);
-		xgpu_vi_mailbox_set_irq_funcs(adev);
-	}
-
 	adev->rev_id = vi_get_rev_id(adev);
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {
@@ -1073,7 +1028,7 @@ static int vi_common_early_init(void *handle)
 		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
 		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
-			adev->pg_flags |=
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
 				AMD_PG_SUPPORT_GFX_SMG |
 				AMD_PG_SUPPORT_GFX_PIPELINE |
 				AMD_PG_SUPPORT_CP |
@@ -1111,6 +1066,11 @@ static int vi_common_early_init(void *handle)
 		return -EINVAL;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_virt_init_setting(adev);
+		xgpu_vi_mailbox_set_irq_funcs(adev);
+	}
+
 	/* vi use smc load by default */
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
@@ -361,6 +361,12 @@
 #define	PACKET3_WAIT_ON_CE_COUNTER	0x86
 #define	PACKET3_WAIT_ON_DE_COUNTER_DIFF	0x88
 #define	PACKET3_SWITCH_BUFFER	0x8B
+#define	PACKET3_FRAME_CONTROL	0x90
+#	define	FRAME_CMD(x) ((x) << 28)
+		/*
+		 * x=0: tmz_begin
+		 * x=1: tmz_end
+		 */
 #define	PACKET3_SET_RESOURCES	0xA0
 /* 1. header
  * 2. CONTROL
@@ -226,6 +226,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd->shared_resources = *gpu_resources;
 
+	/* We only use the first MEC */
+	if (kfd->shared_resources.num_mec > 1)
+		kfd->shared_resources.num_mec = 1;
+
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
@@ -63,21 +63,44 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
 		return KFD_MQD_TYPE_CP;
 }
 
-unsigned int get_first_pipe(struct device_queue_manager *dqm)
+static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 {
-	BUG_ON(!dqm || !dqm->dev);
-	return dqm->dev->shared_resources.first_compute_pipe;
+	int i;
+	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
+		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;
+
+	/* queue is available for KFD usage if bit is 1 */
+	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
+		if (test_bit(pipe_offset + i,
+			      dqm->dev->shared_resources.queue_bitmap))
+			return true;
+	return false;
 }
 
-unsigned int get_pipes_num(struct device_queue_manager *dqm)
+unsigned int get_mec_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);
-	return dqm->dev->shared_resources.compute_pipe_count;
+
+	return dqm->dev->shared_resources.num_mec;
 }
 
-static inline unsigned int get_pipes_num_cpsch(void)
+unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
-	return PIPE_PER_ME_CP_SCHEDULING;
+	BUG_ON(!dqm || !dqm->dev);
+	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+				KGD_MAX_QUEUES);
+}
+
+unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
+{
+	BUG_ON(!dqm || !dqm->dev);
+	return dqm->dev->shared_resources.num_queue_per_pipe;
+}
+
+unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
+{
+	BUG_ON(!dqm || !dqm->dev);
+	return dqm->dev->shared_resources.num_pipe_per_mec;
 }
 
 void program_sh_mem_settings(struct device_queue_manager *dqm,
@@ -200,12 +223,16 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 
 	set = false;
 
-	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
-			pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
+	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
 
+		if (!is_pipe_enabled(dqm, 0, pipe))
+			continue;
+
 		if (dqm->allocated_queues[pipe] != 0) {
 			bit = find_first_bit(
 				(unsigned long *)&dqm->allocated_queues[pipe],
-				QUEUES_PER_PIPE);
+				get_queues_per_pipe(dqm));
 
 			clear_bit(bit,
 				(unsigned long *)&dqm->allocated_queues[pipe]);
@@ -222,7 +249,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
 				__func__, q->pipe, q->queue);
 	/* horizontal hqd allocation */
-	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
+	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
 
 	return 0;
 }
@@ -469,81 +496,25 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 			vmid);
 }
 
-int init_pipelines(struct device_queue_manager *dqm,
-		unsigned int pipes_num, unsigned int first_pipe)
-{
-	void *hpdptr;
-	struct mqd_manager *mqd;
-	unsigned int i, err, inx;
-	uint64_t pipe_hpd_addr;
-
-	BUG_ON(!dqm || !dqm->dev);
-
-	pr_debug("kfd: In func %s\n", __func__);
-
-	/*
-	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
-	 * The driver never accesses this memory after zeroing it.
-	 * It doesn't even have to be saved/restored on suspend/resume
-	 * because it contains no data when there are no active queues.
-	 */
-
-	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
-					&dqm->pipeline_mem);
-
-	if (err) {
-		pr_err("kfd: error allocate vidmem num pipes: %d\n",
-			pipes_num);
-		return -ENOMEM;
-	}
-
-	hpdptr = dqm->pipeline_mem->cpu_ptr;
-	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
-
-	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
-
-	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-	if (mqd == NULL) {
-		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < pipes_num; i++) {
-		inx = i + first_pipe;
-		/*
-		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
-		 * space in GTT for pipelines we don't initialize
-		 */
-		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
-		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
-		/* = log2(bytes/4)-1 */
-		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
-				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
-	}
-
-	return 0;
-}
-
 static void init_interrupts(struct device_queue_manager *dqm)
 {
 	unsigned int i;
 
 	BUG_ON(dqm == NULL);
 
-	for (i = 0 ; i < get_pipes_num(dqm) ; i++)
-		dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
-				i + get_first_pipe(dqm));
+	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
+		if (is_pipe_enabled(dqm, 0, i))
+			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
 }
 
 static int init_scheduler(struct device_queue_manager *dqm)
 {
-	int retval;
+	int retval = 0;
 
 	BUG_ON(!dqm);
 
 	pr_debug("kfd: In %s\n", __func__);
 
-	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 	return retval;
 }
 
@@ -554,21 +525,21 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
 
 	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_num(dqm));
+			__func__, get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
 	dqm->sdma_queue_count = 0;
-	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
+	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
 					sizeof(unsigned int), GFP_KERNEL);
 	if (!dqm->allocated_queues) {
 		mutex_destroy(&dqm->lock);
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < get_pipes_num(dqm); i++)
-		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
+	for (i = 0; i < get_pipes_per_mec(dqm); i++)
+		dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;
 
 	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
 	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
@@ -675,18 +646,38 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 
 static int set_sched_resources(struct device_queue_manager *dqm)
 {
+	int i, mec;
 	struct scheduling_resources res;
-	unsigned int queue_num, queue_mask;
 
 	BUG_ON(!dqm);
 
	pr_debug("kfd: In func %s\n", __func__);
 
-	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
-	queue_mask = (1 << queue_num) - 1;
 	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
 	res.vmid_mask <<= KFD_VMID_START_OFFSET;
-	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
+
+	res.queue_mask = 0;
+	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
+		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
+			/ dqm->dev->shared_resources.num_pipe_per_mec;
+
+		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+			continue;
+
+		/* only acquire queues from the first MEC */
+		if (mec > 0)
+			continue;
+
+		/* This situation may be hit in the future if a new HW
+		 * generation exposes more than 64 queues. If so, the
+		 * definition of res.queue_mask needs updating */
+		if (WARN_ON(i > (sizeof(res.queue_mask)*8))) {
+			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
+			break;
+		}
+
+		res.queue_mask |= (1ull << i);
+	}
 	res.gws_mask = res.oac_mask = res.gds_heap_base =
 			res.gds_heap_size = 0;
 
@@ -705,7 +696,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
 
 	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_num_cpsch());
+			__func__, get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -30,8 +30,6 @@
 #include "kfd_mqd_manager.h"
 
 #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS	(500)
-#define QUEUES_PER_PIPE				(8)
-#define PIPE_PER_ME_CP_SCHEDULING		(3)
 #define CIK_VMID_NUM				(8)
 #define KFD_VMID_START_OFFSET			(8)
 #define VMID_PER_DEVICE				CIK_VMID_NUM
@@ -182,10 +180,10 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-int init_pipelines(struct device_queue_manager *dqm,
-		unsigned int pipes_num, unsigned int first_pipe);
-unsigned int get_first_pipe(struct device_queue_manager *dqm);
-unsigned int get_pipes_num(struct device_queue_manager *dqm);
+unsigned int get_mec_num(struct device_queue_manager *dqm);
+unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
 
 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -151,5 +151,5 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-	return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
+	return 0;
 }
@@ -65,8 +65,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 
 	/* check if there is over subscription*/
 	*over_subscription = false;
-	if ((process_count > 1) ||
-		queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
+	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
 		*over_subscription = true;
 		pr_debug("kfd: over subscribed runlist\n");
 	}
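Both this hunk and the pqm_create_queue() one below apply the same predicate, now against the per-device queue count instead of the removed compile-time constants. Isolated for clarity (a hedged sketch, not driver code):

#include <stdbool.h>

/* Runlist is oversubscribed when more than one process is active, or when
 * more queues are requested than the hardware actually exposes. */
static bool runlist_oversubscribed(unsigned int process_count,
				   unsigned int queue_count,
				   unsigned int hw_queue_limit)
{
	return process_count > 1 || queue_count > hw_queue_limit;
}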
@@ -209,7 +209,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	/* check if there is over subscription */
 	if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
 		((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
-		(dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
+		(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
 		pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
 		retval = -EPERM;
 		goto err_create_queue;
@@ -48,6 +48,7 @@ enum amd_asic_type {
 	CHIP_POLARIS11,
 	CHIP_POLARIS12,
 	CHIP_VEGA10,
+	CHIP_RAVEN,
 	CHIP_LAST,
 };
 
@@ -75,8 +76,7 @@ enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_UVD,
 	AMD_IP_BLOCK_TYPE_VCE,
 	AMD_IP_BLOCK_TYPE_ACP,
-	AMD_IP_BLOCK_TYPE_GFXHUB,
-	AMD_IP_BLOCK_TYPE_MMHUB
+	AMD_IP_BLOCK_TYPE_VCN
 };
 
 enum amd_clockgating_state {
@@ -906,6 +906,8 @@
 #define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x00000000
 #define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x000000ffL
 #define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x00000000
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK 0x00000100L
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN__SHIFT 0x00000008
 #define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x0000003fL
 #define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x00000000
 #define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
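The two added WRITE_EN bits suggest the Azalia endpoint INDEX register both selects a target register (bits 7:0) and arms the following DATA write (bit 8). A hedged sketch of how the mask/shift pair composes into an INDEX value; the helper and the shortened macro names are illustrative, not the DCE6 audio code itself:

#include <stdint.h>

#define ENDPOINT_REG_INDEX_MASK    0x000000ffu /* ..._REG_INDEX_MASK from above  */
#define ENDPOINT_REG_INDEX_SHIFT   0x00000000u /* ..._REG_INDEX__SHIFT           */
#define ENDPOINT_REG_WRITE_EN_MASK 0x00000100u /* ..._REG_WRITE_EN_MASK          */

/* Compose the value written to AZALIA_F0_CODEC_ENDPOINT_INDEX: the target
 * register index in the low byte, plus WRITE_EN when the access is a write. */
static uint32_t azalia_endpoint_index(uint8_t reg, int is_write)
{
	uint32_t v = ((uint32_t)reg << ENDPOINT_REG_INDEX_SHIFT) &
		     ENDPOINT_REG_INDEX_MASK;

	if (is_write)
		v |= ENDPOINT_REG_WRITE_EN_MASK;
	return v;
}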
 7988 drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h (Normal file; diff suppressed because it is too large)
14087 drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h (Normal file; diff suppressed because it is too large)
54316 drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h (Normal file; diff suppressed because it is too large)
 4005 drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h (Normal file; diff suppressed because it is too large)
 7491 drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_offset.h (Normal file; diff suppressed because it is too large)
31191 drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_sh_mask.h (Normal file; diff suppressed because it is too large)
File diff suppressed because it is too large
 1999 drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_offset.h (Normal file; diff suppressed because it is too large)
File diff suppressed because it is too large
  182 drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_default.h (Normal file)
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_10_0_DEFAULT_HEADER
+#define _mp_10_0_DEFAULT_HEADER
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+#define mmMP0_SMN_C2PMSG_32_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_33_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_34_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_35_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_36_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_37_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_38_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_39_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_40_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_41_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_42_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_43_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_44_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_45_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_46_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_47_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_48_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_49_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_50_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_51_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_52_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_53_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_54_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_55_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_56_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_57_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_58_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_59_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_60_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_61_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_62_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_63_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_64_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_65_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_66_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_67_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_68_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_69_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_70_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_71_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_72_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_73_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_74_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_75_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_76_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_77_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_78_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_79_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_80_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_81_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_82_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_83_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_84_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_85_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_86_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_87_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_88_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_89_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_90_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_91_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_92_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_93_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_94_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_95_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_96_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_97_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_98_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_99_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_100_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_101_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_102_DEFAULT 0x00000000
+#define mmMP0_SMN_C2PMSG_103_DEFAULT 0x00000000
+#define mmMP0_SMN_IH_CREDIT_DEFAULT 0x00000000
+#define mmMP0_SMN_IH_SW_INT_DEFAULT 0x00000000
+#define mmMP0_SMN_IH_SW_INT_CTRL_DEFAULT 0x00000000
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+#define mmMP1_SMN_C2PMSG_32_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_33_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_34_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_35_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_36_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_37_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_38_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_39_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_40_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_41_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_42_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_43_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_44_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_45_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_46_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_47_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_48_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_49_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_50_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_51_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_52_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_53_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_54_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_55_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_56_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_57_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_58_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_59_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_60_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_61_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_62_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_63_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_64_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_65_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_66_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_67_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_68_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_69_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_70_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_71_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_72_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_73_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_74_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_75_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_76_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_77_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_78_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_79_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_80_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_81_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_82_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_83_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_84_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_85_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_86_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_87_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_88_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_89_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_90_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_91_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_92_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_93_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_94_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_95_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_96_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_97_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_98_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_99_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_100_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_101_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_102_DEFAULT 0x00000000
+#define mmMP1_SMN_C2PMSG_103_DEFAULT 0x00000000
+#define mmMP1_SMN_IH_CREDIT_DEFAULT 0x00000000
+#define mmMP1_SMN_IH_SW_INT_DEFAULT 0x00000000
+#define mmMP1_SMN_IH_SW_INT_CTRL_DEFAULT 0x00000000
+#define mmMP1_SMN_FPS_CNT_DEFAULT 0x00000000
+
+
+#endif
  336 drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_offset.h (Normal file)
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_10_0_OFFSET_HEADER
+#define _mp_10_0_OFFSET_HEADER
+
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+// base address: 0x0
+#define mmMP0_SMN_C2PMSG_32 0x0060
+#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_33 0x0061
+#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_34 0x0062
+#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_35 0x0063
+#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_36 0x0064
+#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_37 0x0065
+#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_38 0x0066
+#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_39 0x0067
+#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_40 0x0068
+#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_41 0x0069
+#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_42 0x006a
+#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_43 0x006b
+#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_44 0x006c
+#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_45 0x006d
+#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_46 0x006e
+#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_47 0x006f
+#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_48 0x0070
+#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_49 0x0071
+#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_50 0x0072
+#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_51 0x0073
+#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_52 0x0074
+#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_53 0x0075
+#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_54 0x0076
+#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_55 0x0077
+#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_56 0x0078
+#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_57 0x0079
+#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_58 0x007a
+#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_59 0x007b
+#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_60 0x007c
+#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_61 0x007d
+#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_62 0x007e
+#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_63 0x007f
+#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_64 0x0080
+#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_65 0x0081
+#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_66 0x0082
+#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_67 0x0083
+#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_68 0x0084
+#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_69 0x0085
+#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_70 0x0086
+#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_71 0x0087
+#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_72 0x0088
+#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_73 0x0089
+#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_74 0x008a
+#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_75 0x008b
+#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_76 0x008c
+#define mmMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_77 0x008d
+#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_78 0x008e
+#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_79 0x008f
+#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_80 0x0090
+#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_81 0x0091
+#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_82 0x0092
+#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_83 0x0093
+#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_84 0x0094
+#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_85 0x0095
+#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_86 0x0096
+#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_87 0x0097
+#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_88 0x0098
+#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_89 0x0099
+#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_90 0x009a
+#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_91 0x009b
+#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_92 0x009c
+#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_93 0x009d
+#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_94 0x009e
+#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_95 0x009f
+#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_96 0x00a0
+#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_97 0x00a1
+#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_98 0x00a2
+#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_99 0x00a3
+#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_100 0x00a4
+#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_101 0x00a5
+#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_102 0x00a6
+#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_103 0x00a7
+#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP0_SMN_IH_CREDIT 0x00c1
+#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT 0x00c2
+#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define mmMP1_SMN_C2PMSG_32 0x0260
+#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_33 0x0261
+#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_34 0x0262
+#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_35 0x0263
+#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_36 0x0264
+#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_37 0x0265
+#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_38 0x0266
+#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_39 0x0267
+#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_40 0x0268
+#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_41 0x0269
+#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_42 0x026a
+#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_43 0x026b
+#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_44 0x026c
+#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_45 0x026d
+#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_46 0x026e
+#define mmMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_47 0x026f
+#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_48 0x0270
+#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_49 0x0271
+#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_50 0x0272
+#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_51 0x0273
+#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_52 0x0274
+#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_53 0x0275
+#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_54 0x0276
+#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_55 0x0277
+#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_56 0x0278
+#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_57 0x0279
+#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_58 0x027a
+#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_59 0x027b
+#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_60 0x027c
+#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_61 0x027d
+#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_62 0x027e
+#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_63 0x027f
+#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_64 0x0280
+#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_65 0x0281
+#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_67 0x0283
+#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_68 0x0284
+#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_69 0x0285
+#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_70 0x0286
+#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_71 0x0287
+#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_72 0x0288
+#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_73 0x0289
+#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_74 0x028a
+#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_75 0x028b
+#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_76 0x028c
+#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_77 0x028d
+#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_78 0x028e
+#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_79 0x028f
+#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_80 0x0290
+#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_81 0x0291
+#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_83 0x0293
+#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_84 0x0294
+#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_85 0x0295
+#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_86 0x0296
+#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_87 0x0297
+#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_88 0x0298
+#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_89 0x0299
+#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_91 0x029b
+#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_92 0x029c
+#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_93 0x029d
+#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_94 0x029e
+#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_95 0x029f
+#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_96 0x02a0
+#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_97 0x02a1
+#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_98 0x02a2
+#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_99 0x02a3
+#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_100 0x02a4
+#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_101 0x02a5
+#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_102 0x02a6
+#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_103 0x02a7
+#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP1_SMN_IH_CREDIT 0x02c1
+#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT 0x02c2
+#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define mmMP1_SMN_FPS_CNT 0x02c4
+#define mmMP1_SMN_FPS_CNT_BASE_IDX 0
+
+
+#endif
886
drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h
Normal file
886
drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h
Normal file
@@ -0,0 +1,886 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2017 Advanced Micro Devices, Inc.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
|
* to deal in the Software without restriction, including without limitation
|
||||||
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
* and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
* Software is furnished to do so, subject to the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be included
|
||||||
|
* in all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||||
|
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||||
|
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
#ifndef _mp_10_0_SH_MASK_HEADER
|
||||||
|
#define _mp_10_0_SH_MASK_HEADER
|
||||||
|
|
||||||
|
|
||||||
|
// addressBlock: mp_SmuMp0_SmnDec
|
||||||
|
//MP0_SMN_C2PMSG_32
|
||||||
|
#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_33
|
||||||
|
#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_34
|
||||||
|
#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_35
|
||||||
|
#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_36
|
||||||
|
#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_37
|
||||||
|
#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_38
|
||||||
|
#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_39
|
||||||
|
#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_40
|
||||||
|
#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_41
|
||||||
|
#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_42
|
||||||
|
#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_43
|
||||||
|
#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_44
|
||||||
|
#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_45
|
||||||
|
#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_46
|
||||||
|
#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_47
|
||||||
|
#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_48
|
||||||
|
#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_49
|
||||||
|
#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_50
|
||||||
|
#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_51
|
||||||
|
#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_52
|
||||||
|
#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_53
|
||||||
|
#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_54
|
||||||
|
#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_55
|
||||||
|
#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_56
|
||||||
|
#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_57
|
||||||
|
#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_58
|
||||||
|
#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_59
|
||||||
|
#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_60
|
||||||
|
#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_61
|
||||||
|
#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_62
|
||||||
|
#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_63
|
||||||
|
#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_64
|
||||||
|
#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_65
|
||||||
|
#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_66
|
||||||
|
#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_67
|
||||||
|
#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_68
|
||||||
|
#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_69
|
||||||
|
#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_70
|
||||||
|
#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_71
|
||||||
|
#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_72
|
||||||
|
#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_73
|
||||||
|
#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_74
|
||||||
|
#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_75
|
||||||
|
#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_76
|
||||||
|
#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_77
|
||||||
|
#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_78
|
||||||
|
#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_79
|
||||||
|
#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_80
|
||||||
|
#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_81
|
||||||
|
#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_82
|
||||||
|
#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_83
|
||||||
|
#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_84
|
||||||
|
#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_85
|
||||||
|
#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_86
|
||||||
|
#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_87
|
||||||
|
#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_88
|
||||||
|
#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_89
|
||||||
|
#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_90
|
||||||
|
#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_91
|
||||||
|
#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_92
|
||||||
|
#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_93
|
||||||
|
#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_94
|
||||||
|
#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_95
|
||||||
|
#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_96
|
||||||
|
#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_97
|
||||||
|
#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_98
|
||||||
|
#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_99
|
||||||
|
#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_100
|
||||||
|
#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_101
|
||||||
|
#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_102
|
||||||
|
#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_C2PMSG_103
|
||||||
|
#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
|
||||||
|
#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP0_SMN_IH_CREDIT
|
||||||
|
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
|
||||||
|
#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
|
||||||
|
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
|
||||||
|
#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
|
||||||
|
//MP0_SMN_IH_SW_INT
|
||||||
|
#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x0
|
||||||
|
#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x1
|
||||||
|
#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000001L
|
||||||
|
#define MP0_SMN_IH_SW_INT__ID_MASK 0x000001FEL
|
||||||
|
//MP0_SMN_IH_SW_INT_CTRL
|
||||||
|
#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
|
||||||
|
#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
|
||||||
|
#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
|
||||||
|
#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
|
||||||
|
|
||||||
|
|
||||||
|
// addressBlock: mp_SmuMp1_SmnDec
|
||||||
|
//MP1_SMN_C2PMSG_32
|
||||||
|
#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_33
|
||||||
|
#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_34
|
||||||
|
#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_35
|
||||||
|
#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_36
|
||||||
|
#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_37
|
||||||
|
#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_38
|
||||||
|
#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_39
|
||||||
|
#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_40
|
||||||
|
#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_41
|
||||||
|
#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_42
|
||||||
|
#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_43
|
||||||
|
#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_44
|
||||||
|
#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_45
|
||||||
|
#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_46
|
||||||
|
#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_47
|
||||||
|
#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_48
|
||||||
|
#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_49
|
||||||
|
#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_50
|
||||||
|
#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_51
|
||||||
|
#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_52
|
||||||
|
#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_53
|
||||||
|
#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_54
|
||||||
|
#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_55
|
||||||
|
#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_56
|
||||||
|
#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_57
|
||||||
|
#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_58
|
||||||
|
#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_59
|
||||||
|
#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_60
|
||||||
|
#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_61
|
||||||
|
#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_62
|
||||||
|
#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_63
|
||||||
|
#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_64
|
||||||
|
#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_65
|
||||||
|
#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_66
|
||||||
|
#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_67
|
||||||
|
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_68
|
||||||
|
#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_69
|
||||||
|
#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_70
|
||||||
|
#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_71
|
||||||
|
#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_72
|
||||||
|
#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_73
|
||||||
|
#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_74
|
||||||
|
#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_75
|
||||||
|
#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_76
|
||||||
|
#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_77
|
||||||
|
#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_78
|
||||||
|
#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_79
|
||||||
|
#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_80
|
||||||
|
#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_81
|
||||||
|
#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_82
|
||||||
|
#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_83
|
||||||
|
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_84
|
||||||
|
#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
|
||||||
|
//MP1_SMN_C2PMSG_85
|
||||||
|
#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
|
||||||
|
#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_86
#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_87
#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_88
#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_89
#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_90
#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_91
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_92
#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_93
#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_94
#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_95
#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_96
#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_97
#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_98
#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_99
#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_100
#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_101
#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_102
#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_103
#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_IH_CREDIT
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP1_SMN_IH_SW_INT
#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x0
#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x1
#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000001L
#define MP1_SMN_IH_SW_INT__ID_MASK 0x000001FEL
//MP1_SMN_IH_SW_INT_CTRL
#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
//MP1_SMN_FPS_CNT
#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL

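Every field in the block above follows the same SOC15 convention: a __SHIFT macro gives the field's bit offset and a _MASK macro selects its bits within the 32-bit register. Below is a minimal, self-contained C sketch of how such pairs are consumed; the FIELD_GET/FIELD_SET helpers are illustrative stand-ins (amdgpu itself uses its own REG_GET_FIELD/REG_SET_FIELD macros for this job), and the demo value is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L

/* Extract one field from a 32-bit register value. */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
/* Replace one field in a 32-bit register value. */
#define FIELD_SET(val, reg, field, fv) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t reg = 0;

	reg = FIELD_SET(reg, MP1_SMN_IH_CREDIT, CLIENT_ID, 0x12);
	printf("reg = 0x%08x, CLIENT_ID = 0x%x\n", (unsigned int)reg,
	       (unsigned int)FIELD_GET(reg, MP1_SMN_IH_CREDIT, CLIENT_ID));
	return 0;
}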
// addressBlock: mp_SmuMp0Pub_CruDec
//MP0_ACTIVE_FCN_ID
#define MP0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
#define MP0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
#define MP0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
#define MP0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
//MP0_IH_CREDIT
#define MP0_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP0_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP0_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP0_IH_SW_INT
#define MP0_IH_SW_INT__ID__SHIFT 0x0
#define MP0_IH_SW_INT__VALID__SHIFT 0x8
#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
//MP0_IH_SW_INT_CTRL
#define MP0_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP0_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP0_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L

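The MP0_IH_SW_INT/MP0_IH_SW_INT_CTRL pair encodes a software-interrupt request (ID in bits 7:0, VALID in bit 8) plus its mask/ack controls. A hedged sketch of the bit packing follows; the MMIO accessor and the register offsets are placeholders, not a real amdgpu API, and the actual trigger/ack sequencing is firmware-defined.

#include <stdint.h>

#define MP0_IH_SW_INT__ID__SHIFT 0x0
#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L

/* Placeholder MMIO accessor and offsets, for illustration only. */
extern void mmio_write32(uint32_t offset, uint32_t value);
#define mmMP0_IH_SW_INT 0x0
#define mmMP0_IH_SW_INT_CTRL 0x4

void mp0_raise_sw_int(uint8_t id)
{
	uint32_t v = 0;

	v |= ((uint32_t)id << MP0_IH_SW_INT__ID__SHIFT) & MP0_IH_SW_INT__ID_MASK;
	v |= MP0_IH_SW_INT__VALID_MASK;	/* mark the request valid */
	mmio_write32(mmMP0_IH_SW_INT, v);

	/* Once serviced, the ack bit in the control register is written. */
	mmio_write32(mmMP0_IH_SW_INT_CTRL, MP0_IH_SW_INT_CTRL__INT_ACK_MASK);
}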
// addressBlock: mp_SmuMp1Pub_CruDec
//MP1_FIRMWARE_FLAGS
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
//MP1_C2PMSG_0
#define MP1_C2PMSG_0__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_1
#define MP1_C2PMSG_1__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_2
#define MP1_C2PMSG_2__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_3
#define MP1_C2PMSG_3__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_4
#define MP1_C2PMSG_4__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_5
#define MP1_C2PMSG_5__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_6
#define MP1_C2PMSG_6__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_7
#define MP1_C2PMSG_7__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_8
#define MP1_C2PMSG_8__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_9
#define MP1_C2PMSG_9__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_10
#define MP1_C2PMSG_10__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_11
#define MP1_C2PMSG_11__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_12
#define MP1_C2PMSG_12__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_13
#define MP1_C2PMSG_13__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_14
#define MP1_C2PMSG_14__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_15
#define MP1_C2PMSG_15__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_16
#define MP1_C2PMSG_16__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_17
#define MP1_C2PMSG_17__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_18
#define MP1_C2PMSG_18__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_19
#define MP1_C2PMSG_19__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_20
#define MP1_C2PMSG_20__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_21
#define MP1_C2PMSG_21__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_22
#define MP1_C2PMSG_22__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_23
#define MP1_C2PMSG_23__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_24
#define MP1_C2PMSG_24__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_25
#define MP1_C2PMSG_25__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_26
#define MP1_C2PMSG_26__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_27
#define MP1_C2PMSG_27__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_28
#define MP1_C2PMSG_28__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_29
#define MP1_C2PMSG_29__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_30
#define MP1_C2PMSG_30__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_31
#define MP1_C2PMSG_31__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_0
#define MP1_P2CMSG_0__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_1
#define MP1_P2CMSG_1__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_2
#define MP1_P2CMSG_2__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_3
#define MP1_P2CMSG_3__CONTENT__SHIFT 0x0
#define MP1_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
//MP1_P2CMSG_INTEN
#define MP1_P2CMSG_INTEN__INTEN__SHIFT 0x0
#define MP1_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
//MP1_P2CMSG_INTSTS
#define MP1_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
#define MP1_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
#define MP1_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
#define MP1_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
#define MP1_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
#define MP1_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
#define MP1_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
#define MP1_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
//MP1_C2PMSG_32
#define MP1_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_33
#define MP1_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_34
#define MP1_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_35
#define MP1_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_36
#define MP1_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_37
#define MP1_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_38
#define MP1_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_39
#define MP1_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_40
#define MP1_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_41
#define MP1_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_42
#define MP1_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_43
#define MP1_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_44
#define MP1_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_45
#define MP1_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_46
#define MP1_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_47
#define MP1_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_48
#define MP1_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_49
#define MP1_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_50
#define MP1_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_51
#define MP1_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_52
#define MP1_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_53
#define MP1_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_54
#define MP1_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_55
#define MP1_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_56
#define MP1_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_57
#define MP1_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_58
#define MP1_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_59
#define MP1_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_60
#define MP1_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_61
#define MP1_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_62
#define MP1_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_63
#define MP1_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_64
#define MP1_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_65
#define MP1_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_66
#define MP1_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_67
#define MP1_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_68
#define MP1_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_69
#define MP1_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_70
#define MP1_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_71
#define MP1_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_72
#define MP1_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_73
#define MP1_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_74
#define MP1_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_75
#define MP1_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_76
#define MP1_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_77
#define MP1_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_78
#define MP1_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_79
#define MP1_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_80
#define MP1_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_81
#define MP1_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_82
#define MP1_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_83
#define MP1_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_84
#define MP1_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_85
#define MP1_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_86
#define MP1_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_87
#define MP1_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_88
#define MP1_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_89
#define MP1_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_90
#define MP1_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_91
#define MP1_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_92
#define MP1_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_93
#define MP1_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_94
#define MP1_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_95
#define MP1_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_96
#define MP1_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_97
#define MP1_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_98
#define MP1_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_99
#define MP1_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_100
#define MP1_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_101
#define MP1_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_102
#define MP1_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP1_C2PMSG_103
#define MP1_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP1_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP1_ACTIVE_FCN_ID
#define MP1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
#define MP1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
#define MP1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
#define MP1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
//MP1_IH_CREDIT
#define MP1_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP1_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP1_IH_SW_INT
#define MP1_IH_SW_INT__ID__SHIFT 0x0
#define MP1_IH_SW_INT__VALID__SHIFT 0x8
#define MP1_IH_SW_INT__ID_MASK 0x000000FFL
#define MP1_IH_SW_INT__VALID_MASK 0x00000100L
//MP1_IH_SW_INT_CTRL
#define MP1_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP1_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP1_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP1_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
//MP1_FPS_CNT
#define MP1_FPS_CNT__COUNT__SHIFT 0x0
#define MP1_FPS_CNT__COUNT_MASK 0xFFFFFFFFL

#endif
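The C2PMSG registers above are full-width CONTENT scratch registers; register sets like this commonly back a driver-to-firmware mailbox. The C sketch below shows that shape under explicit assumptions: the c2pmsg_read/c2pmsg_write helpers and the choice of indices 66/82/90 for message, argument and response are illustrative only, not what any particular SMU firmware mandates.

#include <stdint.h>

/* Assumed accessors over the C2PMSG scratch space; not a real amdgpu API. */
extern void c2pmsg_write(unsigned int index, uint32_t value);
extern uint32_t c2pmsg_read(unsigned int index);

int smu_send_msg(uint32_t msg_id, uint32_t arg)
{
	int timeout = 100000;

	c2pmsg_write(90, 0);		/* clear the response register */
	c2pmsg_write(82, arg);		/* stage the message argument */
	c2pmsg_write(66, msg_id);	/* writing the ID kicks the firmware */

	while (c2pmsg_read(90) == 0 && --timeout)
		;			/* busy-wait for a response */

	return timeout ? 0 : -1;
}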