drm/amdgpu: switch to new amdgpu_nbio structure
No functional change, just switch to new structures.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 078ef4e932
commit bebc076285
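The whole patch is the mechanical fallout of one data-structure move: the NBIO callback table and the HDP flush register table, which used to live in amdgpu.h and hang off amdgpu_device as a lone nbio_funcs pointer, now travel together in a per-device struct amdgpu_nbio. Reconstructed from the accesses in the hunks below (a sketch, not a verbatim excerpt of amdgpu_nbio.h; field order is an assumption):

/* Inferred from the adev->nbio.funcs / adev->nbio.hdp_flush_reg usage below. */
struct amdgpu_nbio {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	const struct amdgpu_nbio_funcs *funcs;
};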
@@ -73,6 +73,7 @@
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
@@ -644,69 +645,11 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
-	u32 ref_and_mask_cp0;
-	u32 ref_and_mask_cp1;
-	u32 ref_and_mask_cp2;
-	u32 ref_and_mask_cp3;
-	u32 ref_and_mask_cp4;
-	u32 ref_and_mask_cp5;
-	u32 ref_and_mask_cp6;
-	u32 ref_and_mask_cp7;
-	u32 ref_and_mask_cp8;
-	u32 ref_and_mask_cp9;
-	u32 ref_and_mask_sdma0;
-	u32 ref_and_mask_sdma1;
-	u32 ref_and_mask_sdma2;
-	u32 ref_and_mask_sdma3;
-	u32 ref_and_mask_sdma4;
-	u32 ref_and_mask_sdma5;
-	u32 ref_and_mask_sdma6;
-	u32 ref_and_mask_sdma7;
-};
-
 struct amdgpu_mmio_remap {
 	u32 reg_offset;
 	resource_size_t bus_addr;
 };
 
-struct amdgpu_nbio_funcs {
-	const struct nbio_hdp_flush_reg *hdp_flush_reg;
-	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
-	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
-	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
-	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
-	u32 (*get_rev_id)(struct amdgpu_device *adev);
-	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
-	u32 (*get_memsize)(struct amdgpu_device *adev);
-	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
-			bool use_doorbell, int doorbell_index, int doorbell_size);
-	void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
-				   int doorbell_index, int instance);
-	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
-					 bool enable);
-	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
-						  bool enable);
-	void (*ih_doorbell_range)(struct amdgpu_device *adev,
-				  bool use_doorbell, int doorbell_index);
-	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
-						 bool enable);
-	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
-						bool enable);
-	void (*get_clockgating_state)(struct amdgpu_device *adev,
-				      u32 *flags);
-	void (*ih_control)(struct amdgpu_device *adev);
-	void (*init_registers)(struct amdgpu_device *adev);
-	void (*detect_hw_virt)(struct amdgpu_device *adev);
-	void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
 struct amdgpu_df_funcs {
 	void (*sw_init)(struct amdgpu_device *adev);
 	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
@@ -921,6 +864,9 @@ struct amdgpu_device {
 	u32 cg_flags;
 	u32 pg_flags;
 
+	/* nbio */
+	struct amdgpu_nbio nbio;
+
 	/* gfx */
 	struct amdgpu_gfx gfx;
 
@@ -974,7 +920,6 @@ struct amdgpu_device {
 	/* soc15 register offset based on ip, instance and segment */
 	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
-	const struct amdgpu_nbio_funcs *nbio_funcs;
 	const struct amdgpu_df_funcs *df_funcs;
 	const struct amdgpu_mmhub_funcs *mmhub_funcs;
 
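Everything below is the caller-side rename. As a hedged illustration of the before/after pattern (hypothetical wrapper functions, not taken from the patch):

/* Before: the callback table hung directly off the device. */
static u32 get_memsize_before(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

/* After: callbacks sit one level down in the embedded nbio struct; the
 * flush table is reached as adev->nbio.hdp_flush_reg rather than through
 * adev->nbio_funcs->hdp_flush_reg. */
static u32 get_memsize_after(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}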
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
 	unsigned long flags, address, data;
 	uint32_t ficadl_val, ficadh_val;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, lo_addr);
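The four df_v3_6 hunks above all touch the same indirect-register idiom, which later files (nv.c, soc15.c) repeat: fetch the PCIE index/data register offsets from the NBIO callbacks, then access the target register through that pair under pcie_idx_lock. A condensed sketch of the read side (simplified from nv_pcie_rreg further down; the df functions add their own FICA/perfmon framing around it):

static u32 pcie_indirect_rreg_sketch(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);	/* select the register */
	(void)RREG32(address);	/* post the index write */
	r = RREG32(data);	/* read the selected register */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}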
@@ -2421,7 +2421,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio_funcs->hdp_flush(adev, NULL);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2491,7 +2491,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio_funcs->hdp_flush(adev, NULL);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2560,7 +2560,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio_funcs->hdp_flush(adev, NULL);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2881,7 +2881,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio_funcs->hdp_flush(adev, NULL);
+		adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -4335,7 +4335,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
@@ -4355,8 +4355,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	}
 
 	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
 }
 
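Note the asymmetry in the two gfx_v10_0_ring_emit_hdp_flush hunks above: the flush mask table is now read straight off adev->nbio, while the request/done offsets still come through the callback table. A condensed sketch of the resulting flow (compute-ring mask selection elided; the reg_mem_engine value for gfx rings is an assumption):

static void emit_hdp_flush_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
	u32 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
	u32 reg_mem_engine = 1;	/* PFP for gfx rings (assumption) */

	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}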
@@ -4972,7 +4972,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask, reg_mem_engine;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
@@ -4992,8 +4992,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	}
 
 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			      ref_and_mask, ref_and_mask, 0x20);
 }
 
@@ -278,7 +278,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	int r;
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	mutex_lock(&adev->mman.gtt_window_lock);
 
@@ -557,7 +557,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 
 	/* size in MB on si */
 	adev->gmc.mc_vram_size =
-		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
@@ -794,7 +794,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
 	/* Flush HDP after it is initialized */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
 		false : true;
 
@@ -996,7 +996,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 
 	/* size in MB on si */
 	adev->gmc.mc_vram_size =
-		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
 	if (!(adev->flags & AMD_IS_APU)) {
@@ -1361,7 +1361,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 
 	/* After HDP is initialized, flush HDP.*/
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
 
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	navi10_ih_disable_interrupts(adev);
 
-	adev->nbio_funcs->ih_control(adev);
+	adev->nbio.funcs->ih_control(adev);
 
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	}
 	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
 
-	adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+	adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
					    ih->doorbell_index);
 
 	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
 
@@ -311,7 +311,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
-	.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
 	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
 	.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
 
 #endif
 
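The nbio_v2_3 pair above sets the template that the v6_1, v7_0 and v7_4 hunks below repeat: drop the .hdp_flush_reg initializer from the funcs table, make the flush register table non-static where needed, and export it from the header so SoC-level code can assign it into adev->nbio. Schematically (vX_Y is a placeholder version, not a real symbol):

/* nbio_vX_Y.c: the definition loses its static qualifier */
const struct nbio_hdp_flush_reg nbio_vX_Y_hdp_flush_reg = { /* masks */ };

/* nbio_vX_Y.h: matching export for soc15.c / nv.c to wire up */
extern const struct nbio_hdp_flush_reg nbio_vX_Y_hdp_flush_reg;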
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
 	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
 	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
 	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
 	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
-	.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
 	.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
 	.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
 
 #endif
 
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
-	.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
 	.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
 	.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
 
 #endif
 
@@ -266,7 +266,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
 	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
 	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
 	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
 	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -316,7 +316,6 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
-	.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
 	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
 	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
 
 #endif
 
@@ -46,6 +46,7 @@
 #include "gmc_v10_0.h"
 #include "gfxhub_v2_0.h"
 #include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
 #include "nv.h"
 #include "navi10_ih.h"
 #include "gfx_v10_0.h"
@@ -63,8 +64,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags, address, data;
 	u32 r;
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -78,8 +79,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -119,7 +120,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 nv_get_config_memsize(struct amdgpu_device *adev)
 {
-	return adev->nbio_funcs->get_memsize(adev);
+	return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -279,7 +280,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		u32 memsize = adev->nbio_funcs->get_memsize(adev);
+		u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
 		if (memsize != 0xffffffff)
 			break;
@@ -366,8 +367,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
 {
-	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -421,9 +422,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	adev->nbio_funcs = &nbio_v2_3_funcs;
+	adev->nbio.funcs = &nbio_v2_3_funcs;
+	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-	adev->nbio_funcs->detect_hw_virt(adev);
+	adev->nbio.funcs->detect_hw_virt(adev);
 
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
@@ -480,12 +482,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
 {
-	return adev->nbio_funcs->get_rev_id(adev);
+	return adev->nbio.funcs->get_rev_id(adev);
 }
 
 static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	adev->nbio_funcs->hdp_flush(adev, ring);
+	adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -692,7 +694,7 @@ static int nv_common_hw_init(void *handle)
 	/* enable aspm */
 	nv_program_aspm(adev);
 	/* setup nbio registers */
-	adev->nbio_funcs->init_registers(adev);
+	adev->nbio.funcs->init_registers(adev);
 	/* enable the doorbell aperture */
 	nv_enable_doorbell_aperture(adev, true);
 
@@ -854,9 +856,9 @@ static int nv_common_set_clockgating_state(void *handle,
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
-		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
 		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
@@ -884,7 +886,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
 	if (amdgpu_sriov_vf(adev))
 		*flags = 0;
 
-	adev->nbio_funcs->get_clockgating_state(adev, flags);
+	adev->nbio.funcs->get_clockgating_state(adev, flags);
 
 	/* AMD_CG_SUPPORT_HDP_MGCG */
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
 
@@ -746,13 +746,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask = 0;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
 	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
 
 	sdma_v4_0_wait_reg_mem(ring, 0, 1,
-			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
-			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       ref_and_mask, ref_and_mask, 10);
 }
 
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 ref_and_mask = 0;
-	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
 	if (ring->me == 0)
 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
-	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
 
-		adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index, 20);
 
 		if (amdgpu_sriov_vf(adev))
 
@@ -58,6 +58,9 @@
 #include "mmhub_v1_0.h"
 #include "df_v1_7.h"
 #include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags, address, data;
 	u32 r;
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags, address, data;
 	u64 r;
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	/* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
 	unsigned long flags, address, data;
 
-	address = adev->nbio_funcs->get_pcie_index_offset(adev);
-	data = adev->nbio_funcs->get_pcie_data_offset(adev);
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	/* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-	return adev->nbio_funcs->get_memsize(adev);
+	return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -461,7 +464,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
-		u32 memsize = adev->nbio_funcs->get_memsize(adev);
+		u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
 		if (memsize != 0xffffffff)
 			break;
@@ -624,8 +627,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
 {
-	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -639,7 +642,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-	return adev->nbio_funcs->get_rev_id(adev);
+	return adev->nbio.funcs->get_rev_id(adev);
 }
 
 int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -665,13 +668,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
 		adev->gmc.xgmi.supported = true;
 
-	if (adev->flags & AMD_IS_APU)
-		adev->nbio_funcs = &nbio_v7_0_funcs;
-	else if (adev->asic_type == CHIP_VEGA20 ||
-		adev->asic_type == CHIP_ARCTURUS)
-		adev->nbio_funcs = &nbio_v7_4_funcs;
-	else
-		adev->nbio_funcs = &nbio_v6_1_funcs;
+	if (adev->flags & AMD_IS_APU) {
+		adev->nbio.funcs = &nbio_v7_0_funcs;
+		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+	} else if (adev->asic_type == CHIP_VEGA20 ||
+		adev->asic_type == CHIP_ARCTURUS) {
+		adev->nbio.funcs = &nbio_v7_4_funcs;
+		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+	} else {
+		adev->nbio.funcs = &nbio_v6_1_funcs;
+		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+	}
 
 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
 		adev->df_funcs = &df_v3_6_funcs;
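One consequence of the hunk above is a new pairing invariant: because the flush table is no longer reachable through the funcs struct, every branch must assign adev->nbio.funcs and adev->nbio.hdp_flush_reg together, or the first ring_emit_hdp_flush would dereference a NULL table. Condensed from the branch above (VEGA20/ARCTURUS leg elided for brevity):

static void wire_nbio_sketch(struct amdgpu_device *adev)
{
	/* both members set in lockstep, per ASIC family */
	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}
}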
@@ -679,7 +686,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		adev->df_funcs = &df_v1_7_funcs;
 
 	adev->rev_id = soc15_get_rev_id(adev);
-	adev->nbio_funcs->detect_hw_virt(adev);
+	adev->nbio.funcs->detect_hw_virt(adev);
 
 	if (amdgpu_sriov_vf(adev))
 		adev->virt.ops = &xgpu_ai_virt_ops;
@@ -785,7 +792,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	adev->nbio_funcs->hdp_flush(adev, ring);
+	adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1241,12 +1248,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
 	if (!amdgpu_sriov_vf(adev)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
 			ring = &adev->sdma.instance[i].ring;
-			adev->nbio_funcs->sdma_doorbell_range(adev, i,
+			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
 		}
 
-		adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
 	}
 }
@@ -1260,13 +1267,13 @@ static int soc15_common_hw_init(void *handle)
 	/* enable aspm */
 	soc15_program_aspm(adev);
 	/* setup nbio registers */
-	adev->nbio_funcs->init_registers(adev);
+	adev->nbio.funcs->init_registers(adev);
 	/* remap HDP registers to a hole in mmio space,
 	 * for the purpose of expose those registers
 	 * to process space
 	 */
-	if (adev->nbio_funcs->remap_hdp_registers)
-		adev->nbio_funcs->remap_hdp_registers(adev);
+	if (adev->nbio.funcs->remap_hdp_registers)
+		adev->nbio.funcs->remap_hdp_registers(adev);
 
 	/* enable the doorbell aperture */
 	soc15_enable_doorbell_aperture(adev, true);
@@ -1429,9 +1436,9 @@ static int soc15_common_set_clockgating_state(void *handle,
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
-		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
@@ -1446,9 +1453,9 @@ static int soc15_common_set_clockgating_state(void *handle,
 		break;
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
-		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
-		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
@@ -1477,7 +1484,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 	if (amdgpu_sriov_vf(adev))
 		*flags = 0;
 
-	adev->nbio_funcs->get_clockgating_state(adev, flags);
+	adev->nbio.funcs->get_clockgating_state(adev, flags);
 
 	/* AMD_CG_SUPPORT_HDP_LS */
 	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 
@@ -244,7 +244,7 @@ static int vcn_v2_0_hw_init(void *handle)
 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 	int i, r;
 
-	adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);
 
 	ring->sched.ready = true;
 
@@ -255,7 +255,7 @@ static int vcn_v2_5_hw_init(void *handle)
 			continue;
 		ring = &adev->vcn.inst[j].ring_dec;
 
-		adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);
 
 		r = amdgpu_ring_test_ring(ring);
 
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	/* disable irqs */
 	vega10_ih_disable_interrupts(adev);
 
-	adev->nbio_funcs->ih_control(adev);
+	adev->nbio.funcs->ih_control(adev);
 
 	ih = &adev->irq.ih;
 	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
 
@@ -460,7 +460,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 		return ret;
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	if (!drv2smu)
 		memcpy(table_data, table->cpu_addr, table->size);
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
			priv->smu_tables.entry[table_id].table_id);
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
			return -EINVAL);
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
			return ret);
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
			return ret);
 
 	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev, NULL);
+	adev->nbio.funcs->hdp_flush(adev, NULL);
 
 	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
 
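The powerplay/SMU hunks above all follow one pattern: before the CPU copies a table that the SMU firmware just wrote into GPU-accessible memory, the Host Data Path (HDP) cache is flushed so the reads observe fresh data; passing a NULL ring makes hdp_flush perform a direct register write instead of emitting a ring packet. A minimal sketch of that consumer pattern (hypothetical helper, parameter names assumed):

static void copy_table_from_smu_sketch(struct amdgpu_device *adev,
				       void *dst, const void *src, size_t size)
{
	/* flush hdp cache so CPU reads see the SMU's writes */
	adev->nbio.funcs->hdp_flush(adev, NULL);
	memcpy(dst, src, size);
}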