drm/amdgpu: init vega10 SR-IOV reg access mode
Set different register access modes according to the features provided by
the firmware.

Signed-off-by: Trigger Huang <Trigger.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 78d4811267
parent e79a04d531
committed by Alex Deucher
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1532,6 +1532,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		r = amdgpu_virt_request_full_gpu(adev, true);
 		if (r)
 			return -EAGAIN;
+
+		/* query the reg access mode at the very beginning */
+		amdgpu_virt_init_reg_access_mode(adev);
 	}
 
 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -426,3 +426,47 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
 
 	return clk;
 }
+
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (virt->ops && virt->ops->init_reg_access_mode)
+		virt->ops->init_reg_access_mode(adev);
+}
+
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (amdgpu_sriov_vf(adev)
+		&& (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
+		ret = true;
+
+	return ret;
+}
+
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (amdgpu_sriov_vf(adev)
+		&& (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
+		&& !(amdgpu_sriov_runtime(adev)))
+		ret = true;
+
+	return ret;
+}
+
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	struct amdgpu_virt *virt = &adev->virt;
+
+	if (amdgpu_sriov_vf(adev)
+		&& (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING))
+		ret = true;
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -48,6 +48,12 @@ struct amdgpu_vf_error_buffer {
 	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
 };
 
+/* According to the fw feature, some new reg access modes are supported */
+#define AMDGPU_VIRT_REG_ACCESS_LEGACY		(1 << 0) /* directly mmio */
+#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH	(1 << 1) /* by PSP */
+#define AMDGPU_VIRT_REG_ACCESS_RLC		(1 << 2) /* by RLC */
+#define AMDGPU_VIRT_REG_SKIP_SEETING		(1 << 3) /* Skip setting reg */
+
 /**
  * struct amdgpu_virt_ops - amdgpu device virt operations
  */
@@ -59,6 +65,7 @@ struct amdgpu_virt_ops {
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
 	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
 	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
+	void (*init_reg_access_mode)(struct amdgpu_device *adev);
 };
 
 /*
@@ -258,6 +265,7 @@ struct amdgpu_virt {
 	uint32_t gim_feature;
 	/* protect DPM events to GIM */
 	struct mutex dpm_mutex;
+	uint32_t reg_access_mode;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -307,4 +315,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
 uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
 
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -26,6 +26,7 @@
 #include "nbio/nbio_6_1_sh_mask.h"
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
 #include "soc15.h"
 #include "vega10_ih.h"
 #include "soc15_common.h"
@@ -448,6 +449,23 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
 	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 }
 
+static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
+{
+	uint32_t rlc_fw_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
+	uint32_t sos_fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+
+	adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
+
+	if (rlc_fw_ver >= 0x5d)
+		adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
+
+	if (sos_fw_ver >= 0x80455)
+		adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
+
+	if (sos_fw_ver >= 0x8045b)
+		adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
 	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
@@ -456,4 +474,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.trans_msg = xgpu_ai_mailbox_trans_msg,
 	.get_pp_clk = xgpu_ai_get_pp_clk,
 	.force_dpm_level = xgpu_ai_force_dpm_level,
+	.init_reg_access_mode = xgpu_ai_init_reg_access_mode,
 };
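For context, the new helpers are meant to let IP code pick a register-programming path under SR-IOV. A minimal usage sketch follows; it is not part of the patch, the wrapper function is hypothetical, and only the amdgpu_virt_support_*() helpers added above and the driver's existing WREG32() macro are assumed.

/*
 * Illustrative sketch only -- not part of this commit.  A hypothetical
 * wrapper showing how the new helpers could gate register programming
 * in an SR-IOV guest.
 */
static void example_program_reg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t val)
{
	/* Host has already programmed this register; skip it in the guest. */
	if (amdgpu_virt_support_skip_setting(adev))
		return;

	/*
	 * RLC firmware is new enough: route the write through the RLC
	 * (e.g. a WREG32_RLC-style wrapper) instead of direct MMIO.
	 */
	if (amdgpu_virt_support_rlc_prg_reg(adev)) {
		/* RLC-assisted write path would go here. */
		return;
	}

	/* Legacy mode: plain MMIO write. */
	WREG32(reg, val);
}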