drm/amdgpu/gfx11: properly handle error ints on all pipes

Need to handle the error interrupt enables for all gfx and compute pipes,
not just CP_INT_CNTL_RING0.

v2: fix indexing (Jesse)

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 2662b7d9d8
parent 4b95cec689
Author: Alex Deucher <alexander.deucher@amd.com>
Date:   2024-07-01 11:18:00 -04:00
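
The fix follows a lookup-then-read-modify-write pattern: map each (me, pipe)
pair to its CP interrupt-control register offset, and touch only pipes for
which a register exists (a zero offset means "not handled here"). A minimal
standalone sketch of that control flow, with made-up offsets standing in for
the SOC15_REG_OFFSET() values:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical per-pipe offsets; the driver derives the real ones
	 * via SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RINGn). */
	static uint32_t get_cpg_int_cntl(int me, int pipe)
	{
		static const uint32_t ring_regs[] = { 0x1e0a, 0x1e0b };

		if (me != 0 || pipe < 0 || pipe > 1)
			return 0;	/* 0 = no register to program */
		return ring_regs[pipe];
	}

	int main(void)
	{
		/* Mirror the loops the patch adds: visit every (me, pipe)
		 * pair, program only pipes that resolve to a register. */
		for (int me = 0; me < 1; me++)
			for (int pipe = 0; pipe < 2; pipe++) {
				uint32_t reg = get_cpg_int_cntl(me, pipe);

				if (reg)
					printf("RMW reg 0x%x (me %d pipe %d)\n",
					       (unsigned)reg, me, pipe);
			}
		return 0;
	}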

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c

@@ -1953,26 +1953,74 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
 	gfx_v11_0_init_gds_vmid(adev);
 }
 
+static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+				      int me, int pipe)
+{
+	if (me != 0)
+		return 0;
+
+	switch (pipe) {
+	case 0:
+		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+	case 1:
+		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
+	default:
+		return 0;
+	}
+}
+
+static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+				      int me, int pipe)
+{
+	/*
+	 * amdgpu controls only the first MEC. That's why this function only
+	 * handles the setting of interrupts for this specific MEC. All other
+	 * pipes' interrupts are set by amdkfd.
+	 */
+	if (me != 1)
+		return 0;
+
+	switch (pipe) {
+	case 0:
+		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+	case 1:
+		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+	case 2:
+		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
+	case 3:
+		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
+	default:
+		return 0;
+	}
+}
+
 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 						bool enable)
 {
-	u32 tmp;
+	u32 tmp, cp_int_cntl_reg;
+	int i, j;
 
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
-	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
-			    enable ? 1 : 0);
-	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
-			    enable ? 1 : 0);
-	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
-			    enable ? 1 : 0);
-	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
-			    enable ? 1 : 0);
-
-	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+	for (i = 0; i < adev->gfx.me.num_me; i++) {
+		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+			cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+			if (cp_int_cntl_reg) {
+				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+						    enable ? 1 : 0);
+				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+						    enable ? 1 : 0);
+				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+						    enable ? 1 : 0);
+				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+						    enable ? 1 : 0);
+				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+			}
+		}
+	}
 }
 
 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
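
Each register update in the new loop above is a read-modify-write: read the
register once, fold in every enable bit with REG_SET_FIELD(), then write it
back once. A self-contained mock of that chain (the field positions here are
invented for illustration; the kernel's REG_SET_FIELD() takes its shift/mask
from the generated per-register header constants):

	#include <stdint.h>

	#define CNTX_BUSY_SHIFT	26	/* invented bit positions */
	#define CNTX_BUSY_MASK	(1u << CNTX_BUSY_SHIFT)
	#define GFX_IDLE_SHIFT	19
	#define GFX_IDLE_MASK	(1u << GFX_IDLE_SHIFT)

	#define SET_FIELD(v, mask, shift, x) \
		(((v) & ~(mask)) | (((uint32_t)(x) << (shift)) & (mask)))

	static uint32_t build_int_cntl(uint32_t old, int enable)
	{
		uint32_t tmp = old;	/* start from the live value */

		tmp = SET_FIELD(tmp, CNTX_BUSY_MASK, CNTX_BUSY_SHIFT, enable);
		tmp = SET_FIELD(tmp, GFX_IDLE_MASK, GFX_IDLE_SHIFT, enable);
		return tmp;		/* caller writes this back once */
	}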
@@ -6201,15 +6249,42 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *source,
-					      unsigned type,
+					      unsigned int type,
 					      enum amdgpu_interrupt_state state)
 {
+	u32 cp_int_cntl_reg, cp_int_cntl;
+	int i, j;
+
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 	case AMDGPU_IRQ_STATE_ENABLE:
-		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
-				      PRIV_REG_INT_ENABLE,
-				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+		for (i = 0; i < adev->gfx.me.num_me; i++) {
+			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+				if (cp_int_cntl_reg) {
+					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+								    PRIV_REG_INT_ENABLE,
+								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+				}
+			}
+		}
+		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+				/* MECs start at 1 */
+				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+				if (cp_int_cntl_reg) {
+					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+								    PRIV_REG_INT_ENABLE,
+								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+				}
+			}
+		}
 		break;
 	default:
 		break;
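
Note that the compute loop passes CP_ME1_PIPE0_INT_CNTL to REG_SET_FIELD()
even when the write lands in the PIPE1..PIPE3 registers. The register name is
only used to pick the field's shift and mask, so this is safe as long as
PRIV_REG_INT_ENABLE sits at the same bit in all four registers. A hypothetical
guard (constant names follow the driver's generated-header convention) that
would catch a divergence at build time:

	_Static_assert(CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT ==
		       CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT,
		       "PRIV_REG_INT_ENABLE must be at the same bit on all pipes");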
@@ -6220,15 +6295,28 @@ static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
 					       struct amdgpu_irq_src *source,
-					       unsigned type,
+					       unsigned int type,
 					       enum amdgpu_interrupt_state state)
 {
+	u32 cp_int_cntl_reg, cp_int_cntl;
+	int i, j;
+
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 	case AMDGPU_IRQ_STATE_ENABLE:
-		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
-				      PRIV_INSTR_INT_ENABLE,
-				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+		for (i = 0; i < adev->gfx.me.num_me; i++) {
+			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+				if (cp_int_cntl_reg) {
+					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+								    PRIV_INSTR_INT_ENABLE,
+								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+				}
+			}
+		}
 		break;
 	default:
 		break;
@@ -6252,8 +6340,8 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
 	case 0:
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 			ring = &adev->gfx.gfx_ring[i];
-			/* we only enabled 1 gfx queue per pipe for now */
-			if (ring->me == me_id && ring->pipe == pipe_id)
+			if (ring->me == me_id && ring->pipe == pipe_id &&
+			    ring->queue == queue_id)
 				drm_sched_fault(&ring->sched);
 		}
 		break;
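
The last hunk drops the "1 gfx queue per pipe" shortcut: with error interrupts
now enabled on every pipe, the fault handler must also match the queue, or it
would fault every ring sharing the pipe. A condensed, illustrative form of the
tightened predicate (the struct and helper below are illustrative, not driver
code):

	struct ring_id { int me, pipe, queue; };

	/* Before: me + pipe was enough (one gfx queue per pipe).
	 * After: queue must match too, so only the faulting ring is hit. */
	static int ring_matches(const struct ring_id *ring,
				int me_id, int pipe_id, int queue_id)
	{
		return ring->me == me_id && ring->pipe == pipe_id &&
		       ring->queue == queue_id;
	}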