amd-drm-fixes-6.11-2024-07-18:

amdgpu:
- Bump driver version for GFX12 DCC
- DC documentation warning fixes
- VCN unified queue power fix
- SMU fix
- RAS fix
- Display corruption fix

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCZpmNAQAKCRC93/aFa7yZ
2G9ZAQDOdAXjXIZGGk6QKqHa9dJHX7mO++1YOCjHlmB8ZskZIgD/Xqs8H1Ac+SO9
COproXO54h+D+7oSFOvQnYeyrgj4ngE=
=Avvn
-----END PGP SIGNATURE-----

Merge tag 'amd-drm-fixes-6.11-2024-07-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.11-2024-07-18:

amdgpu:
- Bump driver version for GFX12 DCC
- DC documentation warning fixes
- VCN unified queue power fix
- SMU fix
- RAS fix
- Display corruption fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240718215258.79356-1-alexander.deucher@amd.com
This commit is contained in commit 627a24f5f2.
@@ -8,37 +8,22 @@ and the code documentation when it is automatically generated.
 DCHUBBUB
 --------

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
    :doc: overview

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
-   :internal:
-
 HUBP
 ----

 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
    :doc: overview

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
-   :internal:
-
 DPP
 ---

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
    :doc: overview

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
    :internal:

 MPC

@@ -47,11 +32,9 @@ MPC
 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
    :doc: overview

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
-   :export:
-
 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
    :internal:
+   :no-identifiers: mpcc_blnd_cfg mpcc_alpha_blend_mode

 OPP
 ---

@@ -59,20 +42,14 @@ OPP
 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
    :doc: overview

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
-   :export:
-
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
-   :internal:
-
 DIO
 ---

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
    :doc: overview

 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
    :export:

-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
    :internal:
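The directives above pull documentation out of the C sources: ":doc: overview" imports a "DOC:" comment block, while ":export:" and ":internal:" re-emit kernel-doc comments for exported and file-local symbols. A minimal sketch of the C side such directives consume (illustrative only, not from this patch; example_hw_block_enable and struct example_ctx are invented names):

/**
 * DOC: overview
 *
 * Example overview text that a ".. kernel-doc:: ... :doc: overview"
 * directive would copy into the rendered documentation.
 */

/**
 * example_hw_block_enable() - Enable the example hardware block
 * @ctx: hypothetical driver context, not a real amdgpu type
 *
 * Return: 0 on success, negative errno otherwise.
 */
int example_hw_block_enable(struct example_ctx *ctx);

Because every ":export:"/":internal:" pass re-emits declarations from the same header, listing one header under several hardware blocks produced the duplicate-declaration warnings this patch removes.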
@@ -132,7 +132,7 @@ The DRM blend mode and its elements are then mapped by AMDGPU display manager
 (MPC), as follows:

 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
-   :functions: mpcc_blnd_cfg
+   :identifiers: mpcc_blnd_cfg

 Therefore, the blending configuration for a single MPCC instance on the MPC
 tree is defined by :c:type:`mpcc_blnd_cfg`, where

@@ -144,7 +144,7 @@ alpha and plane alpha values. It sets one of the three modes for
 :c:type:`MPCC_ALPHA_BLND_MODE`, as described below.

 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
-   :functions: mpcc_alpha_blend_mode
+   :identifiers: mpcc_alpha_blend_mode

 DM then maps the elements of `enum mpcc_alpha_blend_mode` to those in the DRM
 blend formula, as follows:
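To make that mapping concrete, here is a hedged sketch of how a DRM per-plane blend mode could be translated into enum mpcc_alpha_blend_mode. It is a simplified stand-in for what the display manager does, not the literal upstream code, and it assumes DRM's drm_blend.h macros and DC's mpc.h enum are in scope:

/* Illustrative only: simplified from the DM blending path. */
static enum mpcc_alpha_blend_mode
example_map_drm_blend(uint16_t plane_alpha, unsigned int pixel_blend_mode)
{
        switch (pixel_blend_mode) {
        case DRM_MODE_BLEND_PIXEL_NONE:
                /* Per-pixel alpha ignored; only the global plane alpha applies. */
                return MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
        default:
                /* Premultiplied/coverage: honor per-pixel alpha, folding in the
                 * global plane alpha as a gain when it is not fully opaque. */
                return plane_alpha == 0xffff ?
                        MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA :
                        MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
        }
}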
@@ -116,9 +116,10 @@
  * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
  * - 3.56.0 - Update IB start address and size alignment for decode and encode
  * - 3.57.0 - Compute tunneling on GFX10+
+ * - 3.58.0 - Add GFX12 DCC support
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	57
+#define KMS_DRIVER_MINOR	58
 #define KMS_DRIVER_PATCHLEVEL	0

 /*
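The minor-version bump is what userspace keys off to discover GFX12 DCC support. A hedged userspace sketch using libdrm (not part of this patch; the render-node path is an assumption about the system):

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed amdgpu node */
        if (fd < 0)
                return 1;

        drmVersionPtr v = drmGetVersion(fd);
        if (v) {
                /* GFX12 DCC is advertised from amdgpu KMS 3.58 onward. */
                int has_gfx12_dcc = v->version_major > 3 ||
                                    (v->version_major == 3 && v->version_minor >= 58);
                printf("%s %d.%d.%d, GFX12 DCC aware: %s\n", v->name,
                       v->version_major, v->version_minor, v->version_patchlevel,
                       has_gfx12_dcc ? "yes" : "no");
                drmFreeVersion(v);
        }
        return 0;
}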
@@ -1591,6 +1591,68 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
 	}
 }

+static int psp_ras_send_cmd(struct psp_context *psp,
+		enum ras_command cmd_id, void *in, void *out)
+{
+	struct ta_ras_shared_memory *ras_cmd;
+	uint32_t cmd = cmd_id;
+	int ret = 0;
+
+	if (!in)
+		return -EINVAL;
+
+	mutex_lock(&psp->ras_context.mutex);
+	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+	switch (cmd) {
+	case TA_RAS_COMMAND__ENABLE_FEATURES:
+	case TA_RAS_COMMAND__DISABLE_FEATURES:
+		memcpy(&ras_cmd->ras_in_message,
+			in, sizeof(ras_cmd->ras_in_message));
+		break;
+	case TA_RAS_COMMAND__TRIGGER_ERROR:
+		memcpy(&ras_cmd->ras_in_message.trigger_error,
+			in, sizeof(ras_cmd->ras_in_message.trigger_error));
+		break;
+	case TA_RAS_COMMAND__QUERY_ADDRESS:
+		memcpy(&ras_cmd->ras_in_message.address,
+			in, sizeof(ras_cmd->ras_in_message.address));
+		break;
+	default:
+		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	ras_cmd->cmd_id = cmd;
+	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+
+	switch (cmd) {
+	case TA_RAS_COMMAND__TRIGGER_ERROR:
+		if (ret || psp->cmd_buf_mem->resp.status)
+			ret = -EINVAL;
+		else if (out)
+			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
+		break;
+	case TA_RAS_COMMAND__QUERY_ADDRESS:
+		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+			ret = -EINVAL;
+		else if (out)
+			memcpy(out,
+				&ras_cmd->ras_out_message.address,
+				sizeof(ras_cmd->ras_out_message.address));
+		break;
+	default:
+		break;
+	}
+
+err_out:
+	mutex_unlock(&psp->ras_context.mutex);
+
+	return ret;
+}
+
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
 	struct ta_ras_shared_memory *ras_cmd;

@@ -1632,23 +1694,15 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 int psp_ras_enable_features(struct psp_context *psp,
 		union ta_ras_cmd_input *info, bool enable)
 {
-	struct ta_ras_shared_memory *ras_cmd;
+	enum ras_command cmd_id;
 	int ret;

-	if (!psp->ras_context.context.initialized)
+	if (!psp->ras_context.context.initialized || !info)
 		return -EINVAL;

-	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-	if (enable)
-		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
-	else
-		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
-
-	ras_cmd->ras_in_message = *info;
-
-	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+	cmd_id = enable ?
+		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
+	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
 	if (ret)
 		return -EINVAL;

@@ -1672,6 +1726,8 @@ int psp_ras_terminate(struct psp_context *psp)

 	psp->ras_context.context.initialized = false;

+	mutex_destroy(&psp->ras_context.mutex);
+
 	return ret;
 }

@@ -1756,9 +1812,10 @@ int psp_ras_initialize(struct psp_context *psp)

 	ret = psp_ta_load(psp, &psp->ras_context.context);

-	if (!ret && !ras_cmd->ras_status)
+	if (!ret && !ras_cmd->ras_status) {
 		psp->ras_context.context.initialized = true;
-	else {
+		mutex_init(&psp->ras_context.mutex);
+	} else {
 		if (ras_cmd->ras_status)
 			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

@@ -1772,12 +1829,12 @@ int psp_ras_initialize(struct psp_context *psp)
 int psp_ras_trigger_error(struct psp_context *psp,
 		struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
 {
-	struct ta_ras_shared_memory *ras_cmd;
 	struct amdgpu_device *adev = psp->adev;
 	int ret;
 	uint32_t dev_mask;
+	uint32_t ras_status = 0;

-	if (!psp->ras_context.context.initialized)
+	if (!psp->ras_context.context.initialized || !info)
 		return -EINVAL;

 	switch (info->block_id) {

@@ -1801,13 +1858,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
 	dev_mask &= AMDGPU_RAS_INST_MASK;
 	info->sub_block_index |= dev_mask;

-	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
-	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
-	ras_cmd->ras_in_message.trigger_error = *info;
-
-	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+	ret = psp_ras_send_cmd(psp,
+			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
 	if (ret)
 		return -EINVAL;

@@ -1817,9 +1869,9 @@ int psp_ras_trigger_error(struct psp_context *psp,
 	if (amdgpu_ras_intr_triggered())
 		return 0;

-	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
 		return -EACCES;
-	else if (ras_cmd->ras_status)
+	else if (ras_status)
 		return -EINVAL;

 	return 0;

@@ -1829,25 +1881,16 @@ int psp_ras_query_address(struct psp_context *psp,
 		struct ta_ras_query_address_input *addr_in,
 		struct ta_ras_query_address_output *addr_out)
 {
-	struct ta_ras_shared_memory *ras_cmd;
 	int ret;

-	if (!psp->ras_context.context.initialized)
+	if (!psp->ras_context.context.initialized ||
+	    !addr_in || !addr_out)
 		return -EINVAL;

-	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+	ret = psp_ras_send_cmd(psp,
+			TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);

-	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
-	ras_cmd->ras_in_message.address = *addr_in;
-
-	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
-	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
-		return -EINVAL;
-
-	*addr_out = ras_cmd->ras_out_message.address;
-
-	return 0;
+	return ret;
 }
 // ras end
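The net effect of this refactor: every RAS TA request now funnels through psp_ras_send_cmd(), which takes ras_context.mutex around staging the request into the shared TA buffer, invoking the TA, and copying the result back out. A hedged illustration of the calling pattern (the wrapper name is invented, and psp_ras_send_cmd() is static, so this only makes sense inside amdgpu_psp.c):

static int example_query_bad_page(struct psp_context *psp,
                                  struct ta_ras_query_address_input *in,
                                  struct ta_ras_query_address_output *out)
{
        /* Locking, staging into shared_buf, TA invocation and copy-out all
         * happen inside the helper, so concurrent RAS requests (including
         * the debugfs TA path changed below) no longer race on the shared
         * staging buffer. */
        return psp_ras_send_cmd(psp, TA_RAS_COMMAND__QUERY_ADDRESS, in, out);
}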
@@ -200,6 +200,7 @@ struct psp_xgmi_context {
 struct psp_ras_context {
 	struct ta_context		context;
 	struct amdgpu_ras		*ras;
+	struct mutex			mutex;
 };

 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
@@ -348,6 +348,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
 	context->session_id = ta_id;

+	mutex_lock(&psp->ras_context.mutex);
 	ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
 	if (ret)
 		goto err_free_shared_buf;

@@ -366,6 +367,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
 		ret = -EFAULT;

 err_free_shared_buf:
+	mutex_unlock(&psp->ras_context.mutex);
 	kfree(shared_buf);

 	return ret;
@@ -147,6 +147,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}

+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

@@ -275,18 +279,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }

-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;

@@ -397,7 +389,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);

-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+		    !adev->vcn.using_unified_queue) {
 			struct dpg_pause_state new_state;

 			if (fence[j] ||

@@ -443,7 +437,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 	       AMD_PG_STATE_UNGATE);

-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+	    !adev->vcn.using_unified_queue) {
 		struct dpg_pause_state new_state;

 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {

@@ -469,8 +465,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)

 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+	struct amdgpu_device *adev = ring->adev;
+
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
 	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
-	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+	    !adev->vcn.using_unified_queue)
 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

 	atomic_dec(&ring->adev->vcn.total_submission_cnt);

@@ -724,12 +724,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	uint32_t *ib_checksum;
 	uint32_t ib_pack_in_dw;
 	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;

 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -742,7 +741,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib->length_dw = 0;

 	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 				+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);

@@ -761,7 +760,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

 	r = amdgpu_job_submit_direct(job, ring, &f);

@@ -851,15 +850,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 		struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;

 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -873,7 +872,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand

 	ib->length_dw = 0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

 	ib->ptr[ib->length_dw++] = 0x00000018;

@@ -895,7 +894,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

 	r = amdgpu_job_submit_direct(job, ring, &f);

@@ -918,15 +917,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 		struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;

 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,

@@ -940,7 +939,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han

 	ib->length_dw = 0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

 	ib->ptr[ib->length_dw++] = 0x00000018;

@@ -962,7 +961,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;

-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -329,6 +329,7 @@ struct amdgpu_vcn {

 	uint16_t inst_mask;
 	uint8_t	num_inst_per_aid;
+	bool using_unified_queue;
 };

 struct amdgpu_fw_shared_rb_ptrs_struct {
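The power fix above hinges on begin_use/end_use gating the DPG submission counter on exactly the same condition; before the change the two paths disagreed for VCN4+ unified-queue parts, so dpg_enc_submission_cnt could stay nonzero and keep the instance powered. A hedged sketch of the invariant (helper name invented for illustration):

static bool example_dpg_pause_needed(const struct amdgpu_device *adev)
{
        /* VCN4+ unified queue: firmware manages DPG pause, the driver
         * must stay out on both the begin_use and end_use sides. */
        return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) &&
               !adev->vcn.using_unified_queue;
}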
@@ -137,6 +137,13 @@ struct vblank_control_work {
 	bool enable;
 };

+/**
+ * struct idle_workqueue - Work data for periodic action in idle
+ * @work: Kernel work data for the work event
+ * @dm: amdgpu display manager device
+ * @enable: true if idle worker is enabled
+ * @running: true if idle worker is running
+ */
 struct idle_workqueue {
 	struct work_struct work;
 	struct amdgpu_display_manager *dm;

@@ -502,6 +509,12 @@ struct amdgpu_display_manager {
 	 * Deferred work for vblank control events.
 	 */
 	struct workqueue_struct *vblank_control_workqueue;
+
+	/**
+	 * @idle_workqueue:
+	 *
+	 * Periodic work for idle events.
+	 */
 	struct idle_workqueue *idle_workqueue;

 	struct drm_atomic_state *cached_state;

@@ -587,7 +600,9 @@ struct amdgpu_display_manager {
 	 */
 	struct mutex dpia_aux_lock;

-	/*
+	/**
+	 * @bb_from_dmub:
+	 *
 	 * Bounding box data read from dmub during early initialization for DCN4+
 	 */
 	struct dml2_soc_bb *bb_from_dmub;
@@ -147,16 +147,28 @@ struct cnv_color_keyer_params {
 	int color_keyer_blue_high;
 };

-/* new for dcn2: set the 8bit alpha values based on the 2 bit alpha
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT0 default: 0b00000000
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT1 default: 0b01010101
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT2 default: 0b10101010
- *ALPHA_2BIT_LUT. ALPHA_2BIT_LUT3 default: 0b11111111
+/**
+ * struct cnv_alpha_2bit_lut - Set the 8bit alpha values based on the 2 bit alpha
  */
 struct cnv_alpha_2bit_lut {
+	/**
+	 * @lut0: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT0. Default: 0b00000000
+	 */
 	int lut0;
+
+	/**
+	 * @lut1: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT1. Default: 0b01010101
+	 */
 	int lut1;
+
+	/**
+	 * @lut2: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT2. Default: 0b10101010
+	 */
 	int lut2;
+
+	/**
+	 * @lut3: ALPHA_2BIT_LUT. ALPHA_2BIT_LUT3. Default: 0b11111111
+	 */
 	int lut3;
 };
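The default LUT values expand a 2-bit alpha code by replicating its two bits four times, which maps the codes 0..3 linearly onto 0..255 (0b01 becomes 0b01010101 = 0x55). A small hedged illustration of that expansion (plain C, function name invented):

#include <stdint.h>

static uint8_t example_expand_alpha_2bit(uint8_t code /* 0..3 */)
{
        /* Defaults from the struct above: 0x00, 0x55, 0xAA, 0xFF. */
        static const uint8_t lut[4] = { 0x00, 0x55, 0xAA, 0xFF };

        return lut[code & 0x3];
}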
@@ -1039,6 +1039,20 @@ struct mpc_funcs {
 	 */
 	void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
 			bool lut_bank_a, int mpcc_id);
+	/**
+	 * @program_3dlut_size:
+	 *
+	 * Program 3D LUT size.
+	 *
+	 * Parameters:
+	 * - [in/out] mpc - MPC context.
+	 * - [in] is_17x17x17 - is 3dlut 17x17x17
+	 * - [in] mpcc_id
+	 *
+	 * Return:
+	 *
+	 * void
+	 */
 	void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
 };
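A hedged usage sketch for the newly documented hook (the caller context is invented for illustration; real callers live in the DC hardware sequencer):

static void example_select_3dlut_size(struct mpc *mpc, int mpcc_id)
{
        /* Select the 17x17x17 3D LUT layout for this MPCC instance, if the
         * hook is implemented for this MPC generation. */
        if (mpc->funcs->program_3dlut_size)
                mpc->funcs->program_3dlut_size(mpc, true /* is_17x17x17 */, mpcc_id);
}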
@@ -205,9 +205,24 @@ struct gamma_coefficients {
 	struct fixed31_32 user_brightness;
 };

+/**
+ * struct pwl_float_data - Fixed point RGB color
+ */
 struct pwl_float_data {
+	/**
+	 * @r: Component Red.
+	 */
 	struct fixed31_32 r;
+
+	/**
+	 * @g: Component Green.
+	 */
 	struct fixed31_32 g;
+
+	/**
+	 * @b: Component Blue.
+	 */
 	struct fixed31_32 b;
 };
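For orientation: fixed31_32 is DC's signed fixed-point number with 32 fractional bits, so an RGB component of 0.5 is stored as 1 << 31. A hedged, self-contained sketch of that representation; the type and converter below are illustrative stand-ins, not the kernel's fixed31_32.h API:

#include <stdint.h>

struct example_fix31_32 { int64_t value; };  /* assumed 32.32 layout */

static struct example_fix31_32 example_from_double(double x)
{
        /* Scale by 2^32 to move the fraction into the low 32 bits. */
        struct example_fix31_32 f = { (int64_t)(x * 4294967296.0) };

        return f;
}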
@@ -26,6 +26,16 @@
 #include "core_types.h"
 #include "link_enc_cfg.h"

+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO), is the display input and output unit in DCN. It
+ * includes output encoders to support different display output, like
+ * DisplayPort, HDMI, DVI interface, and others. It also includes the control
+ * and status channels for these interfaces.
+ */
+
 void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
 		struct fixed31_32 throttled_vcp_size)
 {

@@ -254,12 +264,31 @@ static const struct link_hwss dio_link_hwss = {
 	},
 };

+/**
+ * can_use_dio_link_hwss - Check if the link_hwss is accessible
+ *
+ * @link: Reference a link struct containing one or more sinks and the
+ * connective status.
+ * @link_res: Mappable hardware resource used to enable a link.
+ *
+ * Returns:
+ * Return true if the link encoder is accessible from link.
+ */
 bool can_use_dio_link_hwss(const struct dc_link *link,
 		const struct link_resource *link_res)
 {
 	return link->link_enc != NULL;
 }

+/**
+ * get_dio_link_hwss - Return link_hwss reference
+ *
+ * This function behaves like a get function to return the link_hwss populated
+ * in the link_hwss_dio.c file.
+ *
+ * Returns:
+ * Return the reference to the filled struct of link_hwss.
+ */
 const struct link_hwss *get_dio_link_hwss(void)
 {
 	return &dio_link_hwss;
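A hedged sketch of how these two functions pair up in a link_hwss selection path; this is a simplified stand-in (the real selector also considers DPIA and HPO DP variants), and the wrapper name is invented:

static const struct link_hwss *example_select_link_hwss(
                const struct dc_link *link,
                const struct link_resource *link_res)
{
        /* Prefer the DIO backend whenever the link encoder is reachable. */
        if (can_use_dio_link_hwss(link, link_res))
                return get_dio_link_hwss();

        return NULL; /* the real code falls back to other hwss variants */
}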
@@ -23,15 +23,6 @@
  *
  */

-/**
- * DOC: overview
- *
- * Display Input Output (DIO), is the display input and output unit in DCN. It
- * includes output encoders to support different display output, like
- * DisplayPort, HDMI, DVI interface, and others. It also includes the control
- * and status channels for these interfaces.
- */
-
 #ifndef __LINK_HWSS_DIO_H__
 #define __LINK_HWSS_DIO_H__
@@ -945,19 +945,10 @@ void optc1_set_drr(
 				OTG_FORCE_LOCK_ON_EVENT, 0,
 				OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
 				OTG_SET_V_TOTAL_MIN_MASK, 0);
-
-		// Setup manual flow control for EOF via TRIG_A
-		optc->funcs->setup_manual_trigger(optc);
-
-	} else {
-		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
-				OTG_SET_V_TOTAL_MIN_MASK, 0,
-				OTG_V_TOTAL_MIN_SEL, 0,
-				OTG_V_TOTAL_MAX_SEL, 0,
-				OTG_FORCE_LOCK_ON_EVENT, 0);
-
-		optc->funcs->set_vtotal_min_max(optc, 0, 0);
 	}
+
+	// Setup manual flow control for EOF via TRIG_A
+	optc->funcs->setup_manual_trigger(optc);
 }

 void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
@@ -453,6 +453,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);

+	/* Set the min/max selectors unconditionally so that
+	 * DMCUB fw may change OTG timings when necessary
+	 * TODO: Remove the w/a after fixing the issue in DMCUB firmware
+	 */
+	REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+		     OTG_V_TOTAL_MIN_SEL, 1,
+		     OTG_V_TOTAL_MAX_SEL, 1,
+		     OTG_FORCE_LOCK_ON_EVENT, 0,
+		     OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
 	REG_SET_8(OTG_TRIGA_CNTL, 0,
 			OTG_TRIGA_SOURCE_SELECT, 21,
 			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
@@ -1924,20 +1924,12 @@ static int smu_disable_dpms(struct smu_context *smu)
 	}

 	/*
-	 * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly
+	 * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly
 	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
 	 */
-	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
-		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
-		case IP_VERSION(13, 0, 4):
-		case IP_VERSION(13, 0, 11):
-		case IP_VERSION(14, 0, 0):
-		case IP_VERSION(14, 0, 1):
-			return 0;
-		default:
-			break;
-		}
-	}
+	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
+	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
+		return 0;

 	/*
 	 * For gpu reset, runpm and hibernation through BACO,
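The rewrite works because amdgpu packs IP versions into a single integer with the major field in the top bits, so one ">= 11 on the GC major" predicate subsumes the old per-MP1-version switch. A hedged sketch of that packing; the exact kernel macros carry additional variant/sub-revision fields in newer trees, so these are illustrative stand-ins:

/* Assumed layout: major in bits 16+, minor in 8..15, revision in 0..7. */
#define EX_IP_VERSION(mj, mn, rv)  (((mj) << 16) | ((mn) << 8) | (rv))
#define EX_IP_VERSION_MAJ(ver)     ((ver) >> 16)

/* e.g. EX_IP_VERSION_MAJ(EX_IP_VERSION(11, 0, 2)) == 11, so every GFX11+
 * APU matches without enumerating each MP1 version explicitly. */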
@@ -69,6 +69,9 @@
 #define SMU_14_0_0_UMD_PSTATE_SOCCLK			678
 #define SMU_14_0_0_UMD_PSTATE_FCLK			1800

+#define SMU_14_0_4_UMD_PSTATE_GFXCLK			938
+#define SMU_14_0_4_UMD_PSTATE_SOCCLK			938
+
 #define FEATURE_MASK(feature)                        (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
 	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \

@@ -1296,19 +1299,28 @@ static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context *smu,
 	switch (clk_type) {
 	case SMU_GFXCLK:
 	case SMU_SCLK:
-		clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK;
+		if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+			clk_limit = SMU_14_0_4_UMD_PSTATE_GFXCLK;
+		else
+			clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK;
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
 		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
 			smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
 		break;
 	case SMU_SOCCLK:
-		clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK;
+		if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+			clk_limit = SMU_14_0_4_UMD_PSTATE_SOCCLK;
+		else
+			clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK;
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
 		break;
 	case SMU_FCLK:
-		clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK;
+		if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4))
+			smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
+		else
+			clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK;
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
 		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)