drm fixes for 6.9-rc1
Merge tag 'drm-next-2024-03-22' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Fixes from the last week (or 3 weeks in amdgpu case), after amdgpu,
  it's xe and nouveau then a few scattered core fixes.

  core:
   - fix rounding in drm_fixp2int_round()

  bridge:
   - fix documentation for DRM_BRIDGE_OP_EDID

  sun4i:
   - fix 64-bit division on 32-bit architectures

  tests:
   - fix dependency on DRM_KMS_HELPER

  probe-helper:
   - never return negative values from .get_modes() plus driver fixes

  xe:
   - invalidate userptr vma on page pin fault
   - fail early on sysfs file creation error
   - skip VMA pinning on xe_exec if no batches

  nouveau:
   - clear bo resource bus after eviction
   - documentation fixes
   - don't check devinit disable on GSP

  amdgpu:
   - Freesync fixes
   - UAF IOCTL fixes
   - Fix mmhub client ID mapping
   - IH 7.0 fix
   - DML2 fixes
   - VCN 4.0.6 fix
   - GART bind fix
   - GPU reset fix
   - SR-IOV fix
   - OD table handling fixes
   - Fix TA handling on boards without display hardware
   - DML1 fix
   - ABM fix
   - eDP panel fix
   - DPPCLK fix
   - HDCP fix
   - Revert incorrect error case handling in ioremap
   - VPE fix
   - HDMI fixes
   - SDMA 4.4.2 fix
   - Other misc fixes

  amdkfd:
   - Fix duplicate BO handling in process restore"

* tag 'drm-next-2024-03-22' of https://gitlab.freedesktop.org/drm/kernel: (50 commits)
  drm/amdgpu/pm: Don't use OD table on Arcturus
  drm/amdgpu: drop setting buffer funcs in sdma442
  drm/amd/display: Fix noise issue on HDMI AV mute
  drm/amd/display: Revert Remove pixle rate limit for subvp
  Revert "drm/amdgpu/vpe: don't emit cond exec command under collaborate mode"
  Revert "drm/amd/amdgpu: Fix potential ioremap() memory leaks in amdgpu_device_init()"
  drm/amd/display: Add a dc_state NULL check in dc_state_release
  drm/amd/display: Return the correct HDCP error code
  drm/amd/display: Implement wait_for_odm_update_pending_complete
  drm/amd/display: Lock all enabled otg pipes even with no planes
  drm/amd/display: Amend coasting vtotal for replay low hz
  drm/amd/display: Fix idle check for shared firmware state
  drm/amd/display: Update odm when ODM combine is changed on an otg master pipe with no plane
  drm/amd/display: Init DPPCLK from SMU on dcn32
  drm/amd/display: Add monitor patch for specific eDP
  drm/amd/display: Allow dirty rects to be sent to dmub when abm is active
  drm/amd/display: Override min required DCFCLK in dml1_validate
  drm/amdgpu: Bypass display ta if display hw is not available
  drm/amdgpu: correct the KGQ fallback message
  drm/amdgpu/pm: Check the validity of overdiver power limit
  ...
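Note: the core item above concerns rounding in drm_fixp2int_round(). The following is a standalone sketch (not the kernel's drm_fixed.h implementation) of the intended behaviour: converting a signed Q32.32 fixed-point value to the nearest integer by adding half an integer step before truncating; the constant names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define FIXP_FRAC_BITS 32

static int64_t int2fixp(int64_t a)       { return a << FIXP_FRAC_BITS; }
static int64_t fixp2int_floor(int64_t a) { return a >> FIXP_FRAC_BITS; }

static int64_t fixp2int_round(int64_t a)
{
	/* add 0.5 in fixed-point, then truncate toward negative infinity */
	return fixp2int_floor(a + ((int64_t)1 << (FIXP_FRAC_BITS - 1)));
}

int main(void)
{
	int64_t v = int2fixp(3) + (int2fixp(1) / 2);	/* 3.5 */

	printf("%lld\n", (long long)fixp2int_round(v));	/* prints 4 */
	return 0;
}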
commit 7ee0490121
@@ -68,6 +68,7 @@ config DRM_USE_DYNAMIC_DEBUG
 config DRM_KUNIT_TEST_HELPERS
 	tristate
 	depends on DRM && KUNIT
+	select DRM_KMS_HELPER
 	help
 	  KUnit Helpers for KMS drivers.

@@ -80,7 +81,6 @@ config DRM_KUNIT_TEST
 	select DRM_EXEC
 	select DRM_EXPORT_FOR_TESTS if m
 	select DRM_GEM_SHMEM_HELPER
-	select DRM_KMS_HELPER
 	select DRM_KUNIT_TEST_HELPERS
 	select DRM_LIB_RANDOM
 	select PRIME_NUMBERS
@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
 {
 	int ret;

-	if (!adev->kfd.init_complete)
+	if (!adev->kfd.init_complete || adev->kfd.client.dev)
 		return 0;

 	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *

 	mutex_lock(&process_info->lock);

-	drm_exec_init(&exec, 0, 0);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		list_for_each_entry(peer_vm, &process_info->vm_list_head,
 				    vm_list_node) {
 			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
 			drm_exec_retry_on_contention(&exec);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				pr_err("Locking VM PD failed, ret: %d\n", ret);
 				goto ttm_reserve_fail;
+			}
 		}

 		/* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 			gobj = &mem->bo->tbo.base;
 			ret = drm_exec_prepare_obj(&exec, gobj, 1);
 			drm_exec_retry_on_contention(&exec);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
 				goto ttm_reserve_fail;
+			}
 		}
 	}

@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 	 * validations above would invalidate DMABuf imports again.
 	 */
 	ret = process_validate_vms(process_info, &exec.ticket);
-	if (ret)
+	if (ret) {
+		pr_debug("Validating VMs failed, ret: %d\n", ret);
 		goto validate_map_fail;
+	}

 	/* Update mappings not managed by KFD */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
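Note: a standalone illustration (not kernel code) of why the reservation loop above asks for DRM_EXEC_IGNORE_DUPLICATES — if the same buffer object appears twice in the restore list, a second attempt to reserve it must count as success rather than an error.

#include <stdbool.h>
#include <stdio.h>

struct bo { int id; bool locked; };

/* a second lock of the same bo is treated as already-reserved, not a failure */
static bool lock_bo_ignore_duplicates(struct bo *bo)
{
	if (bo->locked)
		return true;	/* duplicate entry: ignore */
	bo->locked = true;
	return true;
}

int main(void)
{
	struct bo a = { .id = 1 }, b = { .id = 2 };
	struct bo *list[] = { &a, &b, &a };	/* duplicate on purpose */

	for (unsigned i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		if (!lock_bo_ignore_duplicates(list[i]))
			printf("failed to reserve bo %d\n", list[i]->id);
	printf("reserved %s\n", a.locked && b.locked ? "all" : "some");
	return 0;
}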
@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * early on during init and before calling to RREG32.
 	 */
 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
-	if (!adev->reset_domain) {
-		r = -ENOMEM;
-		goto unmap_memory;
-	}
+	if (!adev->reset_domain)
+		return -ENOMEM;

 	/* detect hw virtualization here */
 	amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_device_get_job_timeout_settings(adev);
 	if (r) {
 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		goto unmap_memory;
+		return r;
 	}

 	amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
-		goto unmap_memory;
+		return r;

 	/* Get rid of things like offb */
 	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
 	if (r)
-		goto unmap_memory;
+		return r;

 	/* Enable TMZ based on IP_VERSION */
 	amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (adev->gmc.xgmi.supported) {
 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
 		if (r)
-			goto unmap_memory;
+			return r;
 	}

 	/* enable PCIE atomic ops */
@@ -4345,8 +4343,6 @@ release_ras_con:
 failed:
 	amdgpu_vf_error_trans_all(adev);

-unmap_memory:
-	iounmap(adev->rmmio);
 	return r;
 }

@@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 	}
 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
 		adev = mgpu_info.gpu_ins[i].adev;
-		if (!adev->kfd.init_complete)
+		if (!adev->kfd.init_complete) {
+			kgd2kfd_init_zone_device(adev);
 			amdgpu_amdkfd_device_init(adev);
+			amdgpu_amdkfd_drm_client_create(adev);
+		}
 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 }
@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 	r = amdgpu_ring_test_helper(kiq_ring);
 	spin_unlock(&kiq->ring_lock);
 	if (r)
-		DRM_ERROR("KCQ enable failed\n");
+		DRM_ERROR("KGQ enable failed\n");

 	return r;
 }
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
  */
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 {
+	int r;
+
 	if (bo->kfd_bo)
-		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
 						    addr, amdgpu_bo_size(bo),
 						    &amdgpu_hmm_hsa_ops);
-	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
-					    amdgpu_bo_size(bo),
-					    &amdgpu_hmm_gfx_ops);
+	else
+		r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+						 amdgpu_bo_size(bo),
+						 &amdgpu_hmm_gfx_ops);
+	if (r)
+		/*
+		 * Make sure amdgpu_hmm_unregister() doesn't call
+		 * mmu_interval_notifier_remove() when the notifier isn't properly
+		 * initialized.
+		 */
+		bo->notifier.mm = NULL;
+
+	return r;
 }

 /**
@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;

+	/* bypass hdcp initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
 	    !psp->hdcp_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;

+	if (!psp->hdcp_context.context.initialized)
+		return 0;
+
 	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
 }

@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;

+	/* bypass dtm initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->dtm_context.context.bin_desc.size_bytes ||
 	    !psp->dtm_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;

+	if (!psp->dtm_context.context.initialized)
+		return 0;
+
 	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
 }

@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;

+	/* bypass securedisplay initialization if dmu is harvested */
+	if (!amdgpu_device_has_display_hardware(psp->adev))
+		return 0;
+
 	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
 	    !psp->securedisplay_context.context.bin_desc.start_addr) {
 		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
@@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 				 gtt->ttm.dma_address, flags);
 	}
+	gtt->bound = true;
 }

 /*
@@ -60,6 +60,7 @@
 #define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
 #define FIRMWARE_VCN4_0_5	"amdgpu/vcn_4_0_5.bin"
 #define FIRMWARE_VCN4_0_6	"amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1	"amdgpu/vcn_4_0_6_1.bin"
 #define FIRMWARE_VCN5_0_0	"amdgpu/vcn_5_0_0.bin"

 MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
 MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
 MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);

 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
 {
 	char ucode_prefix[30];
 	char fw_name[40];
-	int r;
+	int r, i;

-	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-	r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
-	if (r)
-		amdgpu_ucode_release(&adev->vcn.fw);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+		    i == 1) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+		}

+		r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+		if (r) {
+			amdgpu_ucode_release(&adev->vcn.fw[i]);
+			return r;
+		}
+	}
 	return r;
 }

@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

 	/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)

 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+		amdgpu_ucode_release(&adev->vcn.fw[j]);
 	}

-	amdgpu_ucode_release(&adev->vcn.fw);
 	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
 	mutex_destroy(&adev->vcn.vcn_pg_lock);

@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 		const struct common_firmware_header *hdr;
 		unsigned int offset;

-		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+		hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
 			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
-				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+				memcpy_toio(adev->vcn.inst[i].cpu_addr,
+					    adev->vcn.fw[i]->data + offset,
 					    le32_to_cpu(hdr->ucode_size_bytes));
 				drm_dev_exit(idx);
 			}
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		const struct common_firmware_header *hdr;

-		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;

+			hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
 			/* currently only support 2 FW instances */
 			if (i >= 2) {
 				dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
 			}
 			idx = AMDGPU_UCODE_ID_VCN + i;
 			adev->firmware.ucode[idx].ucode_id = idx;
-			adev->firmware.ucode[idx].fw = adev->vcn.fw;
+			adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

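Note: a standalone sketch of the data-structure change carried through the VCN files above — one firmware handle per instance instead of a single shared pointer, with one instance allowed to load a differently named image. The names and helpers here are illustrative, not the amdgpu API.

#include <stdio.h>
#include <stdlib.h>

#define MAX_INSTANCES 4

struct fw_blob { char name[64]; };

struct vcn_state {
	int num_inst;
	struct fw_blob *fw[MAX_INSTANCES];	/* was: a single struct fw_blob *fw */
};

static struct fw_blob *request_fw(const char *name)
{
	struct fw_blob *b = calloc(1, sizeof(*b));

	if (b)
		snprintf(b->name, sizeof(b->name), "%s", name);
	return b;
}

int main(void)
{
	struct vcn_state vcn = { .num_inst = 2 };
	char name[64];

	for (int i = 0; i < vcn.num_inst; i++) {
		/* instance 1 may need a differently named image, as with VCN 4.0.6 */
		snprintf(name, sizeof(name), i ? "vcn_%d.bin" : "vcn.bin", i);
		vcn.fw[i] = request_fw(name);
		if (!vcn.fw[i])
			return 1;
		printf("instance %d -> %s\n", i, vcn.fw[i]->name);
	}
	for (int i = 0; i < vcn.num_inst; i++)
		free(vcn.fw[i]);
	return 0;
}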
@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
 struct amdgpu_vcn {
 	unsigned		fw_version;
 	struct delayed_work	idle_work;
-	const struct firmware	*fw;	/* VCN firmware */
+	const struct firmware	*fw[AMDGPU_MAX_VCN_INSTANCES];	/* VCN firmware */
 	unsigned		num_enc_rings;
 	enum amd_powergating_state cur_state;
 	bool			indirect_sram;
@@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
 {
 	unsigned int ret;

-	if (ring->adev->vpe.collaborate_mode)
-		return ~0;
-
 	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
 	amdgpu_ring_write(ring, lower_32_bits(addr));
 	amdgpu_ring_write(ring, upper_32_bits(addr));
@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)

 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(10, 1, 10):
 		soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 	u32 tmp;
 	int i;

-	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+	if (!amdgpu_sriov_vf(adev))
+		WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

 	gfx_v10_0_setup_rb(adev);
 	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
 		gfx_v10_3_program_pbb_mode(adev);

-	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
 		gfx_v10_3_set_power_brake_sequence(adev);

 	return r;
@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;

+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Program the AGP BAR */
 	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
 	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+	 * can be detected.
+	 */
+	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
 	return (wptr & ih->ptr_mask);
 }
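Note: a standalone model (not the IH register interface) of the sequence added above — set the overflow-clear bit to acknowledge the current overflow, then immediately drop it again so the next overflow can latch and be observed.

#include <stdint.h>
#include <stdio.h>

#define WPTR_OVERFLOW_CLEAR (1u << 5)

static uint32_t reg;	/* stands in for the IH ring-buffer control register */

static void reg_write(uint32_t v) { reg = v; }
static uint32_t reg_read(void)    { return reg; }

int main(void)
{
	uint32_t tmp = reg_read();

	tmp |= WPTR_OVERFLOW_CLEAR;	/* acknowledge the overflow */
	reg_write(tmp);
	tmp &= ~WPTR_OVERFLOW_CLEAR;	/* unset the bit right away */
	reg_write(tmp);
	printf("cleared, register now 0x%x\n", reg_read());
	return 0;
}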
@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(3, 3, 0):
 	case IP_VERSION(3, 3, 1):
-		mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+		mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+			    mmhub_client_ids_v3_3[cid][rw] :
+			    cid == 0x140 ? "UMSCH" : NULL;
 		break;
 	default:
 		mmhub_cid = NULL;
 		break;
 	}

-	if (!mmhub_cid && cid == 0x140)
-		mmhub_cid = "UMSCH";
-
 	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
 		mmhub_cid ? mmhub_cid : "unknown", cid);
 	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
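Note: a standalone version of the lookup hardening above — index into the name table only when the id is in range, with a named fallback for one known out-of-range id. The table contents here are placeholders.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const client_names[] = { "VMC", "UTCL2", "DCEDMC" };

static const char *client_name(unsigned int cid)
{
	const char *name = cid < ARRAY_SIZE(client_names) ? client_names[cid] :
			   cid == 0x140 ? "UMSCH" : NULL;

	return name ? name : "unknown";
}

int main(void)
{
	printf("%s %s %s\n", client_name(1), client_name(0x140), client_name(99));
	return 0;
}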
@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 doorbell_offset, doorbell;
 	u32 rb_cntl, ib_cntl;
-	int i, unset = 0;
+	int i;

 	for_each_inst(i, inst_mask) {
 		sdma[i] = &adev->sdma.instance[i].ring;

-		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = 1;
-		}
-
 		rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
 static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
 				       uint32_t inst_mask)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
 	int i;
-	bool unset = false;

 	for_each_inst(i, inst_mask) {
-		sdma[i] = &adev->sdma.instance[i].page;
-
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-			(!unset)) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = true;
-		}
-
 		rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
 					RB_ENABLE, 0);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
 			r = amdgpu_ring_test_helper(page);
 			if (r)
 				return r;
-
-			if (adev->mman.buffer_funcs_ring == page)
-				amdgpu_ttm_set_buffer_funcs_status(adev, true);
 		}

-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
-
 	return r;
 }

@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
  */
 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)

 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
  */
 static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;

 	if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)

 static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)

 	init_table += header->vcn_table_offset;

-	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);

 	MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
 		SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
  */
 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size;
 	uint32_t offset;
 	int i;

 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
+
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 		/* cache window 0: fw */
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)

 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

-		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V1_0_INSERT_DIRECT_WT(
@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
  */
 static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)

 static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
 	uint32_t offset;

 	/* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
 			mmUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);

 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 {
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;
-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
 			regUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);

 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
 	uint32_t offset, size, vcn_inst;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
 		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

-		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);

 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
@@ -45,7 +45,7 @@
 #define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

 #define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0	(0x48300 + 0x38000)

 #define VCN_HARVEST_MMSCH		0

@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i

 	/* VCN global tiling registers */
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
-		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+		adev->gfx.config.gb_addr_config, 0, indirect);
 }

 /**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
 		VCN, inst_idx, regUVD_MASTINT_EN),
 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

-
 	if (indirect)
 		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
 	uint32_t offset, size;
 	const struct common_firmware_header *hdr;

-	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
 	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	/* cache window 0: fw */
@@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
 		adev->dm.dc->debug.force_subvp_mclk_switch = true;

+	if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+		adev->dm.dc->debug.using_dml2 = true;
+
 	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

 	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	if (!adev->dm.freesync_module)
 		goto update;

-	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
+	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
 		bool edid_check_required = false;

-		if (edid) {
-			edid_check_required = is_dp_capable_without_timing_msa(
-						adev->dm.dc,
-						amdgpu_dm_connector);
+		if (is_dp_capable_without_timing_msa(adev->dm.dc,
+						     amdgpu_dm_connector)) {
+			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+				freesync_capable = true;
+				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+			} else {
+				edid_check_required = edid->version > 1 ||
+						      (edid->version == 1 &&
+						       edid->revision > 1);
+			}
 		}

-		if (edid_check_required == true && (edid->version > 1 ||
-					(edid->version == 1 && edid->revision > 1))) {
+		if (edid_check_required) {
 			for (i = 0; i < 4; i++) {

 				timing = &edid->detailed_timings[i];
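Note: a standalone sketch of the gating logic above — only trust EDID-based checks when an EDID is actually present, take the fast path when the continuous-frequency feature bit is set, and otherwise require at least EDID 1.2 before parsing detailed timings. Field names are simplified placeholders.

#include <stdbool.h>
#include <stdio.h>

#define EDID_FEATURE_CONTINUOUS_FREQ 0x01

struct edid_info { int version, revision; unsigned features; };

static bool needs_detailed_timing_check(const struct edid_info *edid, bool dp_without_msa)
{
	if (!edid || !dp_without_msa)
		return false;
	if (edid->features & EDID_FEATURE_CONTINUOUS_FREQ)
		return false;	/* range comes from the feature flag, no descriptor scan needed */
	return edid->version > 1 || (edid->version == 1 && edid->revision > 1);
}

int main(void)
{
	struct edid_info e = { .version = 1, .revision = 3, .features = 0 };

	printf("%d\n", needs_detailed_timing_check(&e, true));	/* prints 1 */
	return 0;
}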
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 	if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
 		clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;

+	/* DPPCLK */
+	dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+			&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+			&num_entries_per_clk->num_dppclk_levels);
+	num_levels = num_entries_per_clk->num_dppclk_levels;
+	clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+	//HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+	if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+		clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
 	if (num_entries_per_clk->num_dcfclk_levels &&
 	    num_entries_per_clk->num_dtbclk_levels &&
 	    num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 			= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
 	}

+	for (i = 0; i < num_levels; i++)
+		if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+			clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
 	/* Get UCLK, update bounding box */
 	clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
 	}
 }

+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+		struct dc_state *context)
+{
+	struct pipe_ctx *opp_head;
+	struct dce_hwseq *hws = dc->hwseq;
+	int i;
+
+	if (!hws->funcs.wait_for_blank_complete)
+		return;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		opp_head = &context->res_ctx.pipe_ctx[i];
+
+		if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+				dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+			continue;
+
+		hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+	}
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+	struct pipe_ctx *otg_master;
+	struct timing_generator *tg;
+	int i;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		otg_master = &context->res_ctx.pipe_ctx[i];
+		if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+				dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+			continue;
+		tg = otg_master->stream_res.tg;
+		if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+			tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+	}
+
+	/* ODM update may require to reprogram blank pattern for each OPP */
+	wait_for_blank_complete(dc, context);
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
 	int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 			context->stream_count == 0) {
 		/* Must wait for no flips to be pending before doing optimize bw */
 		wait_for_no_pipes_pending(dc, context);
+		/*
+		 * optimized dispclk depends on ODM setup. Need to wait for ODM
+		 * update pending complete before optimizing bandwidth.
+		 */
+		wait_for_odm_update_pending_complete(dc, context);
 		/* pplib is notified if disp_num changed */
 		dc->hwss.optimize_bandwidth(dc, context);
 		/* Need to do otg sync again as otg could be out of sync due to otg
@@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
 	if (stream->link->replay_settings.config.replay_supported)
 		return true;

+	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+		return true;
+
 	return false;
 }

@@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 	top_pipe_to_program->stream->update_flags.raw = 0;
 }

-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
 {
 /*
  * This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
 			}
 		}
 	}
+	wait_for_odm_update_pending_complete(dc, dc_context);
 }

 static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)

 bool dc_dmub_is_ips_idle_state(struct dc *dc)
 {
-	uint32_t idle_state = 0;
-
 	if (dc->debug.disable_idle_power_optimizations)
 		return false;

 	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
 		return false;

-	if (dc->hwss.get_idle_state)
-		idle_state = dc->hwss.get_idle_state(dc);
+	if (!dc->ctx->dmub_srv)
+		return false;

-	if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
-	    !(idle_state & DMUB_IPS2_ALLOW_MASK))
-		return true;
-
-	return false;
+	return dc->ctx->dmub_srv->idle_allowed;
 }

 /* set min and max memory clock to lowest and highest DPM level, respectively */
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)

 void dc_state_release(struct dc_state *state)
 {
-	kref_put(&state->refcount, dc_state_free);
+	if (state != NULL)
+		kref_put(&state->refcount, dc_state_free);
 }
 /*
  * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
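Note: a standalone sketch of the defensive release above — a put/release helper that tolerates a NULL object, so callers on error paths don't have to guard every call site themselves. Plain C with a trivial refcount, not the dc kref code.

#include <stdio.h>
#include <stdlib.h>

struct state { int refcount; };

static void state_release(struct state *s)
{
	if (s == NULL)
		return;
	if (--s->refcount == 0) {
		free(s);
		printf("freed\n");
	}
}

int main(void)
{
	struct state *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	s->refcount = 1;
	state_release(NULL);	/* harmless no-op */
	state_release(s);	/* drops the last reference and frees */
	return 0;
}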
@@ -1085,9 +1085,9 @@ struct replay_settings {
 	/* SMU optimization is enabled */
 	bool replay_smu_opt_enable;
 	/* Current Coasting vtotal */
-	uint16_t coasting_vtotal;
+	uint32_t coasting_vtotal;
 	/* Coasting vtotal table */
-	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+	uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
 	/* Maximum link off frame count */
 	enum replay_link_off_frame_count_level link_off_frame_count_level;
 	/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
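Note: a standalone arithmetic check motivating the widening above — at very low replay refresh rates the stretched vertical total can exceed 65535, so a uint16_t field would silently truncate it. The numbers are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pixel_clock_khz = 533250;	/* example stream */
	uint32_t htotal = 4400;
	uint32_t target_hz = 1;			/* coast down to ~1 Hz */
	uint32_t vtotal_32 = pixel_clock_khz * 1000u / (htotal * target_hz);
	uint16_t vtotal_16 = (uint16_t)vtotal_32;

	printf("needed vtotal %u, truncated to %u\n", vtotal_32, vtotal_16);
	return 0;
}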
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
 	.opp_set_disp_pattern_generator = NULL,
 	.opp_program_dpg_dimensions = NULL,
 	.dpg_is_blanked = NULL,
+	.dpg_is_pending = NULL,
 	.opp_destroy = opp1_destroy
 };

@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
 		(double_buffer_pending == 0);
 }

+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+	uint32_t double_buffer_pending;
+	uint32_t dpg_en;
+
+	REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+	REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+	return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
 void opp2_program_left_edge_extra_pixel (
 		struct output_pixel_processor *opp,
 		bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
 	.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
 	.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
 	.dpg_is_blanked = opp2_dpg_is_blanked,
+	.dpg_is_pending = opp2_dpg_is_pending,
 	.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
 	.opp_destroy = opp1_destroy,
 	.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(

 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);

+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
 void opp2_dpg_set_blank_color(
 	struct output_pixel_processor *opp,
 	const struct tg_color *color);
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
 	.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
 	.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
 	.dpg_is_blanked = opp2_dpg_is_blanked,
+	.dpg_is_pending = opp2_dpg_is_pending,
 	.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
 	.opp_destroy = opp1_destroy,
 	.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
 		pipe_cnt++;
 	}
 }
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+	if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+		context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
 	 * - Not TMZ surface
 	 */
 	if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+	    !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
 	    (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
 	    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
 	    (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state

 static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
 {
+	dml_uint_t width, height;
+
+	if (in->timing.h_addressable > 3840)
+		width = 3840;
+	else
+		width = in->timing.h_addressable; // 4K max
+
+	if (in->timing.v_addressable > 2160)
+		height = 2160;
+	else
+		height = in->timing.v_addressable; // 4K max
+
 	out->CursorBPP[location] = dml_cur_32bit;
 	out->CursorWidth[location] = 256;

 	out->GPUVMMinPageSizeKBytes[location] = 256;

-	out->ViewportWidth[location] = in->timing.h_addressable;
-	out->ViewportHeight[location] = in->timing.v_addressable;
+	out->ViewportWidth[location] = width;
+	out->ViewportHeight[location] = height;
 	out->ViewportStationary[location] = false;
 	out->ViewportWidthChroma[location] = 0;
 	out->ViewportHeightChroma[location] = 0;
@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
 	out->HTapsChroma[location] = 0;
 	out->VTapsChroma[location] = 0;
 	out->SourceScan[location] = dml_rotation_0;
-	out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+	out->ScalerRecoutWidth[location] = width;

 	out->LBBitPerPixel[location] = 57;

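Note: a standalone version of the clamping introduced above — cap the dummy plane's viewport at a 4K envelope instead of passing the full stream size through.

#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int max) { return v > max ? max : v; }

int main(void)
{
	unsigned int h_addressable = 7680, v_addressable = 4320;	/* 8K stream */
	unsigned int width  = clamp_u(h_addressable, 3840);
	unsigned int height = clamp_u(v_addressable, 2160);

	printf("dummy plane viewport %ux%u\n", width, height);
	return 0;
}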
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
 	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
 }

-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
 {
-	// Allocate Mode Lib Ctx
-	*dml2 = dml2_allocate_memory();
-
-	if (!(*dml2))
-		return false;
-
 	// Store config options
 	(*dml2)->config = *config;
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
 	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

 	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+	// Allocate Mode Lib Ctx
+	*dml2 = dml2_allocate_memory();
+
+	if (!(*dml2))
+		return false;
+
+	dml2_init(in_dc, config, dml2);

 	/*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
 	//dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
 	return true;
 }
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,

 	return true;
 }
+
+void dml2_reinit(const struct dc *in_dc,
+		 const struct dml2_configuration_options *config,
+		 struct dml2_context **dml2)
+{
+
+	dml2_init(in_dc, config, dml2);
+}
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
 	struct dml2_context *src_dml2);
 bool dml2_create_copy(struct dml2_context **dst_dml2,
 	struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+		 const struct dml2_configuration_options *config,
+		 struct dml2_context **dml2);

 /*
  * dml2_validate - Determines if a display configuration is supported or not.
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
 		return;
 	}

+	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+			resource_is_odm_topology_changed(new_pipe, old_pipe))
+		/* Detect odm changes */
+		new_pipe->update_flags.bits.odm = 1;
+
 	/* Exit on unchanged, unused pipe */
 	if (!old_pipe->plane_state && !new_pipe->plane_state)
 		return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,

 	/* Detect top pipe only changes */
 	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
-		/* Detect odm changes */
-		if (resource_is_odm_topology_changed(new_pipe, old_pipe))
-			new_pipe->update_flags.bits.odm = 1;
-
 		/* Detect global sync changes */
 		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
 				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
 	DC_LOGGER_INIT(dc->ctx->logger);
 	unsigned int prev_hubp_count = 0;
 	unsigned int hubp_count = 0;
+	struct pipe_ctx *pipe;

 	if (resource_is_pipe_topology_changed(dc->current_state, context))
 		resource_log_pipe_topology_update(dc, context);

 	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+			pipe = &context->res_ctx.pipe_ctx[i];

-			if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-				ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+			if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+				ASSERT(!pipe->plane_state->triplebuffer_flips);
 				/*turn off triple buffer for full update*/
 				dc->hwss.program_triplebuffer(
-					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+					dc, pipe, pipe->plane_state->triplebuffer_flips);
 			}
 		}
 	}
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
 			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
 		}

+	/* update ODM for blanked OTG master pipes */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+				!resource_is_pipe_type(pipe, DPP_PIPE) &&
+				pipe->update_flags.bits.odm &&
+				hws->funcs.update_odm)
+			hws->funcs.update_odm(dc, context, pipe);
+	}
+
 	/*
 	 * Program all updated pipes, order matters for mpcc setup. Start with
 	 * top pipe and program all pipes that follow in order
 	 */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		pipe = &context->res_ctx.pipe_ctx[i];

 		if (pipe->plane_state && !pipe->top_pipe) {
 			while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
 				context->stream_status[0].plane_count > 1) {
 			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
 		}
-
-		/* when dynamic ODM is active, pipes must be reconfigured when all planes are
-		 * disabled, as some transitions will leave software and hardware state
-		 * mismatched.
-		 */
-		if (dc->debug.enable_single_display_2to1_odm_policy &&
-				pipe->stream &&
-				pipe->update_flags.bits.disable &&
-				!pipe->prev_odm_pipe &&
-				hws->funcs.update_odm)
-			hws->funcs.update_odm(dc, context, pipe);
 	}
 }

@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
 	int counter;

 	for (counter = 0; counter < 1000; counter++) {
-		if (opp->funcs->dpg_is_blanked(opp))
+		if (!opp->funcs->dpg_is_pending(opp))
 			break;

 		udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
 		return false;
 	}

-	return true;
+	return opp->funcs->dpg_is_blanked(opp);
 }

 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
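Note: a standalone model of the polling change above — wait, with a bounded number of tries, for a "double-buffer update pending" flag to drop, then report the final blanked state instead of assuming success. The structure is simplified stand-in code, not the dcn20 implementation.

#include <stdbool.h>
#include <stdio.h>

struct opp_model { int pending_polls; bool blanked; };

static bool dpg_is_pending(struct opp_model *o)       { return o->pending_polls-- > 0; }
static bool dpg_is_blanked(const struct opp_model *o) { return o->blanked; }

static bool wait_for_blank_complete(struct opp_model *o)
{
	for (int counter = 0; counter < 1000; counter++)
		if (!dpg_is_pending(o))
			return dpg_is_blanked(o);
	return false;	/* timed out while an update was still pending */
}

int main(void)
{
	struct opp_model opp = { .pending_polls = 3, .blanked = true };

	printf("%s\n", wait_for_blank_complete(&opp) ? "blank complete" : "timeout");
	return 0;
}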
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 	if (pipe_ctx == NULL)
 		return;

-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
 				pipe_ctx->stream_res.stream_enc,
 				enable);
+
+		/* Wait for two frame to make sure AV mute is sent out */
+		if (enable) {
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+		}
+	}
 }

 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
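Note: a standalone sketch of the frame-wait idea above — alternating waits for "active" and "blank" states guarantees that at least two full frames have been emitted before continuing. This only simulates the state machine; a real implementation blocks on the timing generator.

#include <stdio.h>

enum crtc_state { STATE_VACTIVE, STATE_VBLANK };

static unsigned long frames;

static void wait_for_state(enum crtc_state s)
{
	/* a real implementation would block here until the CRTC reaches s */
	if (s == STATE_VBLANK)
		frames++;
}

int main(void)
{
	wait_for_state(STATE_VACTIVE);
	wait_for_state(STATE_VBLANK);
	wait_for_state(STATE_VACTIVE);
	wait_for_state(STATE_VBLANK);
	wait_for_state(STATE_VACTIVE);
	printf("waited %lu full frames\n", frames);
	return 0;
}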
@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
 			dsc->funcs->dsc_disconnect(dsc);
 		}
 	}
+
+	if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+		/*
+		 * blank pattern is generated by OPP, reprogram blank pattern
+		 * due to OPP count change
+		 */
+		dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
 }

 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
 	}
 }
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+		struct dc_state *context, bool lock)
+{
+	unsigned int i;
+	struct pipe_ctx *pipe;
+	struct timing_generator *tg;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+		tg = pipe->stream_res.tg;
+
+		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+				!tg->funcs->is_tg_enabled(tg) ||
+				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+			continue;
+
+		if (lock)
+			dc->hwss.pipe_control_lock(dc, pipe, true);
+		else
+			dc->hwss.pipe_control_lock(dc, pipe, false);
+	}
+}
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
 void dcn32_prepare_bandwidth(struct dc *dc,
 		struct dc_state *context);

+void dcn32_interdependent_update_lock(struct dc *dc,
+		struct dc_state *context, bool lock);
 #endif /* __DC_HWSS_DCN32_H__ */
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.disable_pixel_data = dcn20_disable_pixel_data,
 	.pipe_control_lock = dcn20_pipe_control_lock,
-	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.interdependent_update_lock = dcn32_interdependent_update_lock,
 	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn32_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
@@ -337,6 +337,9 @@ struct opp_funcs {
 	bool (*dpg_is_blanked)(
 			struct output_pixel_processor *opp);

+	bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
 	void (*opp_dpg_set_blank_color)(
 			struct output_pixel_processor *opp,
 			const struct tg_color *color);
@@ -331,6 +331,7 @@ struct timing_generator_funcs {

 	void (*init_odm)(struct timing_generator *tg);
 	void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+	void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
 };

 #endif
@@ -285,12 +285,12 @@ struct link_service {
 			enum replay_FW_Message_type msg,
 			union dmub_replay_cmd_set *cmd_data);
 	bool (*edp_set_coasting_vtotal)(
-			struct dc_link *link, uint16_t coasting_vtotal);
+			struct dc_link *link, uint32_t coasting_vtotal);
 	bool (*edp_replay_residency)(const struct dc_link *link,
 			unsigned int *residency, const bool is_start,
 			const bool is_alpm);
 	bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
-			const unsigned int *power_opts, uint16_t coasting_vtotal);
+			const unsigned int *power_opts, uint32_t coasting_vtotal);

 	bool (*edp_wait_for_t12)(struct dc_link *link);
 	bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
|
||||
bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
|
||||
{
|
||||
struct dc *dc = link->ctx->dc;
|
||||
struct dmub_replay *replay = dc->res_pool->replay;
|
||||
@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
|
||||
}
|
||||
|
||||
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
|
||||
const unsigned int *power_opts, uint16_t coasting_vtotal)
|
||||
const unsigned int *power_opts, uint32_t coasting_vtotal)
|
||||
{
|
||||
struct dc *dc = link->ctx->dc;
|
||||
struct dmub_replay *replay = dc->res_pool->replay;
|
||||
|
@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
|
||||
bool edp_send_replay_cmd(struct dc_link *link,
|
||||
enum replay_FW_Message_type msg,
|
||||
union dmub_replay_cmd_set *cmd_data);
|
||||
bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
|
||||
bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
|
||||
bool edp_replay_residency(const struct dc_link *link,
|
||||
unsigned int *residency, const bool is_start, const bool is_alpm);
|
||||
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
|
||||
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
|
||||
const unsigned int *power_opts, uint16_t coasting_vtotal);
|
||||
const unsigned int *power_opts, uint32_t coasting_vtotal);
|
||||
bool edp_wait_for_t12(struct dc_link *link);
|
||||
bool edp_is_ilr_optimization_required(struct dc_link *link,
|
||||
struct dc_crtc_timing *crtc_timing);
|
||||
|
@ -557,7 +557,8 @@ struct dcn_optc_registers {
|
||||
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
|
||||
type OTG_CRC_DATA_FORMAT;\
|
||||
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
|
||||
type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
|
||||
type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
|
||||
type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
|
||||
|
||||
#define TG_REG_FIELD_LIST_DCN3_2(type) \
|
||||
type OTG_H_TIMING_DIV_MODE_MANUAL;
|
||||
|
@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
|
||||
}
|
||||
}
|
||||
|
||||
void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
|
||||
{
|
||||
struct optc *optc1 = DCN10TG_FROM_TG(tg);
|
||||
|
||||
REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
|
||||
}
|
||||
|
||||
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
|
||||
{
|
||||
struct optc *optc1 = DCN10TG_FROM_TG(optc);
|
||||
@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
|
||||
.set_odm_bypass = optc32_set_odm_bypass,
|
||||
.set_odm_combine = optc32_set_odm_combine,
|
||||
.get_odm_combine_segments = optc32_get_odm_combine_segments,
|
||||
.wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
|
||||
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
|
||||
.get_optc_source = optc2_get_optc_source,
|
||||
.set_out_mux = optc3_set_out_mux,
|
||||
|
@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
|
||||
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
|
||||
void optc32_set_odm_bypass(struct timing_generator *optc,
|
||||
const struct dc_crtc_timing *dc_crtc_timing);
|
||||
void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
|
||||
|
||||
#endif /* __DC_OPTC_DCN32_H__ */
|
||||
|
@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
|
||||
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
|
||||
|
||||
dcn32_override_min_req_memclk(dc, context);
|
||||
dcn32_override_min_req_dcfclk(dc, context);
|
||||
|
||||
BW_VAL_TRACE_END_WATERMARKS();
|
||||
|
||||
@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
|
||||
{
|
||||
DC_FP_START();
|
||||
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
|
||||
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
|
||||
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
|
||||
DC_FP_END();
|
||||
}
|
||||
|
||||
|
@ -42,6 +42,7 @@
|
||||
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
|
||||
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
|
||||
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
|
||||
#define MIN_SUBVP_DCFCLK_KHZ 400000
|
||||
|
||||
#define TO_DCN32_RES_POOL(pool)\
|
||||
container_of(pool, struct dcn32_resource_pool, base)
|
||||
@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
|
||||
|
||||
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
|
||||
|
||||
void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
|
||||
|
||||
/* definitions for run time init of reg offsets */
|
||||
|
||||
/* CLK SRC */
|
||||
|
@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
|
||||
{
|
||||
DC_FP_START();
|
||||
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
|
||||
if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
|
||||
dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
|
||||
DC_FP_END();
|
||||
}
|
||||
|
||||
|
@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
|
||||
* Currently the support is only for 0 or 1
|
||||
*/
|
||||
uint8_t panel_inst;
|
||||
/**
|
||||
* 16-bit value dictated by the driver that indicates the upper 16 bits of the coasting vtotal.
|
||||
*/
|
||||
uint16_t coasting_vtotal_high;
|
||||
/**
|
||||
* Explicit padding to 4 byte boundary.
|
||||
*/
|
||||
uint8_t pad[2];
|
||||
};
|
||||
|
||||
/**
|
||||
|
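The replay interface changes in this series widen coasting_vtotal from uint16_t to uint32_t, and the DMUB command data above gains a coasting_vtotal_high field; presumably the driver splits the 32-bit value into the existing low 16-bit field plus this new high word (the packing itself is not part of the hunks shown here). A sketch under that assumption:

/* Assumes the struct's existing 16-bit coasting_vtotal field carries the low word. */
static void pack_coasting_vtotal(struct dmub_cmd_replay_set_coasting_vtotal_data *data,
				 uint32_t coasting_vtotal)
{
	data->coasting_vtotal      = coasting_vtotal & 0xFFFF;	/* low 16 bits */
	data->coasting_vtotal_high = coasting_vtotal >> 16;	/* new high 16 bits */
}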
@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
|
||||
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
|
||||
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
|
||||
|
||||
if (!display)
|
||||
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
|
||||
|
||||
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
|
||||
|
||||
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
|
||||
|
@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
|
||||
|
||||
void set_replay_coasting_vtotal(struct dc_link *link,
|
||||
enum replay_coasting_vtotal_type type,
|
||||
uint16_t vtotal)
|
||||
uint32_t vtotal)
|
||||
{
|
||||
link->replay_settings.coasting_vtotal_table[type] = vtotal;
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
|
||||
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
|
||||
void set_replay_coasting_vtotal(struct dc_link *link,
|
||||
enum replay_coasting_vtotal_type type,
|
||||
uint16_t vtotal);
|
||||
uint32_t vtotal);
|
||||
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
|
||||
void calculate_replay_link_off_frame_count(struct dc_link *link,
|
||||
uint16_t vtotal, uint16_t htotal);
|
||||
|
@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
|
||||
uint32_t *max_power_limit,
|
||||
uint32_t *min_power_limit)
|
||||
{
|
||||
struct smu_11_0_powerplay_table *powerplay_table =
|
||||
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
|
||||
PPTable_t *pptable = smu->smu_table.driver_pptable;
|
||||
uint32_t power_limit, od_percent_upper, od_percent_lower;
|
||||
uint32_t power_limit;
|
||||
|
||||
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
|
||||
/* the last hope to figure out the ppt limit */
|
||||
@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
|
||||
*current_power_limit = power_limit;
|
||||
if (default_power_limit)
|
||||
*default_power_limit = power_limit;
|
||||
|
||||
if (smu->od_enabled)
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
else
|
||||
od_percent_upper = 0;
|
||||
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
|
||||
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
|
||||
od_percent_upper, od_percent_lower, power_limit);
|
||||
|
||||
if (max_power_limit) {
|
||||
*max_power_limit = power_limit * (100 + od_percent_upper);
|
||||
*max_power_limit /= 100;
|
||||
}
|
||||
|
||||
if (min_power_limit) {
|
||||
*min_power_limit = power_limit * (100 - od_percent_lower);
|
||||
*min_power_limit /= 100;
|
||||
}
|
||||
if (max_power_limit)
|
||||
*max_power_limit = power_limit;
|
||||
if (min_power_limit)
|
||||
*min_power_limit = power_limit;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
|
||||
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
|
||||
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
|
||||
PPTable_t *pptable = smu->smu_table.driver_pptable;
|
||||
uint32_t power_limit, od_percent_upper, od_percent_lower;
|
||||
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
|
||||
|
||||
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
|
||||
/* the last hope to figure out the ppt limit */
|
||||
@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
|
||||
if (default_power_limit)
|
||||
*default_power_limit = power_limit;
|
||||
|
||||
if (smu->od_enabled &&
|
||||
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
else
|
||||
od_percent_upper = 0;
|
||||
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
if (powerplay_table) {
|
||||
if (smu->od_enabled &&
|
||||
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
} else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
|
||||
od_percent_upper = 0;
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
|
||||
od_percent_upper, od_percent_lower, power_limit);
|
||||
|
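For context, the removed Arcturus lines above show the arithmetic these *_get_power_limit() helpers apply once the overdrive percentages are known: the default limit is scaled up by od_percent_upper and down by od_percent_lower, so forcing both percentages to 0 (no powerplay or overdrive table) collapses the max and min limits to the default. A small standalone sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t power_limit = 250;        /* default PPT limit, W (illustrative) */
	uint32_t od_percent_upper = 20;    /* from the overdrive table, or 0 */
	uint32_t od_percent_lower = 10;

	uint32_t max_power_limit = power_limit * (100 + od_percent_upper) / 100;
	uint32_t min_power_limit = power_limit * (100 - od_percent_lower) / 100;

	printf("max %u W, min %u W\n", max_power_limit, min_power_limit); /* 300, 225 */
	return 0;
}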
@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
|
||||
return throttler_status;
|
||||
}
|
||||
|
||||
static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
|
||||
enum SMU_11_0_7_ODFEATURE_CAP cap)
|
||||
{
|
||||
return od_table->cap[cap];
|
||||
}
|
||||
|
||||
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
|
||||
uint32_t *current_power_limit,
|
||||
uint32_t *default_power_limit,
|
||||
@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
|
||||
{
|
||||
struct smu_11_0_7_powerplay_table *powerplay_table =
|
||||
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
|
||||
uint32_t power_limit, od_percent_upper, od_percent_lower;
|
||||
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
|
||||
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
|
||||
uint16_t *table_member;
|
||||
|
||||
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
|
||||
@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
|
||||
if (default_power_limit)
|
||||
*default_power_limit = power_limit;
|
||||
|
||||
if (smu->od_enabled)
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
else
|
||||
od_percent_upper = 0;
|
||||
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
if (powerplay_table) {
|
||||
if (smu->od_enabled &&
|
||||
sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
} else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
|
||||
od_percent_upper = 0;
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
|
||||
od_percent_upper, od_percent_lower, power_limit);
|
||||
@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
|
||||
return dpm_desc->SnapToDiscrete == 0;
|
||||
}
|
||||
|
||||
static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
|
||||
enum SMU_11_0_7_ODFEATURE_CAP cap)
|
||||
{
|
||||
return od_table->cap[cap];
|
||||
}
|
||||
|
||||
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
|
||||
enum SMU_11_0_7_ODSETTING_ID setting,
|
||||
uint32_t *min, uint32_t *max)
|
||||
|
@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
|
||||
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
|
||||
PPTable_t *pptable = table_context->driver_pptable;
|
||||
SkuTable_t *skutable = &pptable->SkuTable;
|
||||
uint32_t power_limit, od_percent_upper, od_percent_lower;
|
||||
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
|
||||
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
|
||||
|
||||
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
|
||||
@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
|
||||
if (default_power_limit)
|
||||
*default_power_limit = power_limit;
|
||||
|
||||
if (smu->od_enabled)
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
|
||||
else
|
||||
od_percent_upper = 0;
|
||||
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
|
||||
if (powerplay_table) {
|
||||
if (smu->od_enabled &&
|
||||
smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
|
||||
} else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
|
||||
od_percent_upper = 0;
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
|
||||
od_percent_upper, od_percent_lower, power_limit);
|
||||
|
@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
|
||||
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
|
||||
PPTable_t *pptable = table_context->driver_pptable;
|
||||
SkuTable_t *skutable = &pptable->SkuTable;
|
||||
uint32_t power_limit, od_percent_upper, od_percent_lower;
|
||||
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
|
||||
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
|
||||
|
||||
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
|
||||
@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
|
||||
if (default_power_limit)
|
||||
*default_power_limit = power_limit;
|
||||
|
||||
if (smu->od_enabled)
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
else
|
||||
od_percent_upper = 0;
|
||||
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
if (powerplay_table) {
|
||||
if (smu->od_enabled &&
|
||||
(smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
|
||||
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
} else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
|
||||
od_percent_upper = 0;
|
||||
od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
|
||||
}
|
||||
}
|
||||
|
||||
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
|
||||
od_percent_upper, od_percent_lower, power_limit);
|
||||
|
@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
|
||||
static int lt8912_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
const struct drm_edid *drm_edid;
|
||||
int ret = -1;
|
||||
int num = 0;
|
||||
struct lt8912 *lt = connector_to_lt8912(connector);
|
||||
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
|
||||
int ret, num;
|
||||
|
||||
drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
|
||||
drm_edid_connector_update(connector, drm_edid);
|
||||
if (drm_edid) {
|
||||
num = drm_edid_connector_add_modes(connector);
|
||||
} else {
|
||||
return ret;
|
||||
}
|
||||
if (!drm_edid)
|
||||
return 0;
|
||||
|
||||
num = drm_edid_connector_add_modes(connector);
|
||||
|
||||
ret = drm_display_info_set_bus_formats(&connector->display_info,
|
||||
&bus_format, 1);
|
||||
if (ret)
|
||||
num = ret;
|
||||
if (ret < 0)
|
||||
num = 0;
|
||||
|
||||
drm_edid_free(drm_edid);
|
||||
return num;
|
||||
|
@ -294,8 +294,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
|
||||
static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
|
||||
unsigned int count;
|
||||
const struct drm_edid *drm_edid;
|
||||
int count;
|
||||
|
||||
drm_edid = drm_bridge_edid_read(lt9611uxc->bridge, connector);
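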
|
||||
drm_edid_connector_update(connector, drm_edid);
|
||||
|
@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
|
||||
* The modes probed from the panel are automatically added to the connector
|
||||
* that the panel is attached to.
|
||||
*
|
||||
* Return: The number of modes available from the panel on success or a
|
||||
* negative error code on failure.
|
||||
* Return: The number of modes available from the panel on success, or 0 on
|
||||
* failure (no modes).
|
||||
*/
|
||||
int drm_panel_get_modes(struct drm_panel *panel,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
if (!panel)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
if (panel->funcs && panel->funcs->get_modes)
|
||||
return panel->funcs->get_modes(panel, connector);
|
||||
if (panel->funcs && panel->funcs->get_modes) {
|
||||
int num;
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
num = panel->funcs->get_modes(panel, connector);
|
||||
if (num > 0)
|
||||
return num;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_panel_get_modes);
|
||||
|
||||
|
@ -422,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
|
||||
|
||||
count = connector_funcs->get_modes(connector);
|
||||
|
||||
/* The .get_modes() callback should not return negative values. */
|
||||
if (count < 0) {
|
||||
drm_err(connector->dev, ".get_modes() returned %pe\n",
|
||||
ERR_PTR(count));
|
||||
count = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
|
||||
* override/firmware EDID.
|
||||
|
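With the probe helper now clamping negative returns, the .get_modes() contract (also restated in the drm_connector_helper_funcs kernel-doc near the end of this series) is: return the number of probed modes, and 0 rather than an errno when probing fails. A minimal connector callback following the same pattern as the Exynos and vc4 fixes below; the foo_* names are placeholders, not a real driver:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_device *foo = connector_to_foo(connector);	/* hypothetical */
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, foo->ddc);
	if (!edid)
		return 0;	/* no EDID: report zero modes, not -ENODEV */

	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}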
@ -74,16 +74,15 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
|
||||
{
|
||||
struct exynos_dp_device *dp = to_dp(plat_data);
|
||||
struct drm_display_mode *mode;
|
||||
int num_modes = 0;
|
||||
|
||||
if (dp->plat_data.panel)
|
||||
return num_modes;
|
||||
return 0;
|
||||
|
||||
mode = drm_mode_create(connector->dev);
|
||||
if (!mode) {
|
||||
DRM_DEV_ERROR(dp->dev,
|
||||
"failed to create a new display mode.\n");
|
||||
return num_modes;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_display_mode_from_videomode(&dp->vm, mode);
|
||||
@ -94,7 +93,7 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
|
||||
drm_mode_set_name(mode);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
return num_modes + 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
|
||||
|
@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
|
||||
*/
|
||||
if (!ctx->raw_edid) {
|
||||
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
|
||||
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
|
||||
if (!edid) {
|
||||
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_connector_update_edid_property(connector, edid);
|
||||
|
@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
|
||||
int ret;
|
||||
|
||||
if (!hdata->ddc_adpt)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
edid = drm_get_edid(connector, hdata->ddc_adpt);
|
||||
if (!edid)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
hdata->dvi_mode = !connector->display_info.is_hdmi;
|
||||
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
|
||||
|
@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
|
||||
int ret;
|
||||
|
||||
if (!mode)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
ret = of_get_drm_display_mode(np, &imxpd->mode,
|
||||
&imxpd->bus_flags,
|
||||
OF_USE_NATIVE_MODE);
|
||||
if (ret) {
|
||||
drm_mode_destroy(connector->dev, mode);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
drm_mode_copy(mode, &imxpd->mode);
|
||||
|
@ -1256,6 +1256,8 @@ out:
|
||||
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
|
||||
bdev->dev_mapping);
|
||||
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
|
||||
nvbo->bo.resource->bus.offset = 0;
|
||||
nvbo->bo.resource->bus.addr = NULL;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,6 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
|
||||
|
||||
rm->dtor = r535_devinit_dtor;
|
||||
rm->post = hw->post;
|
||||
rm->disable = hw->disable;
|
||||
|
||||
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
|
||||
if (ret)
|
||||
|
@ -1430,6 +1430,10 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
|
||||
|
||||
/**
|
||||
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
|
||||
* @priv: gsp pointer
|
||||
* @fn: function number (ignored)
|
||||
* @repv: pointer to libos print RPC
|
||||
* @repc: message size
|
||||
*
|
||||
* The GSP sequencer is a list of I/O commands that the GSP can send to
|
||||
* the driver to perform for various purposes. The most common usage is to
|
||||
@ -1781,6 +1785,7 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
|
||||
|
||||
/**
|
||||
* r535_gsp_libos_init() -- create the libos arguments structure
|
||||
* @gsp: gsp pointer
|
||||
*
|
||||
* The logging buffers are byte queues that contain encoded printf-like
|
||||
* messages from GSP-RM. They need to be decoded by a special application
|
||||
@ -1920,6 +1925,10 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
|
||||
|
||||
/**
|
||||
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
|
||||
* @gsp: gsp pointer
|
||||
* @sgt: S/G list to traverse
|
||||
* @size: size of the image, in bytes
|
||||
* @rx3: radix3 array to update
|
||||
*
|
||||
* The GSP uses a three-level page table, called radix3, to map the firmware.
|
||||
* Each 64-bit "pointer" in the table is either the bus address of an entry in
|
||||
|
@ -166,7 +166,7 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
|
||||
unsigned long long clock)
|
||||
{
|
||||
const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
|
||||
unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
|
||||
unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
|
||||
long rounded_rate;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
|
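The sun4i change above is the classic 32-bit build fix: clock is unsigned long long, and a plain 64-bit division makes the compiler emit a call to a libgcc helper (e.g. __udivdi3 or __aeabi_uldivmod) that the kernel does not link against, so 32-bit architectures fail to build. div_u64() from <linux/math64.h> performs the same divide portably. A fragment showing the pattern, not a full driver:

#include <linux/math64.h>

static unsigned long clock_tolerance(unsigned long long clock)
{
	/* divides a u64 without emitting __udivdi3 on 32-bit builds */
	return div_u64(clock, 200);	/* +-0.5% of the clock */
}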
@ -509,7 +509,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
|
||||
edid = drm_get_edid(connector, vc4_hdmi->ddc);
|
||||
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
|
||||
if (!edid)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
drm_connector_update_edid_property(connector, edid);
|
||||
ret = drm_add_edid_modes(connector, edid);
|
||||
|
@ -235,6 +235,29 @@ retry:
|
||||
goto err_unlock_list;
|
||||
}
|
||||
|
||||
if (!args->num_batch_buffer) {
|
||||
err = xe_vm_lock(vm, true);
|
||||
if (err)
|
||||
goto err_unlock_list;
|
||||
|
||||
if (!xe_vm_in_lr_mode(vm)) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
|
||||
if (IS_ERR(fence)) {
|
||||
err = PTR_ERR(fence);
|
||||
goto err_unlock_list;
|
||||
}
|
||||
for (i = 0; i < num_syncs; i++)
|
||||
xe_sync_entry_signal(&syncs[i], NULL, fence);
|
||||
xe_exec_queue_last_fence_set(q, vm, fence);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
|
||||
xe_vm_unlock(vm);
|
||||
goto err_unlock_list;
|
||||
}
|
||||
|
||||
vm_exec.vm = &vm->gpuvm;
|
||||
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
|
||||
if (xe_vm_in_lr_mode(vm)) {
|
||||
@ -254,24 +277,6 @@ retry:
|
||||
goto err_exec;
|
||||
}
|
||||
|
||||
if (!args->num_batch_buffer) {
|
||||
if (!xe_vm_in_lr_mode(vm)) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
|
||||
if (IS_ERR(fence)) {
|
||||
err = PTR_ERR(fence);
|
||||
goto err_exec;
|
||||
}
|
||||
for (i = 0; i < num_syncs; i++)
|
||||
xe_sync_entry_signal(&syncs[i], NULL, fence);
|
||||
xe_exec_queue_last_fence_set(q, vm, fence);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
|
||||
goto err_exec;
|
||||
}
|
||||
|
||||
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
|
||||
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
|
||||
skip_retry = true;
|
||||
|
@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
|
||||
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
|
||||
{
|
||||
return BIT(tile->id) & vma->tile_present &&
|
||||
!(BIT(tile->id) & vma->usm.tile_invalidated);
|
||||
!(BIT(tile->id) & vma->tile_invalidated);
|
||||
}
|
||||
|
||||
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
|
||||
@ -226,7 +226,7 @@ retry_userptr:
|
||||
|
||||
if (xe_vma_is_userptr(vma))
|
||||
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
|
||||
vma->usm.tile_invalidated &= ~BIT(tile->id);
|
||||
vma->tile_invalidated &= ~BIT(tile->id);
|
||||
|
||||
unlock_dma_resv:
|
||||
drm_exec_fini(&exec);
|
||||
|
@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
|
||||
TP_ARGS(vma)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
|
||||
DEFINE_EVENT(xe_vma, xe_vma_invalidate,
|
||||
TP_PROTO(struct xe_vma *vma),
|
||||
TP_ARGS(vma)
|
||||
);
|
||||
|
@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
|
||||
int err = 0;
|
||||
LIST_HEAD(tmp_evict);
|
||||
|
||||
xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
|
||||
lockdep_assert_held_write(&vm->lock);
|
||||
|
||||
/* Collect invalidated userptrs */
|
||||
@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
|
||||
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
|
||||
userptr.repin_link) {
|
||||
err = xe_vma_userptr_pin_pages(uvma);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (err == -EFAULT) {
|
||||
list_del_init(&uvma->userptr.repin_link);
|
||||
|
||||
list_del_init(&uvma->userptr.repin_link);
|
||||
list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
|
||||
/* Wait for pending binds */
|
||||
xe_vm_lock(vm, false);
|
||||
dma_resv_wait_timeout(xe_vm_resv(vm),
|
||||
DMA_RESV_USAGE_BOOKKEEP,
|
||||
false, MAX_SCHEDULE_TIMEOUT);
|
||||
|
||||
err = xe_vm_invalidate_vma(&uvma->vma);
|
||||
xe_vm_unlock(vm);
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
list_del_init(&uvma->userptr.repin_link);
|
||||
list_move_tail(&uvma->vma.combined_links.rebind,
|
||||
&vm->rebind_list);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -2024,7 +2041,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
|
||||
return err;
|
||||
}
|
||||
|
||||
if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
|
||||
if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
|
||||
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
|
||||
true, first_op, last_op);
|
||||
} else {
|
||||
@ -3214,9 +3231,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
|
||||
u8 id;
|
||||
int ret;
|
||||
|
||||
xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
|
||||
xe_assert(xe, !xe_vma_is_null(vma));
|
||||
trace_xe_vma_usm_invalidate(vma);
|
||||
trace_xe_vma_invalidate(vma);
|
||||
|
||||
/* Check that we don't race with page-table updates */
|
||||
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
|
||||
@ -3254,7 +3270,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
|
||||
}
|
||||
}
|
||||
|
||||
vma->usm.tile_invalidated = vma->tile_mask;
|
||||
vma->tile_invalidated = vma->tile_mask;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -84,11 +84,8 @@ struct xe_vma {
|
||||
struct work_struct destroy_work;
|
||||
};
|
||||
|
||||
/** @usm: unified shared memory state */
|
||||
struct {
|
||||
/** @tile_invalidated: VMA has been invalidated */
|
||||
u8 tile_invalidated;
|
||||
} usm;
|
||||
/** @tile_invalidated: VMA has been invalidated */
|
||||
u8 tile_invalidated;
|
||||
|
||||
/** @tile_mask: Tile mask of where to create binding for this VMA */
|
||||
u8 tile_mask;
|
||||
|
@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
|
||||
return;
|
||||
|
||||
kobj = kobject_create_and_add("memory", tile->sysfs);
|
||||
if (!kobj)
|
||||
if (!kobj) {
|
||||
drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
|
||||
return;
|
||||
}
|
||||
|
||||
err = sysfs_create_group(kobj, &freq_group_attrs);
|
||||
if (err) {
|
||||
|
@ -541,7 +541,7 @@ struct drm_bridge_funcs {
|
||||
* The @get_modes callback is mostly intended to support non-probeable
|
||||
* displays such as many fixed panels. Bridges that support reading
|
||||
* EDID shall leave @get_modes unimplemented and implement the
|
||||
* &drm_bridge_funcs->get_edid callback instead.
|
||||
* &drm_bridge_funcs->edid_read callback instead.
|
||||
*
|
||||
* This callback is optional. Bridges that implement it shall set the
|
||||
* DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
|
||||
@ -687,7 +687,7 @@ enum drm_bridge_ops {
|
||||
/**
|
||||
* @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
|
||||
* connected to its output. Bridges that set this flag shall implement
|
||||
* the &drm_bridge_funcs->get_edid callback.
|
||||
* the &drm_bridge_funcs->edid_read callback.
|
||||
*/
|
||||
DRM_BRIDGE_OP_EDID = BIT(1),
|
||||
/**
|
||||
|
@ -71,7 +71,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
|
||||
}
|
||||
|
||||
#define DRM_FIXED_POINT 32
|
||||
#define DRM_FIXED_POINT_HALF 16
|
||||
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
|
||||
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
|
||||
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
|
||||
@ -90,7 +89,7 @@ static inline int drm_fixp2int(s64 a)
|
||||
|
||||
static inline int drm_fixp2int_round(s64 a)
|
||||
{
|
||||
return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
|
||||
return drm_fixp2int(a + DRM_FIXED_ONE / 2);
|
||||
}
|
||||
|
||||
static inline int drm_fixp2int_ceil(s64 a)
|
||||
|
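The one-line change above is easier to see with concrete numbers: DRM_FIXED_POINT is 32, so one in the 32.32 format is 1 << 32 and the "half" needed for round-to-nearest is 1 << 31. The old code added only 1 << (DRM_FIXED_POINT_HALF - 1) = 1 << 15, so values such as 0.5 still truncated to 0. A minimal user-space sketch (ad hoc names, not the kernel header) reproducing both behaviours:

#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT      32
#define FIXED_POINT_HALF 16
#define FIXED_ONE        (1ULL << FIXED_POINT)

static int fixp2int(int64_t a)   { return a >> FIXED_POINT; }
static int round_old(int64_t a)  { return fixp2int(a + (1 << (FIXED_POINT_HALF - 1))); }
static int round_new(int64_t a)  { return fixp2int(a + FIXED_ONE / 2); }

int main(void)
{
	int64_t half = FIXED_ONE / 2;	/* 0.5 in 32.32 fixed point */

	/* old rounding adds only 1 << 15, far less than 0.5, so 0.5 -> 0 */
	printf("old: %d new: %d\n", round_old(half), round_new(half)); /* old: 0 new: 1 */
	return 0;
}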
@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
|
||||
*
|
||||
* RETURNS:
|
||||
*
|
||||
* The number of modes added by calling drm_mode_probed_add().
|
||||
* The number of modes added by calling drm_mode_probed_add(). Return 0
|
||||
* on failures (no modes) instead of negative error codes.
|
||||
*/
|
||||
int (*get_modes)(struct drm_connector *connector);
|
||||
|
||||
|