commit 1ada9c0740: Merge tag 'drm-fixes-2023-08-18-1' of git://anongit.freedesktop.org/drm/drm

drm fixes for 6.5-rc7

Pull drm fixes from Dave Airlie:
 "Regular enough week, mostly the usual amdgpu and i915 fixes. Also
  qaic, nouveau, qxl and a revert for an EDID patch that had some side
  effects, along with a couple of panel fixes.

  edid:
   - revert mode parsing fix that had side effects

  i915:
   - Fix the flow for ignoring GuC SLPC efficient frequency selection
   - Fix SDVO panel_type initialization
   - Fix display probe for IVB Q and IVB D GT2 server

  nouveau:
   - fix use-after-free in connector code

  qaic:
   - integer overflow check fix
   - fix slicing memory leak

  panel:
   - fix JDI LT070ME05000 probing
   - fix AUO G121EAN01 timings

  amdgpu:
   - SMU 13.x fixes
   - Fix mcbp parameter for gfx9
   - SMU 11.x fixes
   - Temporary fix for large numbers of XCP partitions
   - S0ix fixes
   - DCN 2.0 fix

  qxl:
   - fix use-after-free race in dumb object allocation"

* tag 'drm-fixes-2023-08-18-1' of git://anongit.freedesktop.org/drm/drm:
  drm/qxl: fix UAF on handle creation
  Revert "drm/edid: Fix csync detailed mode parsing"
  drm/nouveau/disp: fix use-after-free in error handling of nouveau_connector_create
  Revert "Revert "drm/amdgpu/display: change pipe policy for DCN 2.0""
  drm/amd: flush any delayed gfxoff on suspend entry
  drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix
  drm/amdgpu: skip xcp drm device allocation when out of drm resource
  drm/amd/pm: Update pci link width for smu v13.0.6
  drm/amd/pm: Fix temperature unit of SMU v13.0.6
  drm/amdgpu/pm: fix throttle_status for other than MP1 11.0.7
  drm/amdgpu: disable mcbp if parameter zero is set
  drm/amd/pm: disallow the fan setting if there is no fan on smu 13.0.0
  accel/qaic: Clean up integer overflow checking in map_user_pages()
  accel/qaic: Fix slicing memory leak
  drm/i915: fix display probe for IVB Q and IVB D GT2 server
  drm/i915/sdvo: fix panel_type initialization
  drm/i915/guc/slpc: Restore efficient freq earlier
  drm/panel: simple: Fix AUO G121EAN01 panel timings according to the docs
  drm/panel: JDI LT070ME05000 simplify with dev_err_probe()
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
 				   struct qaic_manage_trans_dma_xfer *in_trans,
 				   struct ioctl_resources *resources, struct dma_xfer *xfer)
 {
+	u64 xfer_start_addr, remaining, end, total;
 	unsigned long need_pages;
 	struct page **page_list;
 	unsigned long nr_pages;
 	struct sg_table *sgt;
-	u64 xfer_start_addr;
 	int ret;
 	int i;
 
-	xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
+	if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
+		return -EINVAL;
 
-	need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
-				  resources->xferred_dma_size, PAGE_SIZE);
+	if (in_trans->size < resources->xferred_dma_size)
+		return -EINVAL;
+	remaining = in_trans->size - resources->xferred_dma_size;
+	if (remaining == 0)
+		return 0;
+
+	if (check_add_overflow(xfer_start_addr, remaining, &end))
+		return -EINVAL;
+
+	total = remaining + offset_in_page(xfer_start_addr);
+	if (total >= SIZE_MAX)
+		return -EINVAL;
+
+	need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
 
 	nr_pages = need_pages;
 
@@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
 
 	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
 					offset_in_page(xfer_start_addr),
-					in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
+					remaining, GFP_KERNEL);
 	if (ret) {
 		ret = -ENOMEM;
 		goto free_sgt;
@@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
 	    QAIC_MANAGE_EXT_MSG_LENGTH)
 		return -ENOMEM;
 
-	if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
-		return -EINVAL;
-
 	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
 	if (!xfer)
 		return -ENOMEM;
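The qaic change above swaps an open-coded wraparound test for check_add_overflow() from the kernel's <linux/overflow.h>. A minimal userspace sketch of the same pattern, built on the GCC/Clang __builtin_add_overflow() that backs the kernel macro (the helper name and values are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the kernel's check_add_overflow() semantics: returns
     * nonzero when a + b wraps, and stores the (wrapped) sum in *res. */
    static int add_overflows_u64(uint64_t a, uint64_t b, uint64_t *res)
    {
            return __builtin_add_overflow(a, b, res);
    }

    int main(void)
    {
            uint64_t start;

            /* Same shape as the patched code: reject a caller-controlled
             * base + offset that wraps instead of computing it blindly. */
            if (add_overflows_u64(UINT64_MAX - 3, 16, &start)) {
                    fprintf(stderr, "rejecting wrapped transfer start\n");
                    return 1;
            }
            printf("transfer starts at %llu\n", (unsigned long long)start);
            return 0;
    }

The point of the idiom is that the sum is validated and produced in one step, so there is no window where unvalidated arithmetic can feed a later size calculation.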
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	bo->dbc = dbc;
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
 	drm_gem_object_put(obj);
+	kfree(slice_ent);
 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3722,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 {
 	if (amdgpu_mcbp == 1)
 		adev->gfx.mcbp = true;
-
-	if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
-	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
-	    adev->gfx.num_gfx_rings)
+	else if (amdgpu_mcbp == 0)
+		adev->gfx.mcbp = false;
+	else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+		 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+		 adev->gfx.num_gfx_rings)
 		adev->gfx.mcbp = true;
 
 	if (amdgpu_sriov_vf(adev))
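The mcbp hunk turns the module parameter into an explicit tri-state: 1 forces mid-command-buffer preemption on, 0 forces it off, and anything else (the -1 default) falls back to the per-ASIC heuristic. A small sketch of that contract, with invented names (resolve_mcbp is not a kernel function):

    #include <stdbool.h>
    #include <stdio.h>

    /* -1 = auto, 0 = force off, 1 = force on; mirrors amdgpu_mcbp's contract. */
    static int param_mcbp = -1;

    static bool resolve_mcbp(int param, bool hw_supports_preemption)
    {
            if (param == 1)
                    return true;            /* user forced on */
            else if (param == 0)
                    return false;           /* user forced off */
            /* auto: fall back to the hardware heuristic */
            return hw_supports_preemption;
    }

    int main(void)
    {
            printf("mcbp=%d -> %s\n", param_mcbp,
                   resolve_mcbp(param_mcbp, true) ? "on" : "off");
            return 0;
    }

The next hunk, from the same amdgpu_device.c, is the suspend-path gfxoff flush.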
@@ -4393,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
 
 	amdgpu_ras_suspend(adev);
 
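The suspend hunk above flushes the GFXOFF delayed work rather than cancelling it: flush_delayed_work() runs a pending handler immediately, while a cancel would silently skip the power-state transition the handler was queued to perform. A kernel-style sketch of the idea, assuming a driver-private work item (mydev and off_work_fn are invented; the workqueue calls are the real API; this is not a buildable module on its own):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct mydev {
            struct delayed_work off_work;
    };

    static void off_work_fn(struct work_struct *work)
    {
            /* power-gate the block; must have run before suspend */
    }

    static void mydev_init(struct mydev *dev)
    {
            INIT_DELAYED_WORK(&dev->off_work, off_work_fn);
            schedule_delayed_work(&dev->off_work, msecs_to_jiffies(100));
    }

    static void mydev_suspend(struct mydev *dev)
    {
            /*
             * Run the handler now if it is still queued, so the device
             * reaches the intended low-power state before S3/S0ix,
             * rather than cancelling and losing that transition.
             */
            flush_delayed_work(&dev->off_work);
    }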
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -551,6 +551,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring that to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool is_gfx_power_domain = false;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_SDMA:
+		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+		if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+			is_gfx_power_domain = true;
+		break;
+	case AMDGPU_RING_TYPE_GFX:
+	case AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+	case AMDGPU_RING_TYPE_MES:
+		is_gfx_power_domain = true;
+		break;
+	default:
+		break;
+	}
+
+	return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
 /**
  * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
@@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		amdgpu_fence_driver_force_completion(ring);
 
 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
-		    ring->fence_drv.irq_src)
+		    ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 
@@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 			continue;
 
 		/* enable the interrupt */
-		if (ring->fence_drv.irq_src)
+		if (ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
 		if (adev->gfx.gfx_off_req_count == 0 &&
 		    !adev->gfx.gfx_off_state) {
-			/* If going to s2idle, no need to wait */
-			if (adev->in_s0ix) {
-				if (!amdgpu_dpm_set_powergating_by_smu(adev,
-						AMD_IP_BLOCK_TYPE_GFX, true))
-					adev->gfx.gfx_off_state = true;
-			} else {
-				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 					      delay);
-			}
 		}
 	} else {
 		if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
 	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
 
 	WARN_ON(!ring->is_sw_ring);
-	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+	if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
 		if (amdgpu_mcbp_scan(mux) > 0)
 			amdgpu_mcbp_trigger_preempt(mux);
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
 
 	for (i = 1; i < MAX_XCP; i++) {
 		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
-		if (ret)
+		if (ret == -ENOSPC) {
+			dev_warn(adev->dev,
+			"Skip xcp node #%d when out of drm node resource.", i);
+			return 0;
+		} else if (ret) {
 			return ret;
+		}
 
 		/* Redirect all IOCTLs to the primary device */
 		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
@@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
 		return 0;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
 		if (ret)
 			return ret;
@@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
 		return;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		p_ddev = adev->xcp_mgr->xcp[i].ddev;
 		drm_dev_unplug(p_ddev);
 		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
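In the amdgpu_xcp.c hunks above, -ENOSPC from the drm node allocator now means "stop creating partitions and keep what we have", while any other error remains fatal; the register and unplug loops then stop at the first slot without a device. The same degrade-instead-of-fail loop in miniature (alloc_node and the three-node budget are invented for illustration):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_NODES 8

    /* Stand-in allocator: pretend the pool only holds three nodes. */
    static int alloc_node(int i, int *out)
    {
            if (i >= 3)
                    return -ENOSPC;
            *out = i + 1;
            return 0;
    }

    int main(void)
    {
            int nodes[MAX_NODES] = { 0 };
            int i, ret;

            for (i = 0; i < MAX_NODES; i++) {
                    ret = alloc_node(i, &nodes[i]);
                    if (ret == -ENOSPC) {
                            /* same shape as the xcp fix: degrade, don't fail */
                            fprintf(stderr, "skip node #%d: out of resources\n", i);
                            break;
                    } else if (ret) {
                            return 1;       /* any other error stays fatal */
                    }
            }
            /* later loops stop at the first empty slot, like dev_register/unplug */
            for (i = 0; i < MAX_NODES && nodes[i]; i++)
                    printf("registered node %d\n", nodes[i]);
            return 0;
    }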
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu)
 	const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
 
 	gpu_id = kfd_generate_gpu_id(gpu);
-	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	if (gpu->xcp && !gpu->xcp->ddev) {
+		dev_warn(gpu->adev->dev,
+		"Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
+		gpu_id);
+		return 0;
+	} else {
+		pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	}
 
 	/* Check to see if this gpu device exists in the topology_device_list.
 	 * If so, assign the gpu to that device,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.timing_trace = false,
 		.clock_trace = true,
 		.disable_pplib_clock_request = true,
-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
 		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -588,7 +588,9 @@ err0_out:
 	return -ENOMEM;
 }
 
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
+static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
+							    bool use_metrics_v3,
+							    bool use_metrics_v2)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
 	SmuMetricsExternal_t *metrics_ext =
@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
 	uint32_t throttler_status = 0;
 	int i;
 
-	if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	     (smu->smc_fw_version >= 0x3A4900)) {
+	if (use_metrics_v3) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
-	} else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	     (smu->smc_fw_version >= 0x3A4300)) {
+	} else if (use_metrics_v2) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
 			metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_THROTTLER_STATUS:
-		*value = sienna_cichlid_get_throttler_status_locked(smu);
+		*value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 		break;
 	case METRICS_CURR_FANSPEED:
 		*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
 		use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
 
-	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
+	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 	gpu_metrics->indep_throttle_status =
 			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
 							   sienna_cichlid_throttler_map);
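The sienna_cichlid hunks push the metrics-version decision out to the callers, which already computed use_metrics_v3/use_metrics_v2, instead of re-deriving it from firmware versions inside the helper. The helper's core is the common one-bit-per-indicator fold; a standalone sketch (the field layout is simplified from the SmuMetrics arrays):

    #include <stdint.h>
    #include <stdio.h>

    #define THROTTLER_COUNT 32

    /* Fold an array of per-source throttling percentages into a status
     * bitmask, as sienna_cichlid_get_throttler_status_locked() does. */
    static uint32_t throttler_status(const uint8_t pct[THROTTLER_COUNT])
    {
            uint32_t status = 0;
            int i;

            for (i = 0; i < THROTTLER_COUNT; i++)
                    status |= pct[i] ? 1U << i : 0;
            return status;
    }

    int main(void)
    {
            uint8_t pct[THROTTLER_COUNT] = { [3] = 40, [17] = 5 };

            printf("status = 0x%08x\n", throttler_status(pct)); /* bits 3 and 17 */
            return 0;
    }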
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -331,6 +331,7 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	struct smu_13_0_0_powerplay_table *powerplay_table =
 		table_context->power_play_table;
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
 #if 0
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	const OverDriveLimits_t * const overdrive_upperlimits =
@@ -371,6 +372,9 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	table_context->thermal_controller_type =
 		powerplay_table->thermal_controller_type;
 
+	smu->adev->pm.no_fan =
+		!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
+
 	return 0;
 }
 
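The smu_v13_0_0 change samples a firmware feature word once at probe time and caches the result as pm.no_fan, so later fan ioctls can bail out early on fanless boards. In isolation the test is a plain feature-bit check; in this sketch the bit position is a stand-in, not the real FEATURE_FAN_CONTROL_BIT value:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_FAN_CONTROL_BIT 7       /* position chosen for illustration */

    static bool board_has_no_fan(const uint32_t features_to_run[2])
    {
            /* no_fan is true when the firmware never runs fan control */
            return !(features_to_run[0] & (1U << FEATURE_FAN_CONTROL_BIT));
    }

    int main(void)
    {
            uint32_t features[2] = { 1U << FEATURE_FAN_CONTROL_BIT, 0 };

            printf("no_fan=%d\n", board_has_no_fan(features));
            return 0;
    }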
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -81,9 +81,10 @@
 #define EPSILON 1
 
 #define smnPCIE_ESM_CTRL 0x193D0
-#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define MAX_LINK_WIDTH 6
 
 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
@@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 		*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
 		break;
 	case METRICS_TEMPERATURE_HOTSPOT:
-		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_TEMPERATURE_MEM:
-		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	/* This is the max of all VRs and not just SOC VR.
 	 * No need to define another data type for the same.
 	 */
 	case METRICS_TEMPERATURE_VRSOC:
-		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	default:
 		*value = UINT_MAX;
@@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, inst0, xcc0;
 	MetricsTable_t *metrics;
+	u16 link_width_level;
 
 	inst0 = adev->sdma.instance[0].aid_id;
 	xcc0 = GET_INST(GC, 0);
@@ -2016,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 	gpu_metrics->throttle_status = 0;
 
 	if (!(adev->flags & AMD_IS_APU)) {
+		link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
+		if (link_width_level > MAX_LINK_WIDTH)
+			link_width_level = 0;
+
 		gpu_metrics->pcie_link_width =
-			smu_v13_0_6_get_current_pcie_link_width_level(smu);
+			DECODE_LANE_WIDTH(link_width_level);
 		gpu_metrics->pcie_link_speed =
 			smu_v13_0_6_get_current_pcie_link_speed(smu);
 	}
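Two conventions in the smu_v13_0_6 hunks are worth unpacking. SMUQ10_TO_UINT() drops the ten fractional bits of the firmware's Q10.10 fixed-point readings, and the new PCIe code clamps the raw link-width level before decoding it, so a bad firmware value can no longer feed the decode step unchecked. A sketch of both, where the lane table is an assumed stand-in for what DECODE_LANE_WIDTH() computes:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LINK_WIDTH 6

    /* Q10.10 fixed point -> integer part, like SMUQ10_TO_UINT(). */
    static uint32_t q10_to_uint(uint32_t q) { return q >> 10; }

    /* Clamp an untrusted level before mapping it to a lane count. */
    static int decode_lane_width(uint16_t level)
    {
            static const int lanes[MAX_LINK_WIDTH + 1] = { 0, 1, 2, 4, 8, 12, 16 };

            if (level > MAX_LINK_WIDTH)
                    level = 0;      /* same fallback the patch applies */
            return lanes[level];
    }

    int main(void)
    {
            printf("temp=%u C, width=x%d\n",
                   q10_to_uint(95 << 10), decode_lane_width(4));
            return 0;
    }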
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3456,6 +3456,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
 			    connector->base.id, connector->name);
 		return NULL;
 	}
+	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
+			    connector->base.id, connector->name);
+	}
 
 	/* it is incorrect if hsync/vsync width is zero */
 	if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3502,27 +3506,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
 	if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
 	} else {
-		switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
-		case DRM_EDID_PT_ANALOG_CSYNC:
-		case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
-			drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
-				    connector->base.id, connector->name);
-			mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
-			break;
-		case DRM_EDID_PT_DIGITAL_CSYNC:
-			drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
-				    connector->base.id, connector->name);
-			mode->flags |= DRM_MODE_FLAG_CSYNC;
-			mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
-			break;
-		case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
-			mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
-			mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
-			break;
-		}
+		mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+		mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 	}
 
 set_size:
|
||||
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
|
||||
};
|
||||
|
||||
/*
|
||||
* Separate detection for no display cases to keep the display id array simple.
|
||||
*
|
||||
* IVB Q requires subvendor and subdevice matching to differentiate from IVB D
|
||||
* GT2 server.
|
||||
*/
|
||||
static bool has_no_display(struct pci_dev *pdev)
|
||||
{
|
||||
static const struct pci_device_id ids[] = {
|
||||
INTEL_IVB_Q_IDS(0),
|
||||
{}
|
||||
};
|
||||
|
||||
return pci_match_id(ids, pdev);
|
||||
}
|
||||
|
||||
#undef INTEL_VGA_DEVICE
|
||||
#undef INTEL_QUANTA_VGA_DEVICE
|
||||
#define INTEL_VGA_DEVICE(id, info) { id, info }
|
||||
#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
|
||||
|
||||
static const struct {
|
||||
u32 devid;
|
||||
@ -690,7 +704,6 @@ static const struct {
|
||||
INTEL_IRONLAKE_M_IDS(&ilk_m_display),
|
||||
INTEL_SNB_D_IDS(&snb_display),
|
||||
INTEL_SNB_M_IDS(&snb_display),
|
||||
INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */
|
||||
INTEL_IVB_M_IDS(&ivb_display),
|
||||
INTEL_IVB_D_IDS(&ivb_display),
|
||||
INTEL_HSW_IDS(&hsw_display),
|
||||
@ -775,6 +788,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
|
||||
if (has_gmdid)
|
||||
return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
|
||||
|
||||
if (has_no_display(pdev)) {
|
||||
drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
|
||||
return &no_display;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
|
||||
if (intel_display_ids[i].devid == pdev->device)
|
||||
return intel_display_ids[i].info;
|
||||
|
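has_no_display() leans on pci_match_id(), which walks a {}-terminated pci_device_id table and returns the matching entry or NULL, so the result works directly as a boolean. A kernel-style sketch of the same idiom (the device ID here is made up; only the shape matches the patch; not a buildable module on its own):

    #include <linux/pci.h>

    static bool is_headless_variant(struct pci_dev *pdev)
    {
            static const struct pci_device_id headless_ids[] = {
                    /* one hypothetical device, any subvendor/subdevice */
                    { PCI_DEVICE(0x8086, 0x1234) },
                    { }     /* sentinel terminates the walk */
            };

            return pci_match_id(headless_ids, pdev) != NULL;
    }

Keeping the "no display" cases in their own table, as the patch does, avoids threading NULL info pointers through the main device-ID array.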
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
 	__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
 					    &conn_state->base.base);
 
-	INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+	intel_panel_init_alloc(&sdvo_connector->base);
 
 	return sdvo_connector;
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
 	ret = slpc_set_param(slpc,
 			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
 			     val);
-	if (ret)
+	if (ret) {
 		guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
 				val, ERR_PTR(ret));
-	else
+	} else {
 		slpc->ignore_eff_freq = val;
 
+		/* Set min to RPn when we disable efficient freq */
+		if (val)
+			ret = slpc_set_param(slpc,
+					     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+					     slpc->min_freq);
+	}
+
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&slpc->lock);
 	return ret;
@@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
 			return ret;
 
 	if (!slpc->min_freq_softlimit) {
-		ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
-		if (unlikely(ret))
-			return ret;
+		/* Min softlimit is initialized to RPn */
+		slpc->min_freq_softlimit = slpc->min_freq;
 		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
 	} else {
 		return intel_guc_slpc_set_min_freq(slpc,
@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
 		return ret;
 	}
 
+	/* Set cached value of ignore efficient freq */
+	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
 	/* Revert SLPC min/max to softlimits if necessary */
 	ret = slpc_set_softlimits(slpc);
 	if (unlikely(ret)) {
@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
 	/* Set cached media freq ratio mode */
 	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
 
-	/* Set cached value of ignore efficient freq */
-	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev,
 		ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
 				     &nv_connector->conn);
 		if (ret) {
-			kfree(nv_connector);
-			return ERR_PTR(ret);
+			goto drm_conn_err;
 		}
 
 		ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
@@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev,
 			if (ret) {
 				nvif_event_dtor(&nv_connector->hpd);
 				nvif_conn_dtor(&nv_connector->conn);
-				kfree(nv_connector);
-				return ERR_PTR(ret);
+				goto drm_conn_err;
 			}
 		}
 	}
@@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev,
 
 	drm_connector_register(connector);
 	return connector;
+
+drm_conn_err:
+	drm_connector_cleanup(connector);
+	kfree(nv_connector);
+	return ERR_PTR(ret);
 }
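The nouveau fix is a textbook conversion to the kernel's goto-unwind idiom: both duplicated error paths, one of which was missing drm_connector_cleanup(), now funnel through a single label that releases resources in reverse order of acquisition. A self-contained illustration with invented resources:

    #include <stdio.h>
    #include <stdlib.h>

    struct widget { int *a, *b; };

    static int widget_init(struct widget *w)
    {
            int ret = -1;

            w->a = malloc(sizeof(*w->a));
            if (!w->a)
                    goto err_out;
            w->b = malloc(sizeof(*w->b));
            if (!w->b)
                    goto err_free_a;        /* unwind in reverse order */

            return 0;

    err_free_a:
            free(w->a);
    err_out:
            /* every failure funnels through one place, so a later fix
             * (like nouveau's missing cleanup call) lands everywhere */
            return ret;
    }

    int main(void)
    {
            struct widget w;

            return widget_init(&w) ? 1 : (free(w.b), free(w.a), 0);
    }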
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -404,38 +404,30 @@ static int jdi_panel_add(struct jdi_panel *jdi)
 
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
 				      jdi->supplies);
-	if (ret < 0) {
-		dev_err(dev, "failed to init regulator, ret=%d\n", ret);
-		return ret;
-	}
+	if (ret < 0)
+		return dev_err_probe(dev, ret,
+				     "failed to init regulator, ret=%d\n", ret);
 
 	jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
 	if (IS_ERR(jdi->enable_gpio)) {
-		ret = PTR_ERR(jdi->enable_gpio);
-		dev_err(dev, "cannot get enable-gpio %d\n", ret);
-		return ret;
+		return dev_err_probe(dev, PTR_ERR(jdi->enable_gpio),
+				     "cannot get enable-gpio %d\n", ret);
 	}
 
 	jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(jdi->reset_gpio)) {
-		ret = PTR_ERR(jdi->reset_gpio);
-		dev_err(dev, "cannot get reset-gpios %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(jdi->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(jdi->reset_gpio),
+				     "cannot get reset-gpios %d\n", ret);
 
 	jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
-	if (IS_ERR(jdi->dcdc_en_gpio)) {
-		ret = PTR_ERR(jdi->dcdc_en_gpio);
-		dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(jdi->dcdc_en_gpio))
+		return dev_err_probe(dev, PTR_ERR(jdi->dcdc_en_gpio),
+				     "cannot get dcdc-en-gpio %d\n", ret);
 
 	jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
-	if (IS_ERR(jdi->backlight)) {
-		ret = PTR_ERR(jdi->backlight);
-		dev_err(dev, "failed to register backlight %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(jdi->backlight))
+		return dev_err_probe(dev, PTR_ERR(jdi->backlight),
+				     "failed to register backlight %d\n", ret);
 
 	drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
 		       DRM_MODE_CONNECTOR_DSI);
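dev_err_probe() collapses the assign/log/return sequence into one call, and also stays quiet on -EPROBE_DEFER while recording the deferral reason for debugfs. The conversion pattern from the hunk above, reduced to one resource (a kernel-style sketch, not a complete driver):

    #include <linux/device.h>
    #include <linux/gpio/consumer.h>

    static int panel_get_reset(struct device *dev, struct gpio_desc **out)
    {
            struct gpio_desc *gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);

            if (IS_ERR(gpio))
                    /* logs (except for probe deferral) and returns the error */
                    return dev_err_probe(dev, PTR_ERR(gpio),
                                         "cannot get reset-gpios\n");
            *out = gpio;
            return 0;
    }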
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -999,21 +999,21 @@ static const struct panel_desc auo_g104sn02 = {
 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
-static const struct drm_display_mode auo_g121ean01_mode = {
-	.clock = 66700,
-	.hdisplay = 1280,
-	.hsync_start = 1280 + 58,
-	.hsync_end = 1280 + 58 + 8,
-	.htotal = 1280 + 58 + 8 + 70,
-	.vdisplay = 800,
-	.vsync_start = 800 + 6,
-	.vsync_end = 800 + 6 + 4,
-	.vtotal = 800 + 6 + 4 + 10,
+static const struct display_timing auo_g121ean01_timing = {
+	.pixelclock = { 60000000, 74400000, 90000000 },
+	.hactive = { 1280, 1280, 1280 },
+	.hfront_porch = { 20, 50, 100 },
+	.hback_porch = { 20, 50, 100 },
+	.hsync_len = { 30, 100, 200 },
+	.vactive = { 800, 800, 800 },
+	.vfront_porch = { 2, 10, 25 },
+	.vback_porch = { 2, 10, 25 },
+	.vsync_len = { 4, 18, 50 },
 };
 
 static const struct panel_desc auo_g121ean01 = {
-	.modes = &auo_g121ean01_mode,
-	.num_modes = 1,
+	.timings = &auo_g121ean01_timing,
+	.num_timings = 1,
 	.bpc = 8,
 	.size = {
 		.width = 261,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      u32 domain,
 				      size_t size,
 				      struct qxl_surface *surf,
-				      struct qxl_bo **qobj,
+				      struct drm_gem_object **gobj,
 				      uint32_t *handle);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct qxl_bo *qobj;
+	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
 	struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 
 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
 					      QXL_GEM_DOMAIN_CPU,
-					      args->size, &surf, &qobj,
+					      args->size, &surf, &gobj,
 					      &handle);
 	if (r)
 		return r;
+	qobj = gem_to_qxl_bo(gobj);
 	qobj->is_dumb = true;
+	drm_gem_object_put(gobj);
 	args->pitch = pitch;
 	args->handle = handle;
 	return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
 	return 0;
 }
 
+/*
+ * If the caller passed a valid gobj pointer, it is responsible to call
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, it is handled internally.
+ */
 int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      struct drm_file *file_priv,
 				      u32 domain,
 				      size_t size,
 				      struct qxl_surface *surf,
-				      struct qxl_bo **qobj,
+				      struct drm_gem_object **gobj,
 				      uint32_t *handle)
 {
-	struct drm_gem_object *gobj;
 	int r;
+	struct drm_gem_object *local_gobj;
 
-	BUG_ON(!qobj);
 	BUG_ON(!handle);
 
 	r = qxl_gem_object_create(qdev, size, 0,
 				  domain,
 				  false, false, surf,
-				  &gobj);
+				  &local_gobj);
 	if (r)
 		return -ENOMEM;
-	r = drm_gem_handle_create(file_priv, gobj, handle);
+	r = drm_gem_handle_create(file_priv, local_gobj, handle);
 	if (r)
 		return r;
-	/* drop reference from allocate - handle holds it now */
-	*qobj = gem_to_qxl_bo(gobj);
-	drm_gem_object_put(gobj);
+
+	if (gobj)
+		*gobj = local_gobj;
+	else
+		/* drop reference from allocate - handle holds it now */
+		drm_gem_object_put(local_gobj);
+
 	return 0;
 }
 
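The qxl use-after-free came from this function unconditionally dropping the allocation reference while some callers went on using the returned object pointer. drm_gem_handle_create() takes its own reference for the handle table, so whoever keeps a raw pointer must keep a reference too. The ownership rule, modelled with a plain refcount (types invented for illustration):

    #include <assert.h>
    #include <stdio.h>

    struct obj { int refs; };

    static void obj_get(struct obj *o) { o->refs++; }
    static void obj_put(struct obj *o) { o->refs--; assert(o->refs >= 0); }

    /* Like drm_gem_handle_create(): the handle table takes a reference. */
    static void handle_create(struct obj *o) { obj_get(o); }

    int main(void)
    {
            struct obj o = { .refs = 1 };   /* allocation reference */

            handle_create(&o);              /* handle now holds refs=2 */

            /*
             * The fixed qxl code: a caller that wants the pointer back
             * keeps the allocation reference and puts it later; only
             * when the caller passes NULL is it dropped immediately.
             */
            obj_put(&o);                    /* caller done with the pointer */
            printf("refs=%d (handle still owns one)\n", o.refs);
            return 0;
    }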
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc *qxl_alloc = data;
 	int ret;
-	struct qxl_bo *qobj;
 	uint32_t handle;
 	u32 domain = QXL_GEM_DOMAIN_VRAM;
 
@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
 						domain,
 						qxl_alloc->size,
 						NULL,
-						&qobj, &handle);
+						NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc_surf *param = data;
-	struct qxl_bo *qobj;
 	int handle;
 	int ret;
 	int size, actual_stride;
@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 						QXL_GEM_DOMAIN_SURFACE,
 						size,
 						&surf,
-						&qobj, &handle);
+						NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -61,15 +61,9 @@ struct std_timing {
 	u8 vfreq_aspect;
 } __attribute__((packed));
 
-#define DRM_EDID_PT_SYNC_MASK              (3 << 3)
-# define DRM_EDID_PT_ANALOG_CSYNC          (0 << 3)
-# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC  (1 << 3)
-# define DRM_EDID_PT_DIGITAL_CSYNC         (2 << 3)
-#  define DRM_EDID_PT_CSYNC_ON_RGB         (1 << 1) /* analog csync only */
-#  define DRM_EDID_PT_CSYNC_SERRATE        (1 << 2)
-# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3)
-#  define DRM_EDID_PT_HSYNC_POSITIVE       (1 << 1) /* also digital csync */
-#  define DRM_EDID_PT_VSYNC_POSITIVE       (1 << 2)
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC  (3 << 3)
 #define DRM_EDID_PT_STEREO         (1 << 5)
 #define DRM_EDID_PT_INTERLACED     (1 << 7)
 