drm-fixes for v6.10-rc7

drivers:
 - amd: mostly amdgpu display fixes + radeon vm NULL deref fix
 - xe: migration error handling + typoed register name in gt setup
 - i915: usb-c fix to shut up warnings on MTL+
 - panthor: fix sync-only jobs + ioctl validation fix to not EINVAL
   wrongly
 - panel quirks
 - nouveau: NULL deref in get_modes
 
 drm core:
 - fbdev big endian fix for the dma memory backed variant
 
 drivers/firmware:
 - fix sysfb refcounting
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEb4nG6jLu8Y5XI+PfTA9ye/CYqnEFAmaIDWUACgkQTA9ye/CY
 qnG1MA/9H60hwOXinLcN7GZY3/EMQkWGyAixIZfZeSwvUc5LGJaXmc6uL87lwF2a
 0LOTmf+dG3/ieSqAxVmu7Pfaaiq0hqUQLabKRCxftOvoj+OeAKD7aaisvib2a98F
 2GTGzXMVe4jMEmTeQhIih3JAmaMgobURXZeEeZbyn1SC5UwTn48eOlQte04LpUco
 8qvMEXJ0bPzNpLaNsUZ0oOS4ntZI3+ZYVkBeg3d+YxK12QO4G9CH420An04T4HFM
 sz9P2Z8cirM+u/XNblpFxIYYBalxcBFdySg6MD9bMu/LAYXXwKcfB3k2uL4qEm82
 uFukqKVzaJoPUVPf1VKDq10TgIdR/VrjoDFVwFG1AWynitn++43VhRCx5nKK+1DW
 M6EtFiIQ2WrRINs/54wEQvC/IBdZ/AxFPZTyzjI5W/vJMIEpTjmgi9WhZ3MxMWHN
 l+ujz18CqDuJD8rKYxTdnrXeHSCJmItp4WnAmFyRxYxJvmgLxchMx3bW9LDuI26I
 h8MBVNXLJDfHBxsD0qw4cS0J+lhCvLBnOm/Gc1bKWqOWy0F6InBEO6Wq6lLliHjs
 yeOXWk/P5c69ZLi/+jMspjpmoXNa4+lnTpBdb5B7O7tF5pwnpWF3ArcEOWnB567g
 tVxPwTrz8qLCZz0S52hyHiPQUAjwkvPDy1sj6YIWEQDmC+5GKbk=
 =F56S
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-07-05' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Daniel Vetter:
 "Just small fixes all over here, all quiet as it should.

  drivers:

   - amd: mostly amdgpu display fixes + radeon vm NULL deref fix

   - xe: migration error handling + typoed register name in gt setup

   - i915: usb-c fix to shut up warnings on MTL+

   - panthor: fix sync-only jobs + ioctl validation fix to not EINVAL
     wrongly

   - panel quirks

   - nouveau: NULL deref in get_modes

  drm core:

   - fbdev big endian fix for the dma memory backed variant

  drivers/firmware:

   - fix sysfb refcounting"

* tag 'drm-fixes-2024-07-05' of https://gitlab.freedesktop.org/drm/kernel:
  drm/xe/mcr: Avoid clobbering DSS steering
  drm/xe: fix error handling in xe_migrate_update_pgtables
  drm/ttm: Always take the bo delayed cleanup path for imported bos
  drm/fbdev-generic: Fix framebuffer on big endian devices
  drm/panthor: Fix sync-only jobs
  drm/panthor: Don't check the array stride on empty uobj arrays
  drm/amdgpu/atomfirmware: silence UBSAN warning
  drm/radeon: check bo_va->bo is non-NULL before using it
  drm/amd/display: Fix array-index-out-of-bounds in dml2/FCLKChangeSupport
  drm/amd/display: Update efficiency bandwidth for dcn351
  drm/amd/display: Fix refresh rate range for some panel
  drm/amd/display: Account for cursor prefetch BW in DML1 mode support
  drm/amd/display: Add refresh rate range check
  drm/amd/display: Reset freesync config before update new state
  drm: panel-orientation-quirks: Add labels for both Valve Steam Deck revisions
  drm: panel-orientation-quirks: Add quirk for Valve Galileo
  drm/i915/display: For MTL+ platforms skip mg dp programming
  drm/nouveau: fix null pointer dereference in nouveau_connector_get_modes
  firmware: sysfb: Fix reference count of sysfb parent device
Linus Torvalds, 2024-07-05 11:53:40 -07:00, commit dd9d7390b2
17 changed files with 132 additions and 31 deletions


@@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si)
 	if (IS_ERR(pdev)) {
 		return ERR_CAST(pdev);
 	} else if (pdev) {
-		if (!sysfb_pci_dev_is_enabled(pdev))
+		if (!sysfb_pci_dev_is_enabled(pdev)) {
+			pci_dev_put(pdev);
 			return ERR_PTR(-ENODEV);
+		}
 		return &pdev->dev;
 	}
@@ -137,7 +139,7 @@ static __init int sysfb_init(void)
 	if (compatible) {
 		pd = sysfb_create_simplefb(si, &mode, parent);
 		if (!IS_ERR(pd))
-			goto unlock_mutex;
+			goto put_device;
 	}
 
 	/* if the FB is incompatible, create a legacy framebuffer device */
@@ -155,7 +157,7 @@ static __init int sysfb_init(void)
 	pd = platform_device_alloc(name, 0);
 	if (!pd) {
 		ret = -ENOMEM;
-		goto unlock_mutex;
+		goto put_device;
 	}
 
 	pd->dev.parent = parent;
@@ -170,9 +172,11 @@ static __init int sysfb_init(void)
 	if (ret)
 		goto err;
 
-	goto unlock_mutex;
+	goto put_device;
 err:
 	platform_device_put(pd);
+put_device:
+	put_device(parent);
 unlock_mutex:
 	mutex_unlock(&disable_lock);
 	return ret;
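
The sysfb fix follows the standard kernel unwind idiom: sysfb_parent_dev() now drops the PCI device reference on the disabled-device bail-out, and sysfb_init() routes every exit through a new put_device label so the parent reference taken earlier is always released. A minimal standalone sketch of that pattern (the mock device type and function names are illustrative, not the actual sysfb code):

/* Refcount-balanced error unwinding: every path after the get()
 * passes through exactly one put(), via a label placed before the
 * final return.
 */
#include <stdio.h>

struct device { int refs; };

static struct device *device_get(struct device *d) { d->refs++; return d; }
static void device_put(struct device *d) { d->refs--; }

static int do_init(struct device *parent, int fail)
{
	int ret = 0;

	device_get(parent);		/* reference taken up front */

	if (fail) {
		ret = -1;
		goto put_parent;	/* error paths drop the ref too */
	}

	/* ... device registration; later failures also jump below ... */

put_parent:
	device_put(parent);		/* one put for every exit path */
	return ret;
}

int main(void)
{
	struct device parent = { .refs = 0 };

	do_init(&parent, 1);
	printf("refs after failed init: %d\n", parent.refs);	/* 0 = balanced */
	return 0;
}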


@@ -10048,6 +10048,7 @@ skip_modeset:
 	}
 
 	/* Update Freesync settings. */
+	reset_freesync_config_for_crtc(dm_new_crtc_state);
 	get_freesync_config_for_crtc(dm_new_crtc_state,
 				     dm_new_conn_state);
@@ -11181,6 +11182,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
 	return ret;
 }
 
+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+				     struct edid *edid)
+{
+	u8 *edid_ext = NULL;
+	int i;
+	int j = 0;
+	u16 min_vfreq;
+	u16 max_vfreq;
+
+	if (edid == NULL || edid->extensions == 0)
+		return;
+
+	/* Find DisplayID extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (void *)(edid + (i + 1));
+		if (edid_ext[0] == DISPLAYID_EXT)
+			break;
+	}
+
+	if (edid_ext == NULL)
+		return;
+
+	while (j < EDID_LENGTH) {
+		/* Get dynamic video timing range from DisplayID if available */
+		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+		    (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+			min_vfreq = edid_ext[j+9];
+			if (edid_ext[j+1] & 7)
+				max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+			else
+				max_vfreq = edid_ext[j+10];
+
+			if (max_vfreq && min_vfreq) {
+				connector->display_info.monitor_range.max_vfreq = max_vfreq;
+				connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+				return;
+			}
+		}
+		j++;
+	}
+}
+
 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
 			  struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
@@ -11302,6 +11346,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	if (!adev->dm.freesync_module)
 		goto update;
 
+	/* Some eDP panels only have the refresh rate range info in DisplayID */
+	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+	     connector->display_info.monitor_range.max_vfreq == 0))
+		parse_edid_displayid_vrr(connector, edid);
+
 	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
 		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
 		bool edid_check_required = false;
@@ -11309,9 +11358,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		if (is_dp_capable_without_timing_msa(adev->dm.dc,
 						     amdgpu_dm_connector)) {
 			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
-				freesync_capable = true;
 				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
 				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+				if (amdgpu_dm_connector->max_vfreq -
+				    amdgpu_dm_connector->min_vfreq > 10)
+					freesync_capable = true;
 			} else {
 				edid_check_required = edid->version > 1 ||
 						      (edid->version == 1 &&
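
For a concrete reading of the decode above: the loop scans the DisplayID extension for a dynamic video timing range block (tag 0x25, payload length 9), takes the minimum refresh rate from byte j+9 and the maximum from byte j+10, optionally extended by two high bits from byte j+11. As a hypothetical example, edid_ext[j+9] = 48 and edid_ext[j+10] = 120 with (edid_ext[j+1] & 7) == 0 parse to a 48-120 Hz range; if instead bit 0 of edid_ext[j+1] were set and (edid_ext[j+11] & 3) == 1, the maximum would become 120 + (1 << 8) = 376 Hz. The subsequent hunk then only reports FreeSync capability when the parsed range spans more than 10 Hz.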


@@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 					&mode_lib->vba.UrgentBurstFactorLumaPre[k],
 					&mode_lib->vba.UrgentBurstFactorChromaPre[k],
 					&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+
+			v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
+					8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
 		}
 
 		{
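
The added term is the usual bandwidth identity: bytes per line divided by line time, scaled by the prefetch vertical ratio. With hypothetical numbers, one cursor 256 pixels wide at 32 bpp gives 1 * 256 * 32 / 8.0 = 1024 bytes per line; for HTotal = 4400 and PixelClock = 594 (MHz, so line time = 4400 / 594, roughly 7.41 us) and VRatioPreY = 1.0, cursor_bw_pre works out to about 1024 / 7.41, roughly 138 MB/s of extra prefetch bandwidth that mode support now accounts for.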


@@ -234,6 +234,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
 		out->round_trip_ping_latency_dcfclk_cycles = 106;
 		out->smn_latency_us = 2;
 		out->dispclk_dppclk_vco_speed_mhz = 3600;
+		out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
 		break;
 	}


@@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
+	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported)
 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
 	else
 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;


@@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1
 {
 	struct atom_common_table_header table_header;
 	/*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
-	struct atom_gpio_pin_assignment gpio_pin[8];
+	struct atom_gpio_pin_assignment gpio_pin[];
 };
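
Switching gpio_pin from a fixed [8] array to a C99 flexible array member matches how the table is really sized (the comment computes the entry count from the total table size) and silences UBSAN, which flags any index past 7 on a fixed-size declaration even when the underlying firmware allocation is larger. A self-contained sketch of the idiom, with illustrative types rather than the atomfirmware ones:

/* A table whose entry count is derived from the blob size, using a
 * flexible array member so bounds checkers see the array's true extent.
 */
#include <stdio.h>
#include <stdlib.h>

struct header { unsigned short size; };	/* whole-table size in bytes */
struct entry  { unsigned char id, pad[3]; };

struct table {
	struct header hdr;
	struct entry  pin[];	/* flexible array member, sized by the blob */
};

int main(void)
{
	int count = 12;		/* more entries than the old fixed [8] */
	size_t sz = sizeof(struct table) + count * sizeof(struct entry);
	struct table *t = calloc(1, sz);

	if (!t)
		return 1;
	t->hdr.size = (unsigned short)sz;

	/* real entry count = (whole structure size - header size) / entry size */
	int n = (t->hdr.size - sizeof(struct header)) / sizeof(struct entry);
	printf("entries: %d\n", n);	/* prints 12 */

	t->pin[n - 1].id = 0x2a;	/* in bounds; no UBSAN complaint */
	free(t);
	return 0;
}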


@@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
 		    sizes->surface_width, sizes->surface_height,
 		    sizes->surface_bpp);
 
-	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+					     sizes->surface_depth);
 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
 					       sizes->surface_height, format);
 	if (IS_ERR(buffer))


@@ -420,13 +420,20 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
 		},
 		.driver_data = (void *)&lcd1280x1920_rightside_up,
-	}, {	/* Valve Steam Deck */
+	}, {	/* Valve Steam Deck (Jupiter) */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
 		},
 		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* Valve Steam Deck (Galileo) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* VIOS LTH17 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),


@@ -2088,6 +2088,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
 	u32 ln0, ln1, pin_assignment;
 	u8 width;
 
+	if (DISPLAY_VER(dev_priv) >= 14)
+		return;
+
 	if (!intel_encoder_is_tc(&dig_port->base) ||
 	    intel_tc_port_in_tbt_alt_mode(dig_port))
 		return;


@@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 		struct drm_display_mode *mode;
 
 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		if (!mode)
+			return 0;
+
 		drm_mode_probed_add(connector, mode);
 		ret = 1;
 	}


@@ -86,15 +86,15 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
 	int ret = 0;
 	void *out_alloc;
 
+	if (!in->count)
+		return NULL;
+
 	/* User stride must be at least the minimum object size, otherwise it might
 	 * lack useful information.
 	 */
 	if (in->stride < min_stride)
 		return ERR_PTR(-EINVAL);
 
-	if (!in->count)
-		return NULL;
-
 	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
 	if (!out_alloc)
 		return ERR_PTR(-ENOMEM);


@@ -458,6 +458,16 @@ struct panthor_queue {
 		/** @seqno: Sequence number of the last initialized fence. */
 		atomic64_t seqno;
 
+		/**
+		 * @last_fence: Fence of the last submitted job.
+		 *
+		 * We return this fence when we get an empty command stream.
+		 * This way, we are guaranteed that all earlier jobs have completed
+		 * when drm_sched_job::s_fence::finished without having to feed
+		 * the CS ring buffer with a dummy job that only signals the fence.
+		 */
+		struct dma_fence *last_fence;
+
 		/**
 		 * @in_flight_jobs: List containing all in-flight jobs.
 		 *
@@ -829,6 +839,9 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
 	panthor_kernel_bo_destroy(queue->ringbuf);
 	panthor_kernel_bo_destroy(queue->iface.mem);
 
+	/* Release the last_fence we were holding, if any. */
+	dma_fence_put(queue->fence_ctx.last_fence);
+
 	kfree(queue);
 }
@@ -2784,9 +2797,6 @@ static void group_sync_upd_work(struct work_struct *work)
 		spin_lock(&queue->fence_ctx.lock);
 		list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
-			if (!job->call_info.size)
-				continue;
-
 			if (syncobj->seqno < job->done_fence->seqno)
 				break;
@@ -2865,11 +2875,14 @@ queue_run_job(struct drm_sched_job *sched_job)
 	static_assert(sizeof(call_instrs) % 64 == 0,
 		      "call_instrs is not aligned on a cacheline");
 
-	/* Stream size is zero, nothing to do => return a NULL fence and let
-	 * drm_sched signal the parent.
+	/* Stream size is zero, nothing to do except making sure all previously
+	 * submitted jobs are done before we signal the
+	 * drm_sched_job::s_fence::finished fence.
 	 */
-	if (!job->call_info.size)
-		return NULL;
+	if (!job->call_info.size) {
+		job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
+		return dma_fence_get(job->done_fence);
+	}
 
 	ret = pm_runtime_resume_and_get(ptdev->base.dev);
 	if (drm_WARN_ON(&ptdev->base, ret))
@@ -2928,6 +2941,10 @@ queue_run_job(struct drm_sched_job *sched_job)
 		}
 	}
 
+	/* Update the last fence. */
+	dma_fence_put(queue->fence_ctx.last_fence);
+	queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
+
 	done_fence = dma_fence_get(job->done_fence);
 
 out_unlock:
@@ -3378,10 +3395,15 @@ panthor_job_create(struct panthor_file *pfile,
 		goto err_put_job;
 	}
 
-	job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
-	if (!job->done_fence) {
-		ret = -ENOMEM;
-		goto err_put_job;
+	/* Empty command streams don't need a fence, they'll pick the one from
+	 * the previously submitted job.
+	 */
+	if (job->call_info.size) {
+		job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
+		if (!job->done_fence) {
+			ret = -ENOMEM;
+			goto err_put_job;
+		}
 	}
 
 	ret = drm_sched_job_init(&job->base,
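
The mechanism assembled by the hunks above: each queue keeps a reference to the done fence of the last real job, and an empty (sync-only) submission returns an extra reference to that cached fence instead of allocating its own, so it signals exactly when all previously queued work has finished. A toy standalone sketch of the idea (the refcounted fence and names are illustrative, not the panthor/dma_fence API):

#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; };

static struct fence *fence_get(struct fence *f) { if (f) f->refs++; return f; }
static void fence_put(struct fence *f)
{
	if (f && --f->refs == 0)
		free(f);
}

struct queue { struct fence *last_fence; };	/* fence of the last real job */

/* size == 0 models a sync-only submission */
static struct fence *run_job(struct queue *q, int size)
{
	if (!size)	/* empty stream: reuse the previous job's fence */
		return fence_get(q->last_fence);

	struct fence *done = calloc(1, sizeof(*done));
	if (!done)
		return NULL;
	done->refs = 1;

	fence_put(q->last_fence);	/* update the cached last fence */
	q->last_fence = fence_get(done);
	return done;
}

int main(void)
{
	struct queue q = { 0 };
	struct fence *a = run_job(&q, 64);	/* real job */
	struct fence *s = run_job(&q, 0);	/* sync-only job */

	printf("sync-only shares the last real fence: %s\n", s == a ? "yes" : "no");

	fence_put(s);
	fence_put(a);
	fence_put(q.last_fence);
	return 0;
}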


@@ -642,7 +642,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	if (r)
 		goto error_unlock;
 
-	if (bo_va->it.start)
+	if (bo_va->it.start && bo_va->bo)
 		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
 
 error_unlock:


@@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref)
 		if (!dma_resv_test_signaled(bo->base.resv,
 					    DMA_RESV_USAGE_BOOKKEEP) ||
 		    (want_init_on_free() && (bo->ttm != NULL)) ||
+		    bo->type == ttm_bo_type_sg ||
 		    !dma_resv_trylock(bo->base.resv)) {
 			/* The BO is not idle, resurrect it for delayed destroy */
 			ttm_bo_flush_all_fences(bo);


@@ -342,7 +342,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
 	else
 		gt->steering[OADDRM].group_target = 1;
 
-	gt->steering[DSS].instance_target = 0;		/* unused */
+	gt->steering[OADDRM].instance_target = 0;	/* unused */
 }
 
 static void init_steering_sqidi_psmi(struct xe_gt *gt)
@@ -357,8 +357,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
 
 static void init_steering_inst0(struct xe_gt *gt)
 {
-	gt->steering[DSS].group_target = 0;		/* unused */
-	gt->steering[DSS].instance_target = 0;		/* unused */
+	gt->steering[INSTANCE0].group_target = 0;	/* unused */
+	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
 }
 
 static const struct {


@@ -1334,7 +1334,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 					 GFP_KERNEL, true, 0);
 		if (IS_ERR(sa_bo)) {
 			err = PTR_ERR(sa_bo);
-			goto err;
+			goto err_bb;
 		}
 
 		ppgtt_ofs = NUM_KERNEL_PDE +
@@ -1385,7 +1385,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 				     update_idx);
 	if (IS_ERR(job)) {
 		err = PTR_ERR(job);
-		goto err_bb;
+		goto err_sa;
 	}
 
 	/* Wait on BO move */
@@ -1434,12 +1434,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 err_job:
 	xe_sched_job_put(job);
+err_sa:
+	drm_suballoc_free(sa_bo, NULL);
 err_bb:
 	if (!q)
 		mutex_unlock(&m->job_mutex);
 	xe_bb_free(bb, NULL);
-err:
-	drm_suballoc_free(sa_bo, NULL);
 	return ERR_PTR(err);
 }
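
The relabeling restores unwinding in reverse order of acquisition: a failure carving the sub-allocation now frees only the batch buffer, while a failed job creation frees the sub-allocation and then the batch buffer, instead of jumping to a label that freed a sub-allocation that may never have been set up. Schematically (a generic sketch, not the xe code; simulated failures stand in for real ones):

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_at)
{
	int err = 0;
	char *bb, *sa, *job;

	bb = malloc(16);				/* resource 1 */
	if (!bb)
		return -1;

	sa = (fail_at == 2) ? NULL : malloc(16);	/* resource 2 */
	if (!sa) {
		err = -1;
		goto err_bb;				/* only bb exists yet */
	}

	job = (fail_at == 3) ? NULL : malloc(16);	/* resource 3 */
	if (!job) {
		err = -1;
		goto err_sa;				/* unwind sa, then bb */
	}

	free(job);
	free(sa);
	free(bb);
	return 0;

err_sa:
	free(sa);
err_bb:
	free(bb);
	return err;
}

int main(void)
{
	printf("fail at 2 -> %d, fail at 3 -> %d\n", setup(2), setup(3));
	return 0;
}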


@@ -802,6 +802,9 @@ struct drm_panthor_queue_submit {
 	 * Must be 64-bit/8-byte aligned (the size of a CS instruction)
 	 *
 	 * Can be zero if stream_addr is zero too.
+	 *
+	 * When the stream size is zero, the queue submit serves as a
+	 * synchronization point.
 	 */
 	__u32 stream_size;
 
@@ -822,6 +825,8 @@ struct drm_panthor_queue_submit {
 	 * ensure the GPU doesn't get garbage when reading the indirect command
 	 * stream buffers. If you want the cache flush to happen
 	 * unconditionally, pass a zero here.
+	 *
+	 * Ignored when stream_size is zero.
 	 */
 	__u32 latest_flush;
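
Given the documented semantics, a sync-only submit from userspace is an ordinary queue submit with stream_size == 0 and stream_addr == 0. A hypothetical sketch follows (it assumes the usual uAPI header path and obj-array layout; group/queue creation, the sync-operation array, and error handling are omitted):

/* Submit a pure synchronization point on an existing panthor queue. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>	/* assumed include path for the uAPI header */

static int submit_sync_point(int fd, uint32_t group_handle, uint32_t queue_index)
{
	struct drm_panthor_queue_submit qsubmit;
	struct drm_panthor_group_submit gsubmit;

	memset(&qsubmit, 0, sizeof(qsubmit));
	qsubmit.queue_index = queue_index;
	qsubmit.stream_size = 0;	/* sync-only: no commands to execute */
	qsubmit.stream_addr = 0;	/* latest_flush is ignored in this case */
	/* qsubmit.syncs would carry the signal/wait operations */

	memset(&gsubmit, 0, sizeof(gsubmit));
	gsubmit.group_handle = group_handle;
	gsubmit.queue_submits.stride = sizeof(qsubmit);
	gsubmit.queue_submits.count = 1;
	gsubmit.queue_submits.array = (uint64_t)(uintptr_t)&qsubmit;

	return ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
}

With the fixes above, such a submission no longer trips the stride check when its sync array is empty, and the fence it signals is the done fence of the last real job on the queue.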