drm fixes for 6.10-rc8

amdgpu:
 - PSR-SU fix
 - Reserved VMID fix
 
 xe:
 - Use write-back caching mode for system memory on DGFX
 - Do not leak object when finalizing hdcp gsc
 
 bridge:
 - adv7511 EDID irq fix
 
 gma500:
 - NULL mode fixes
 
 meson:
 - fix resource leak
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmaQs1UACgkQDHTzWXnE
 hr69RQ/+LS/fvode6rdq8VKb6ZhYLyx2Q8WC+Acln3KHW9exY5sLKgBzLcKrBrlM
 7l/5w8Pzh1RR0PsGeOdywUDHeOnG06ebDmTpgdFd9O2dJShCb4Z5ozQFFx/ElOeT
 nol8TbTKxuFUkM3+6fwmAKHPHq6qYepaLn+o8FvA8tqwYiDFssyglOzQ/+AieFUq
 35x8AETB8zZqgSmQKuwSUHyXL0crZ8krR2xx69aedcBKHj5/i2rpRVSjTA0ZagJr
 1/40pXhicStDh5Mb3JSYmFn6MrTwUeWOriuT1NRop/4USzhePVmnpQuMR+kWUU8E
 0d4K/1b/pUgc4Ce/76oG4TI1lOuyTko/xqA2nv8kLm7C5+IdxmnbFyBM+mS8MToy
 wEHKgvyz9JjGc9sXANnvPsWEf06VGPJp2wKVbNcPDrTy/rH2ZmLHmxnIjzUUa1cW
 a1/l8Xb64F5bp1tNPVLk/7q3+ic6Mwp8XtNtIxqfbmTWr8oFZID0bsz3HD9bu49L
 hlOfho9tKD/oZj8IJ6xLL6LDY8zSViwWc1OvwkGqfmj4ym+g1vuCmzCwYcnagTFw
 6hKeGQAptV1LGlFtrmFsQWt1Q5t2YEhnmxXY71dgEZ5S7efPGUZ2+aS8Z94qU5n1
 2G6kEmyILnVKZNY+eMtdkZrK8SP/ozoT1VSKwyqwgWr9S/OPZz0=
 =yafK
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-07-12' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Oh I screwed up last week's fixes pull, and forgot to send..

  Back to work, thanks to Sima for covering last week. Not too many
  fixes, as expected getting close to release [ sic - Linus ]: amdgpu
  and xe have a couple each, and then some other misc ones.

  amdgpu:
   - PSR-SU fix
   - Reserved VMID fix

  xe:
   - Use write-back caching mode for system memory on DGFX
   - Do not leak object when finalizing hdcp gsc

  bridge:
   - adv7511 EDID irq fix

  gma500:
   - NULL mode fixes

  meson:
   - fix resource leak"

* tag 'drm-fixes-2024-07-12' of https://gitlab.freedesktop.org/drm/kernel:
  Revert "drm/amd/display: Reset freesync config before update new state"
  drm/xe/display/xe_hdcp_gsc: Free arbiter on driver removal
  drm/xe: Use write-back caching mode for system memory on DGFX
  drm/amdgpu: reject gang submit on reserved VMIDs
  drm/gma500: fix null pointer dereference in cdv_intel_lvds_get_modes
  drm/gma500: fix null pointer dereference in psb_intel_lvds_get_modes
  drm/meson: fix canvas release in bind function
  drm/bridge: adv7511: Fix Intermittent EDID failures
Merged by Linus Torvalds on 2024-07-15 10:07:31 -07:00 (commit 2ffd45da0b)
14 changed files, 122 insertions(+), 60 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -1093,6 +1093,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	unsigned int i;
 	int r;
 
+	/*
+	 * We can't use gang submit with reserved VMIDs when the VM changes
+	 * can't be invalidated by more than one engine at the same time.
+	 */
+	if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+		for (i = 0; i < p->gang_size; ++i) {
+			struct drm_sched_entity *entity = p->entities[i];
+			struct drm_gpu_scheduler *sched = entity->rq->sched;
+			struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
+			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+				return -EINVAL;
+		}
+	}
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c

@@ -406,7 +406,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r || !idle)
 		goto error;
 
-	if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
+	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
@@ -456,6 +456,19 @@ error:
 	return r;
 }
 
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+	return vm->reserved_vmid[vmhub] ||
+		(enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+}
+
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub)
 {

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h

@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 			       struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,

--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -10048,7 +10048,6 @@ skip_modeset:
 	}
 
 	/* Update Freesync settings. */
-	reset_freesync_config_for_crtc(dm_new_crtc_state);
 	get_freesync_config_for_crtc(dm_new_crtc_state,
 				     dm_new_conn_state);

--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h

@@ -401,7 +401,7 @@ struct adv7511 {
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
 int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
 #else
 static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 {

--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c

@@ -119,7 +119,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
 	cec_received_msg(adv7511->cec_adap, &msg);
 }
 
-void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
+int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
 {
 	unsigned int offset = adv7511->info->reg_cec_offset;
 	const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
@@ -131,16 +131,19 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
 	unsigned int rx_status;
 	int rx_order[3] = { -1, -1, -1 };
 	int i;
+	int irq_status = IRQ_NONE;
 
-	if (irq1 & irq_tx_mask)
+	if (irq1 & irq_tx_mask) {
 		adv_cec_tx_raw_status(adv7511, irq1);
+		irq_status = IRQ_HANDLED;
+	}
 
 	if (!(irq1 & irq_rx_mask))
-		return;
+		return irq_status;
 
 	if (regmap_read(adv7511->regmap_cec,
 			ADV7511_REG_CEC_RX_STATUS + offset, &rx_status))
-		return;
+		return irq_status;
 
 	/*
 	 * ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX
@@ -172,6 +175,8 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
 		adv7511_cec_rx(adv7511, rx_buf);
 	}
+
+	return IRQ_HANDLED;
 }
 
 static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)

--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c

@@ -469,6 +469,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
 {
 	unsigned int irq0, irq1;
 	int ret;
+	int cec_status = IRQ_NONE;
+	int irq_status = IRQ_NONE;
 
 	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
 	if (ret < 0)
@@ -478,29 +480,31 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
 	if (ret < 0)
 		return ret;
 
-	/* If there is no IRQ to handle, exit indicating no IRQ data */
-	if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) &&
-	    !(irq1 & ADV7511_INT1_DDC_ERROR))
-		return -ENODATA;
-
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-	if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+	if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) {
 		schedule_work(&adv7511->hpd_work);
+		irq_status = IRQ_HANDLED;
+	}
 
 	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
 		adv7511->edid_read = true;
 
 		if (adv7511->i2c_main->irq)
 			wake_up_all(&adv7511->wq);
+		irq_status = IRQ_HANDLED;
 	}
 
 #ifdef CONFIG_DRM_I2C_ADV7511_CEC
-	adv7511_cec_irq_process(adv7511, irq1);
+	cec_status = adv7511_cec_irq_process(adv7511, irq1);
 #endif
 
-	return 0;
+	/* If there is no IRQ to handle, exit indicating no IRQ data */
+	if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED)
+		return IRQ_HANDLED;
+
+	return IRQ_NONE;
 }
 
 static irqreturn_t adv7511_irq_handler(int irq, void *devid)
@@ -509,7 +513,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
 	int ret;
 
 	ret = adv7511_irq_process(adv7511, true);
-	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+	return ret < 0 ? IRQ_NONE : ret;
 }
 
 /* -----------------------------------------------------------------------------

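The EDID fix hinges on the return-value plumbing above: with the CEC status
folded in, the threaded handler can report IRQ_NONE when nothing was actually
serviced, letting the IRQ core detect a stuck or misrouted line instead of the
driver unconditionally claiming every interrupt. A minimal sketch of that
pattern, using hypothetical example_* names (the adv7511 probe path wires this
up with its own handler and flags):

	#include <linux/interrupt.h>

	/* Assumed helper: returns IRQ_NONE, IRQ_HANDLED or -errno. */
	int example_irq_process(void *ctx);

	static irqreturn_t example_irq_thread(int irq, void *devid)
	{
		int ret = example_irq_process(devid);

		/* Propagate IRQ_NONE when nothing was serviced. */
		return ret < 0 ? IRQ_NONE : ret;
	}

	static int example_request_irq(struct device *dev, int irq, void *ctx)
	{
		/* Threaded handler only; IRQF_ONESHOT keeps the line
		 * masked until the thread function returns. */
		return devm_request_threaded_irq(dev, irq, NULL,
						 example_irq_thread,
						 IRQF_ONESHOT, "example", ctx);
	}
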
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c

@@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 			drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+		if (!mode)
+			return 0;
+
 		drm_mode_probed_add(connector, mode);
 		return 1;
 	}

--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c

@@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
 	if (mode_dev->panel_fixed_mode != NULL) {
 		struct drm_display_mode *mode =
 			drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+		if (!mode)
+			return 0;
+
 		drm_mode_probed_add(connector, mode);
 		return 1;
 	}

--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c

@@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	if (ret)
 		goto free_drm;
 
 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
-	if (ret) {
-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
-		goto free_drm;
-	}
+	if (ret)
+		goto free_canvas_osd1;
 
 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
-	if (ret) {
-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
-		goto free_drm;
-	}
+	if (ret)
+		goto free_canvas_vd1_0;
 
 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
-	if (ret) {
-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
-		goto free_drm;
-	}
+	if (ret)
+		goto free_canvas_vd1_1;
 
 	priv->vsync_irq = platform_get_irq(pdev, 0);
 
 	ret = drm_vblank_init(drm, 1);
 	if (ret)
-		goto free_drm;
+		goto free_canvas_vd1_2;
 
 	/* Assign limits per soc revision/package */
 	for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
@@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	 */
 	ret = drm_aperture_remove_framebuffers(&meson_driver);
 	if (ret)
-		goto free_drm;
+		goto free_canvas_vd1_2;
 
 	ret = drmm_mode_config_init(drm);
 	if (ret)
-		goto free_drm;
+		goto free_canvas_vd1_2;
 	drm->mode_config.max_width = 3840;
 	drm->mode_config.max_height = 2160;
 	drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	if (priv->afbcd.ops) {
 		ret = priv->afbcd.ops->init(priv);
 		if (ret)
-			goto free_drm;
+			goto free_canvas_vd1_2;
 	}
 
 	/* Encoder Initialization */
@@ -371,6 +362,14 @@ uninstall_irq:
 exit_afbcd:
 	if (priv->afbcd.ops)
 		priv->afbcd.ops->exit(priv);
+free_canvas_vd1_2:
+	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+free_canvas_vd1_1:
+	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+free_canvas_vd1_0:
+	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+free_canvas_osd1:
+	meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
 free_drm:
 	drm_dev_put(drm);
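The meson change above converts open-coded cleanup into the kernel's standard
goto-ladder unwind: one label per acquired resource, jumped to in reverse
acquisition order, so each error path frees exactly what was set up before it.
A self-contained userspace sketch of the idiom, with made-up names:

	#include <errno.h>
	#include <stdlib.h>

	struct example_ctx { void *a, *b, *c; };

	int example_bind(struct example_ctx *ctx)
	{
		int ret = -ENOMEM;

		ctx->a = malloc(16);
		if (!ctx->a)
			return -ENOMEM;

		ctx->b = malloc(16);
		if (!ctx->b)
			goto free_a;

		ctx->c = malloc(16);
		if (!ctx->c)
			goto free_b;

		return 0;	/* success: resources stay live for unbind */

	free_b:			/* labels run in reverse acquisition order */
		free(ctx->b);
	free_a:
		free(ctx->a);
		return ret;
	}
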

--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c

@@ -159,12 +159,16 @@ void intel_hdcp_gsc_fini(struct xe_device *xe)
 {
 	struct intel_hdcp_gsc_message *hdcp_message =
 		xe->display.hdcp.hdcp_message;
+	struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
 
-	if (!hdcp_message)
-		return;
+	if (hdcp_message) {
+		xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
+		kfree(hdcp_message);
+		xe->display.hdcp.hdcp_message = NULL;
+	}
 
-	xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
-	kfree(hdcp_message);
+	kfree(arb);
+	xe->display.hdcp.arbiter = NULL;
 }
 
 static int xe_gsc_send_sync(struct xe_device *xe,
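The bug here was shape, not logic: the old early return on a missing
hdcp_message skipped freeing the separately allocated arbiter. A minimal
sketch of the teardown idiom the fix adopts, with hypothetical names
(kfree(NULL) is a no-op, so the unconditional free is safe):

	#include <linux/slab.h>

	struct example_state {
		void *msg;
		void *arbiter;
	};

	static void example_fini(struct example_state *st)
	{
		/* Release each resource independently and clear the
		 * pointer; returning early on the first missing one
		 * would leak the rest. */
		if (st->msg) {
			kfree(st->msg);
			st->msg = NULL;
		}

		kfree(st->arbiter);
		st->arbiter = NULL;
	}
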

--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c

@@ -317,7 +317,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 	struct xe_device *xe = xe_bo_device(bo);
 	struct xe_ttm_tt *tt;
 	unsigned long extra_pages;
-	enum ttm_caching caching;
+	enum ttm_caching caching = ttm_cached;
 	int err;
 
 	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
@@ -331,26 +331,35 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
 					   PAGE_SIZE);
 
-	switch (bo->cpu_caching) {
-	case DRM_XE_GEM_CPU_CACHING_WC:
-		caching = ttm_write_combined;
-		break;
-	default:
-		caching = ttm_cached;
-		break;
-	}
-
-	WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
-
 	/*
-	 * Display scanout is always non-coherent with the CPU cache.
-	 *
-	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
-	 * require a CPU:WC mapping.
+	 * DGFX system memory is always WB / ttm_cached, since
+	 * other caching modes are only supported on x86. DGFX
+	 * GPU system memory accesses are always coherent with the
+	 * CPU.
 	 */
-	if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
-	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
-		caching = ttm_write_combined;
+	if (!IS_DGFX(xe)) {
+		switch (bo->cpu_caching) {
+		case DRM_XE_GEM_CPU_CACHING_WC:
+			caching = ttm_write_combined;
+			break;
+		default:
+			caching = ttm_cached;
+			break;
+		}
+
+		WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
+
+		/*
+		 * Display scanout is always non-coherent with the CPU cache.
+		 *
+		 * For Xe_LPG and beyond, PPGTT PTE lookups are also
+		 * non-coherent and require a CPU:WC mapping.
+		 */
+		if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+		    (xe->info.graphics_verx100 >= 1270 &&
+		     bo->flags & XE_BO_FLAG_PAGETABLE))
+			caching = ttm_write_combined;
+	}
 
 	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
 	if (err) {

--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h

@@ -66,7 +66,8 @@ struct xe_bo {
 	/**
 	 * @cpu_caching: CPU caching mode. Currently only used for userspace
-	 * objects.
+	 * objects. Exceptions are system memory on DGFX, which is always
+	 * WB.
 	 */
 	u16 cpu_caching;

--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h

@@ -776,7 +776,13 @@ struct drm_xe_gem_create {
 #define DRM_XE_GEM_CPU_CACHING_WC                      2
 	/**
 	 * @cpu_caching: The CPU caching mode to select for this object. If
-	 * mmapping the object the mode selected here will also be used.
+	 * mmapping the object the mode selected here will also be used. The
+	 * exception is when mapping system memory (including data evicted
+	 * to system) on discrete GPUs. The caching mode selected will
+	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
+	 * between GPU and CPU is guaranteed. The caching mode of
+	 * existing CPU mappings will be updated transparently to
+	 * user-space clients.
 	 */
 	__u16 cpu_caching;
 	/** @pad: MBZ */
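For context on the documentation change above, a hedged userspace sketch of
selecting a caching mode at BO creation (the helper name is made up, the
placement mask would normally come from a prior device query, and error
handling is minimal):

	#include <errno.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/xe_drm.h>

	/* Request write-combined CPU mappings. On a discrete GPU,
	 * system-memory placements are transparently mapped WB instead,
	 * per the @cpu_caching documentation above. */
	static int example_create_wc_bo(int fd, __u64 size, __u32 placement,
					__u32 *handle)
	{
		struct drm_xe_gem_create create;

		memset(&create, 0, sizeof(create));
		create.size = size;		/* must be page aligned */
		create.placement = placement;	/* memory-region mask */
		create.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC;

		if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
			return -errno;

		*handle = create.handle;
		return 0;
	}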