commit f8981e0309
Merge tag 'msm-fixes-2019_08_01' of https://gitlab.freedesktop.org/drm/msm into drm-fixes

- Fix the dma_sync calls applied last week (Rob)
- Fix mdp5 dsi command mode (Brian)
- Squash fall through warnings (Jordan)
- Don't add disabled gpu nodes to the of device list (Jeffrey)

Cc: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
Cc: Jordan Crouse <jcrouse@codeaurora.org>
Cc: Brian Masney <masneyb@onstation.org>
Cc: Rob Clark <robdclark@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20190801200439.GV104440@art_vandelay
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 	case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		if (priv->lastctx == ctx)
 			break;
+		/* fall-thru */
 	case MSM_SUBMIT_CMD_BUF:
 		/* copy commands into RB: */
 		obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		if (priv->lastctx == ctx)
 			break;
+		/* fall-thru */
 	case MSM_SUBMIT_CMD_BUF:
 		OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 		OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		if (priv->lastctx == ctx)
 			break;
+		/* fall-thru */
 	case MSM_SUBMIT_CMD_BUF:
 		OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
 		OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		/* ignore if there has not been a ctx switch: */
 		if (priv->lastctx == ctx)
 			break;
+		/* fall-thru */
 	case MSM_SUBMIT_CMD_BUF:
 		OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
 			CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
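The four `/* fall-thru */` comments added above are what silences GCC's -Wimplicit-fallthrough, which the kernel was enabling tree-wide around this release. A minimal standalone sketch of the same pattern (our own example, not driver code):

#include <stdio.h>

/* Build with: gcc -Wimplicit-fallthrough demo.c
 * Without the marker comment, GCC warns that case 1 may fall through
 * into case 2; a comment matching its fall-through regex, placed
 * immediately before the next case label, documents the fall through
 * as intentional and suppresses the warning.
 */
static void submit_cmd(int type, int same_ctx)
{
	switch (type) {
	case 1:				/* "restore" command */
		if (same_ctx)
			break;		/* nothing to restore */
		/* fall-thru */
	case 2:				/* ordinary command buffer */
		printf("emit buffer\n");
		break;
	default:
		printf("ignored\n");
	}
}

int main(void)
{
	submit_cmd(1, 0);		/* falls through: emits */
	submit_cmd(1, 1);		/* same context: silent */
	return 0;
}

(Later kernels replaced these comments with the fallthrough; pseudo-keyword, a wrapper around the compiler's fallthrough attribute.)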
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+	u32 count;
+
+	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+	drm_crtc_set_max_vblank_count(crtc, count);
+
+	drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
 	}
 
 	/* Restore vblank irq handling after power is enabled */
-	drm_crtc_vblank_on(crtc);
+	mdp5_crtc_vblank_on(crtc);
 
 	mdp5_crtc_mode_set_nofb(crtc);
 
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
 		mdp5_crtc_destroy_state(crtc, crtc->state);
 
 	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+	drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
 	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
 	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-	dev->max_vblank_count = 0xffffffff;
+	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
 	dev->vblank_disable_immediate = true;
 
 	return kms;
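For context on the mdp5 change: a DSI panel driven in command mode is only refreshed on demand, so there is no free-running hardware vblank counter to read. The new mdp5_crtc_vblank_on() therefore advertises a per-CRTC maximum of 0 in command mode (which, with the device-wide value also cleared to 0, effectively tells the DRM core there is no usable hardware counter) and the full 32-bit range otherwise. A hedged sketch of the same idiom for a hypothetical driver (the demo_ name is ours; the two DRM calls are the ones the patch uses):

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

/* Sketch, not mdp5 code: choose the vblank counter range per CRTC at
 * enable time instead of once per device.
 */
static void demo_crtc_vblank_on(struct drm_crtc *crtc, bool has_hw_counter)
{
	drm_crtc_set_max_vblank_count(crtc,
				      has_hw_counter ? 0xffffffff : 0);
	drm_crtc_vblank_on(crtc);
}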
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	drm_of_component_match_add(dev, matchptr, compare_of, np);
+	if (of_device_is_available(np))
+		drm_of_component_match_add(dev, matchptr, compare_of, np);
 
 	of_node_put(np);
 
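This works because of_device_is_available() is true only for nodes whose `status` property is "okay"/"ok" or absent, so a GPU node carrying status = "disabled" is now skipped rather than added to a component match that can never complete. A minimal sketch of the fixed logic as a standalone helper (the demo_ name is ours; compare_of is the driver's existing match callback from the diff):

#include <linux/component.h>
#include <linux/of.h>
#include <drm/drm_of.h>

/* Sketch mirroring the fixed add_gpu_components(): only available
 * nodes join the component match list, and the node reference is
 * dropped either way.
 */
static void demo_add_gpu_match(struct device *dev,
			       struct component_match **matchptr,
			       struct device_node *np)
{
	if (!np)
		return;
	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare_of, np);
	of_node_put(np);
}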
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-						msm_obj->sgt->nents,
-						DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
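A note on why sync_for_device()/sync_for_cpu() branch on get_dma_ops(): dma-direct's sync_sg implementations translate sg_dma_address() back to a physical address, and that field is only populated by dma_map_sg(), which this driver never calls for real mapping purposes (the GPU and display manage their own IOMMU domains, as the comment block in the diff explains). So under dma-direct the map/unmap calls are borrowed purely for their cache-maintenance side effect, while with bound dma_ops a plain sync suffices and avoids creating an unwanted mapping. An annotated re-statement of the helper as a sketch (dev/sgt assumed shaped as in the patch):

#include <linux/dma-mapping.h>

/* Sketch, not a new helper from the patch: the same branch as
 * sync_for_device(), with the reasoning spelled out inline.
 */
static void demo_sync_for_device(struct device *dev, struct sg_table *sgt)
{
	if (get_dma_ops(dev)) {
		/* dma_ops bound (the IOMMU path this hardware sees):
		 * the sync works from the page list and never reads
		 * sg_dma_address(), so it is safe on a table that was
		 * never dma_map_sg()'d. */
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents,
				       DMA_BIDIRECTIONAL);
	} else {
		/* dma-direct: a sync would read the uninitialised
		 * sg_dma_address(), so (ab)use dma_map_sg() purely for
		 * its cache-maintenance side effect instead. */
		dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
	}
}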