msm/exynos/i915/amdgpu fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJZL3jPAAoJEAx081l5xIa+6RQP/0Inn+u3Yh3aFm1f/37DATTy
w0S5JMWyBippCR1rR9EOmYA1xcyXmsDNsisyCAWXortqrDT5UpXLP3V0kI2IVzdn
Ms4IbH6k6py+jLX88hJGZQyvhd26vXSEmFJDOzXj+1ie446yuAPJOXn1Lz0SHFOW
0QLl7sgMgZoLseu5/fWgXlgLKnEBqQFhJjM0zmH58sxKBBbFCb5ox/f92x8SkSTm
WO9voDrfsR0ejq55hViW1NBCU53ZnCUYL+P2zhrB8iog7fI9RvK4DhYIoutSUMCK
pzMFilznfhte/4dg5oWxa/r0gxuwa0IviRmLA2UW7ioXZLjiJicXUgTsNzShbtYf
C4NORc1uIX2pUGdLW6FH32Dbc+frL0fbKU8jeeeOuUyvxsiDhcIp72lVIVdrtz7/
APpEE6Z86X3BhFEBcrNy9qasC+SX3BBxdtB/pR7YoRa6XN+Rrl2ZnsZGDP9UseQd
DjBv68pyh30JiSotBdvJ+FuQo3rojARVlVluS+IVb6q1p1Hau3q+W0ItjvxIKtQ0
5iM4uotCyNjAI6HwFO3ey7cRrP9qmywiDANefJutIIjpMLgtGz1sAEVLUx5NkA4s
KTDGqJZ32cKGmlAxuMbjTOYI4SLxf6Scg+zhKIm5n7d4l0G7nXA/itvWPWFOSGlT
aYVOYbA/bXAHWVCBgEVu
=dckO
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:

 "This is the main set of fixes for rc4, one amdgpu fix, some exynos
  regression fixes, some msm fixes and some i915 and GVT fixes.

  I've got a second regression fix for some DP chips that might be a
  bit large, but I think we'd like to land it now, I'll send it along
  tomorrow, once you are happy with this set"

* tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux: (24 commits)
  drm/amdgpu: Program ring for vce instance 1 at its register space
  drm/exynos: clean up description of exynos_drm_crtc
  drm/exynos: dsi: Remove bridge node reference in removal
  drm/exynos: dsi: Fix the parse_dt function
  drm/exynos: Merge pre/postclose hooks
  drm/msm: Fix the check for the command size
  drm/msm: Take the mutex before calling msm_gem_new_impl
  drm/msm: for array in-fences, check if all backing fences are from our own context before waiting
  drm/msm: constify irq_domain_ops
  drm/msm/mdp5: release hwpipe(s) for unused planes
  drm/msm: Reuse dma_fence_release.
  drm/msm: Expose our reservation object when exporting a dmabuf.
  drm/msm/gpu: check legacy clk names in get_clocks()
  drm/msm/mdp5: use __drm_atomic_helper_plane_duplicate_state()
  drm/msm: select PM_OPP
  drm/i915: Stop pretending to mask/unmask LPE audio interrupts
  drm/i915/selftests: Silence compiler warning in igt_ctx_exec
  Revert "drm/i915: Restore lost "Initialized i915" welcome message"
  drm/i915/gvt: clean up unsubmited workloads before destroying kmem cache
  drm/i915/gvt: Disable compression workaround for Gen9
  ...
commit a37484638c
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_RPTR);
+		v = RREG32(mmVCE_RB_RPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_RPTR2);
+		v = RREG32(mmVCE_RB_RPTR2);
 	else
-		return RREG32(mmVCE_RB_RPTR3);
+		v = RREG32(mmVCE_RB_RPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_WPTR);
+		v = RREG32(mmVCE_RB_WPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_WPTR2);
+		v = RREG32(mmVCE_RB_WPTR2);
 	else
-		return RREG32(mmVCE_RB_WPTR3);
+		v = RREG32(mmVCE_RB_WPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
 	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[2];
-	WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+		/* Program instance 0 reg space for two instances or instance 0 case
+		program instance 1 reg space for only instance 1 available case */
+		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+			ring = &adev->vce.ring[0];
+			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[1];
+			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[2];
+			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+		}
+
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
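The amdgpu hunks above all follow one hardware pattern: the VCE ring registers are banked per instance behind GRBM_GFX_INDEX, so each ring must be programmed while the window of the instance that will run it is selected, and the default bank restored afterwards. Below is a minimal runnable userspace model of that select/program/restore flow; the array sizes, register values, and function names are illustrative stand-ins, not the amdgpu API.

```c
#include <stdio.h>

#define NUM_INSTANCES 2
#define NUM_RING_REGS 3

static unsigned int regs[NUM_INSTANCES][NUM_RING_REGS]; /* banked register file */
static int selected; /* models GRBM_GFX_INDEX */

static void wreg(int reg, unsigned int val)
{
	regs[selected][reg] = val; /* writes land in the currently selected bank */
}

static void program_rings(unsigned int harvest_mask)
{
	for (int idx = 0; idx < NUM_INSTANCES; idx++) {
		if (harvest_mask & (1u << idx))
			continue; /* instance fused off, skip it */
		selected = idx; /* like WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)) */
		for (int r = 0; r < NUM_RING_REGS; r++)
			wreg(r, 0x1000u + (unsigned int)r); /* rptr/wptr/base setup */
	}
	selected = 0; /* restore the default bank */
}

int main(void)
{
	program_rings(0); /* neither instance harvested */
	for (int i = 0; i < NUM_INSTANCES; i++)
		printf("instance %d reg0 = 0x%x\n", i, regs[i][0]);
	return 0;
}
```

The bug the real patch fixes is visible in this model: programming the rings once, outside the loop, would only ever touch whichever bank happened to be selected.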
@@ -82,14 +82,9 @@ err_file_priv_free:
 	return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-					struct drm_file *file)
-{
-	exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+	exynos_drm_subdrv_close(dev, file);
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
 			 | DRIVER_ATOMIC | DRIVER_RENDER,
 	.open = exynos_drm_open,
-	.preclose = exynos_drm_preclose,
 	.lastclose = exynos_drm_lastclose,
 	.postclose = exynos_drm_postclose,
 	.gem_free_object_unlocked = exynos_drm_gem_free_object,
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  * drm framework doesn't support multiple irq yet.
  * we can refer to the crtc to current hardware interrupt occurred through
  * this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
 	struct drm_crtc base;
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
 	struct device *dev = dsi->dev;
 	struct device_node *node = dev->of_node;
-	struct device_node *ep;
 	int ret;
 
 	ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 	if (ret < 0)
 		return ret;
 
-	ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-	if (!ep) {
-		dev_err(dev, "no output port with endpoint specified\n");
-		return -EINVAL;
-	}
-
-	ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
 				     &dsi->burst_clk_rate);
 	if (ret < 0)
-		goto end;
+		return ret;
 
-	ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
 				     &dsi->esc_clk_rate);
 	if (ret < 0)
-		goto end;
-
-	of_node_put(ep);
+		return ret;
 
 	dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
 	if (!dsi->bridge_node)
 		return -EINVAL;
 
-end:
-	of_node_put(ep);
-
-	return ret;
+	return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+	struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+	of_node_put(dsi->bridge_node);
+
 	pm_runtime_disable(&pdev->dev);
 
 	component_del(&pdev->dev, &exynos_dsi_component_ops);
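Both DSI patches deal with device-node lifetimes: parse_dt goes back to reading the clock properties from the node itself, dropping the endpoint lookup and its tangled put-on-error paths, and remove() now releases the bridge-node reference taken by of_graph_get_remote_node(). A toy refcount sketch of the invariant being restored; the node type and helpers here are made up, not the OF API.

```c
#include <assert.h>
#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n) { n->refcount--; }

int main(void)
{
	struct node bridge = { .refcount = 1 };	/* base reference held by the tree */
	struct node *ref = node_get(&bridge);	/* parse_dt: of_graph_get_remote_node() */

	/* ... driver bound, device in use ... */

	node_put(ref);				/* exynos_dsi_remove(): of_node_put() */
	assert(bridge.refcount == 1);		/* balanced: no leak, no underflow */
	printf("refcount back to %d\n", bridge.refcount);
	return 0;
}
```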
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine;
+	struct intel_vgpu_workload *pos, *n;
+	unsigned int tmp;
+
+	/* free the unsubmited workloads in the queues. */
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		list_for_each_entry_safe(pos, n,
+			&vgpu->workload_q_head[engine->id], list) {
+			list_del_init(&pos->list);
+			free_workload(pos);
+		}
+	}
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+	clean_workloads(vgpu, ALL_ENGINES);
 	kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
 	unsigned int tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		/* free the unsubmited workload in the queue */
-		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			free_workload(pos);
-		}
-
+	clean_workloads(vgpu, engine_mask);
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
-	}
 }
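The execlist change hoists the queue-draining loop into clean_workloads() so teardown and per-engine reset free unsubmitted workloads identically. The kernel iterates with list_for_each_entry_safe(); the plain-C sketch below uses its own toy singly-linked list rather than the kernel's list type, but shows the same point: the next pointer must be saved before the current entry is freed.

```c
#include <stdio.h>
#include <stdlib.h>

struct workload {
	int id;
	struct workload *next;
};

static void clean_workloads(struct workload **head)
{
	struct workload *pos = *head, *n;

	while (pos) {
		n = pos->next;	/* grab the successor before freeing pos */
		printf("freeing workload %d\n", pos->id);
		free(pos);
		pos = n;
	}
	*head = NULL;
}

int main(void)
{
	struct workload *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct workload *w = malloc(sizeof(*w));
		w->id = i;
		w->next = head;
		head = w;
	}
	clean_workloads(&head);
	return 0;
}
```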
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	i915_reg_t reg = {.reg = offset};
+	u32 v = *(u32 *)p_data;
+
+	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+		return intel_vgpu_default_mmio_write(vgpu,
+				offset, p_data, bytes);
 
 	switch (offset) {
 	case 0x4ddc:
-		vgpu_vreg(vgpu, offset) = 0x8000003c;
-		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
 		break;
 	case 0x42080:
-		vgpu_vreg(vgpu, offset) = 0x8000;
-		/* WaCompressedResourceDisplayNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceDisplayNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+		break;
+	case 0xe194:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+		break;
+	case 0x7014:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
 		break;
 	default:
 		return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
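The reworked skl_misc_ctl_write() above stops forcing magic values into the guest's view and instead forwards the guest's own write with a single workaround-enable bit cleared, now also intercepting 0xe194 and 0x7014. A small sketch of the masking, using the bit positions from the hunk; everything around the helper function is scaffolding for the example.

```c
#include <stdint.h>
#include <stdio.h>

/* clear one workaround-enable bit out of the value the guest wrote */
static uint32_t bypass_wa_bit(uint32_t guest_write, unsigned int bit)
{
	return guest_write & ~(UINT32_C(1) << bit);
}

int main(void)
{
	/* offset -> bit, per the hunk: 0x4ddc:31, 0x42080:15, 0xe194:8, 0x7014:13 */
	printf("0x4ddc : 0x%08x -> 0x%08x\n",
	       0x8000003cu, (unsigned int)bypass_wa_bit(0x8000003cu, 31));
	printf("0x42080: 0x%08x -> 0x%08x\n",
	       0x00008000u, (unsigned int)bypass_wa_bit(0x00008000u, 15));
	return 0;
}
```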
@@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	dev_priv->ipc_enabled = false;
 
-	/* Everything is in place, we can now relax! */
-	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-		 driver.name, driver.major, driver.minor, driver.patchlevel,
-		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
 		DRM_INFO("DRM_I915_DEBUG enabled\n");
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	    appgtt->base.allocate_va_range) {
 		ret = appgtt->base.allocate_va_range(&appgtt->base,
 						     vma->node.start,
-						     vma->node.size);
+						     vma->size);
 		if (ret)
 			goto err_pages;
 	}
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
 		return;
 
 	mutex_unlock(&dev->struct_mutex);
-
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 				I915_SHRINK_ACTIVE);
 	intel_runtime_pm_put(dev_priv);
 
-	synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
 	return freed;
 }
 
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 	u32 pipestat_mask;
 	u32 enable_mask;
 	enum pipe pipe;
-	u32 val;
 
 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
 			PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_LPE_PIPE_A_INTERRUPT |
+		I915_LPE_PIPE_B_INTERRUPT;
 
 	if (IS_CHERRYVIEW(dev_priv))
-		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+			I915_LPE_PIPE_C_INTERRUPT;
 
 	WARN_ON(dev_priv->irq_mask != ~0);
 
-	val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT |
-		I915_LPE_PIPE_C_INTERRUPT);
-
-	enable_mask |= val;
-
 	dev_priv->irq_mask = ~enable_mask;
 
 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c)	((port) ? c : a)	/* ports A and C only */
+#define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? a : c)	/* ports A and C only */
 #define _MMIO_MIPI(port, a, c)	_MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1	_MMIO(0x160004)
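The _MIPI_PORT change is about stating intent rather than changing behaviour: only ports A and C are legal here, and PORT_A is 0, so the old boolean test and the new explicit comparison select the same register, but the old form silently treated the port enum as a truth value. A self-contained check; the enum values mirror i915's enum port, while the register offsets are invented for the example.

```c
#include <stdio.h>

enum port { PORT_A = 0, PORT_B, PORT_C };

#define OLD_MIPI_PORT(port, a, c) ((port) ? (c) : (a))
#define NEW_MIPI_PORT(port, a, c) (((port) == PORT_A) ? (a) : (c))

int main(void)
{
	int reg_a = 0x6100, reg_c = 0x6900; /* illustrative offsets only */

	/* identical results for the two legal ports; the new form says why */
	printf("PORT_A: old=0x%x new=0x%x\n",
	       OLD_MIPI_PORT(PORT_A, reg_a, reg_c),
	       NEW_MIPI_PORT(PORT_A, reg_a, reg_c));
	printf("PORT_C: old=0x%x new=0x%x\n",
	       OLD_MIPI_PORT(PORT_C, reg_a, reg_c),
	       NEW_MIPI_PORT(PORT_C, reg_a, reg_c));
	return 0;
}
```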
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask &= ~val;
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	POSTING_READ(VLV_IMR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask |= val;
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	POSTING_READ(VLV_IIR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
 	desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-	lpe_audio_irq_mask(&desc->irq_data);
-
 	lpe_audio_platdev_destroy(dev_priv);
 
 	irq_free_desc(dev_priv->lpe_audio.irq);
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ce->ring = ring;
 	ce->state = vma;
-	ce->initialised = engine->init_context == NULL;
+	ce->initialised |= engine->init_context == NULL;
 
 	return 0;
 
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = NULL;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
 		}
 
 		for_each_engine(engine, i915, id) {
-			if (dw == 0) {
+			if (!obj) {
 				obj = create_test_object(ctx, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
 				goto out_unlock;
 			}
 
-			if (++dw == max_dwords(obj))
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
 				dw = 0;
+			}
 			ndwords++;
 		}
 		ncontexts++;
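The selftest fix ties allocation to the object pointer instead of the dword counter: obj starts out NULL, is (re)allocated whenever it is NULL, and is reset to NULL at exactly the moment dw wraps, which also silences the compiler's uninitialised-use warning. A compact, runnable model of that loop shape, with malloc standing in for create_test_object():

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_DWORDS 4

int main(void)
{
	int *obj = NULL;
	unsigned int dw = 0, ndwords = 0;

	for (int step = 0; step < 10; step++) {
		if (!obj) {			/* was: if (dw == 0) */
			obj = malloc(sizeof(*obj));
			printf("new object at step %d\n", step);
		}
		if (++dw == MAX_DWORDS) {
			free(obj);
			obj = NULL;		/* force a fresh allocation next pass */
			dw = 0;
		}
		ndwords++;
	}
	free(obj);				/* free(NULL) is harmless */
	printf("%u dwords emitted\n", ndwords);
	return 0;
}
```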
@@ -13,6 +13,7 @@ config DRM_MSM
 	select QCOM_SCM
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
+	select PM_OPP
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
 	.map = mdss_hw_irqdomain_map,
 	.xlate = irq_domain_xlate_onecell,
 };
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
 	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
 			     sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
 
-	if (mdp5_state && mdp5_state->base.fb)
-		drm_framebuffer_reference(mdp5_state->base.fb);
+	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
 	return &mdp5_state->base;
 }
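The duplicate_state fix does two things: it bails out before dereferencing a failed kmemdup(), and it delegates base-state duplication, including taking the framebuffer reference, to the core helper. A minimal model of the check-then-delegate pattern; the types and the helper_duplicate() stand-in are invented, the real helper being __drm_atomic_helper_plane_duplicate_state().

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct base_state { int fb_refcount; };
struct plane_state { struct base_state base; int stage; };

/* stand-in for __drm_atomic_helper_plane_duplicate_state() */
static void helper_duplicate(struct base_state *state)
{
	state->fb_refcount++;	/* the helper takes the fb reference for us */
}

static struct plane_state *duplicate_state(const struct plane_state *old)
{
	struct plane_state *s = malloc(sizeof(*s));	/* like kmemdup() */

	if (!s)
		return NULL;	/* new: check before touching the copy */
	memcpy(s, old, sizeof(*s));
	helper_duplicate(&s->base);
	return s;
}

int main(void)
{
	struct plane_state old = { { 1 }, 2 };
	struct plane_state *dup = duplicate_state(&old);

	if (dup)
		printf("dup stage=%d fb_refcount=%d\n",
		       dup->stage, dup->base.fb_refcount);
	free(dup);
	return 0;
}
```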
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
 			mdp5_pipe_release(state->state, old_hwpipe);
 			mdp5_pipe_release(state->state, old_right_hwpipe);
 		}
+	} else {
+		mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+		mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
 	}
 
 	return 0;
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-	struct msm_fence_context *fctx;
 	struct dma_fence base;
+	struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-	struct msm_fence *f = to_msm_fence(fence);
-	kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
 	.wait = dma_fence_default_wait,
-	.release = msm_fence_release,
+	.release = dma_fence_free,
 };
 
 struct dma_fence *
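The fence patch can delete msm_fence_release() because it duplicated what dma_fence_free() already does, an RCU-deferred kfree, so the ops table now points at the shared helper. A loose userspace model of retiring a bespoke hook in favour of a library one; the types here are toys, not the dma_fence API.

```c
#include <stdio.h>
#include <stdlib.h>

struct fence;
struct fence_ops { void (*release)(struct fence *f); };
struct fence { const struct fence_ops *ops; };

static void generic_fence_free(struct fence *f)	/* plays dma_fence_free() */
{
	printf("freeing via the shared helper\n");
	free(f);
}

static const struct fence_ops msm_fence_ops = {
	.release = generic_fence_free,	/* was a local duplicate of this logic */
};

int main(void)
{
	struct fence *f = malloc(sizeof(*f));

	f->ops = &msm_fence_ops;
	f->ops->release(f);
	return 0;
}
```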
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_gem_object *msm_obj;
 	bool use_vram = false;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
 	case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
+	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+	mutex_lock(&dev->struct_mutex);
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+	mutex_unlock(&dev->struct_mutex);
+
 	if (ret)
 		goto fail;
 
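The msm_gem pair makes the locking contract explicit: the callee now warns when struct_mutex is not held, and the dma-buf import path takes the lock around the call. A pthread sketch of the same assert-in-callee, lock-in-caller split; the `locked` flag merely stands in for the kernel's mutex_is_locked() check.

```c
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int locked;	/* stand-in for mutex_is_locked() */

static void gem_new_impl(void)
{
	assert(locked);	/* like WARN_ON(!mutex_is_locked(&dev->struct_mutex)) */
	printf("modifying the inactive list safely\n");
}

static void gem_import(void)
{
	pthread_mutex_lock(&struct_mutex);
	locked = 1;
	gem_new_impl();	/* list manipulation is serialised by the caller */
	locked = 0;
	pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
	gem_import();
	return 0;
}
```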
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	return msm_obj->resv;
+}
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		if (!in_fence)
 			return -EINVAL;
 
-		/* TODO if we get an array-fence due to userspace merging multiple
-		 * fences, we need a way to determine if all the backing fences
-		 * are from our own context..
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
 		 */
-		if (in_fence->context != gpu->fctx->context) {
+		if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
 			ret = dma_fence_wait(in_fence, true);
 			if (ret)
 				return ret;
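dma_fence_match_context(), introduced for this fix, reports whether a fence, or every backing fence inside a fence array, belongs to the given context; only a foreign fence forces the blocking wait before submit. A userspace sketch of the equivalent walk over a merged array, with a toy fence type rather than the kernel's.

```c
#include <stdbool.h>
#include <stdio.h>

struct fence { unsigned int context; };

static bool fences_match_context(const struct fence *fences, int n,
				 unsigned int context)
{
	for (int i = 0; i < n; i++)
		if (fences[i].context != context)
			return false;	/* one foreign fence forces a wait */
	return true;
}

int main(void)
{
	struct fence merged[] = { { 3 }, { 3 }, { 7 } };
	int n = (int)(sizeof(merged) / sizeof(merged[0]));

	if (!fences_match_context(merged, n, 3))
		printf("foreign fence present: must wait before submit\n");
	return 0;
}
```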
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 			goto out;
 		}
 
-		if ((submit_cmd.size + submit_cmd.submit_offset) >=
-				msm_obj->base.size) {
+		if (!submit_cmd.size ||
+		    ((submit_cmd.size + submit_cmd.submit_offset) >
+				msm_obj->base.size)) {
 			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
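The tightened cmdstream check rejects zero-sized commands and accepts one that ends exactly at the end of the buffer ('>' rather than '>='). The predicate, recast in its "is valid" form; the kernel operates on u32 fields, plain unsigned here.

```c
#include <stdbool.h>
#include <stdio.h>

static bool cmd_ok(unsigned int size, unsigned int offset, unsigned int bo_size)
{
	return size && (size + offset) <= bo_size;
}

int main(void)
{
	printf("%d\n", cmd_ok(0, 0, 4096));	/* 0: empty cmdstream rejected */
	printf("%d\n", cmd_ok(4096, 0, 4096));	/* 1: exact fit now accepted */
	printf("%d\n", cmd_ok(4096, 8, 4096));	/* 0: runs past the BO */
	return 0;
}
```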
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 		gpu->grp_clks[i] = get_clock(dev, name);
 
 		/* Remember the key clocks that we need to control later */
-		if (!strcmp(name, "core"))
+		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
 			gpu->core_clk = gpu->grp_clks[i];
-		else if (!strcmp(name, "rbbmtimer"))
+		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
 			gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
 		++i;
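Finally, get_clocks() now matches both the current bare clock names and the legacy "_clk"-suffixed names found in older device trees. A sketch of that dual-name matching:

```c
#include <stdio.h>
#include <string.h>

static int matches(const char *name, const char *key, const char *legacy)
{
	return !strcmp(name, key) || !strcmp(name, legacy);
}

int main(void)
{
	const char *names[] = { "core", "core_clk", "rbbmtimer_clk", "iface" };

	for (int i = 0; i < 4; i++) {
		if (matches(names[i], "core", "core_clk"))
			printf("%s -> core clock\n", names[i]);
		else if (matches(names[i], "rbbmtimer", "rbbmtimer_clk"))
			printf("%s -> rbbmtimer clock\n", names[i]);
	}
	return 0;
}
```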