Mirror of https://github.com/torvalds/linux.git
drm fixes for 5.18-rc3

i915:
- Correct legacy mmap disabling to use GRAPHICS_VER_FULL

msm:
- system suspend fix
- kzalloc return checks
- misc display fix
- iommu_present removal

amdgpu:
- Fix for alpha properly in pre-multiplied mode
- Fix VCN 3.1.2 firmware name
- Suspend/resume fix
- Add a gfxoff quirk for Mac vega20 board
- DCN 3.1.6 spread spectrum fix

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmJYki8ACgkQDHTzWXnE
hr4Vfw/+PDthco9TYnXPIJyloSceWD5YGInfJfzDjsY3ABGNr0lS5L8Oiv6CvSsI
1fYTDMuB5Uv3NFvt6snzsux7hlyYtkbhcig6QRYIH2at9BaN8db6Ngx8NAo5q53r
hR64MP1EdPRlj9It7eMWVB/W5F04SBwMs0k0KXfitRuyEeMM2YA+W23rmb5ZnTL3
QtRWs9PZSttmyQAsJTd2XpS5Fy40RKICkENi2i6Dq6etTrw4xMg+csN2jOyTfwPQ
jKdRzFfL7u6/40EzJgxWkg+QsjbHHAIpQwcE4EpMCYfa6jeS7KJsBgt1/RuolFx/
7su+jupQUAVMafMfXKgzYq/EUR1WPyM6Zrn85uPbOZlWTWlmVQjdejNKDoWmWcfC
3wkW/B/bsroJNJL+9CiQ+V/ZDrSBPe74T1FAhtNSQmmb493dyp0tEJoiTwpFTiCC
1th1aUlXZdkkFDCl5pg+iUAXF+vQi97TbzgLD52e31Pm9JXkls0M+ExbZdV1ACsU
ALwXNpj025CD00gL/bvrgf6dkonUXP+JtuXyjVjOyt6GsOSNLrQhtaLbN+uKWxud
6ZJfe3qZG6IlSZdGSGMRZ1IY9Iy0LGGs2jrkga22iwzcGbtmwdNTJf01nBthNI8D
BQzi4yjwnw1MvZYqXA+5yXS7RN0JdC9asZtwWjkpcq9jmqHtllQ=
=UQNj
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2022-04-15' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Eggs season holidays are upon us, and I'd expect some smaller pulls
  for the next two weeks. This seems eerily quiet.

  One i915 fix, amdgpu has a bunch, and msm. I didn't see a misc pull
  this week, so I expect that will catch up next week.

  i915:
   - Correct legacy mmap disabling to use GRAPHICS_VER_FULL

  msm:
   - system suspend fix
   - kzalloc return checks
   - misc display fix
   - iommu_present removal

  amdgpu:
   - Fix for alpha properly in pre-multiplied mode
   - Fix VCN 3.1.2 firmware name
   - Suspend/resume fix
   - Add a gfxoff quirk for Mac vega20 board
   - DCN 3.1.6 spread spectrum fix"

* tag 'drm-fixes-2022-04-15' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: remove dtbclk_ss compensation for dcn316
  drm/amdgpu: Enable gfxoff quirk on MacBook Pro
  drm/amdgpu: Ensure HDA function is suspended before ASIC reset
  drm/amdgpu: fix VCN 3.1.2 firmware name
  drm/amd/display: don't ignore alpha property on pre-multiplied mode
  drm/msm/gpu: Avoid -Wunused-function with !CONFIG_PM_SLEEP
  drm/msm/dp: add fail safe mode outside of event_mutex context
  drm/msm/dsi: Use connector directly in msm_dsi_manager_connector_init()
  drm/msm: Stop using iommu_present()
  drm/msm/mdp5: check the return of kzalloc()
  drm/msm: Fix range size vs end confusion
  drm/i915: Sunset igpu legacy mmap support based on GRAPHICS_VER_FULL
  drm/msm/dpu: Use indexed array initializer to prevent mismatches
  drm/msm/disp: check the return value of kzalloc()
  dt-bindings: display/msm: another fix for the dpu-qcm2290 example
  drm/msm: Add missing put_task_struct() in debugfs path
  drm/msm/gpu: Remove mutex from wait_event condition
  drm/msm/gpu: Park scheduler threads for system suspend
  drm/msm/gpu: Rename runtime suspend/resume functions
commit 028192fea1
@@ -160,7 +160,7 @@ examples:
     mdss: mdss@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
-        compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+        compatible = "qcom,qcm2290-mdss";
         reg = <0x05e00000 0x1000>;
         reg-names = "mdss";
         power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
                      <&apps_smmu 0x421 0x0>;
         ranges;

-        mdss_mdp: mdp@5e01000 {
+        mdss_mdp: display-controller@5e01000 {
             compatible = "qcom,qcm2290-dpu";
             reg = <0x05e01000 0x8f000>,
                   <0x05eb0000 0x2008>;
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
 {
         struct drm_device *drm_dev = dev_get_drvdata(dev);
         struct amdgpu_device *adev = drm_to_adev(drm_dev);
-        int r;

         if (amdgpu_acpi_is_s0ix_active(adev))
                 adev->in_s0ix = true;
         else
                 adev->in_s3 = true;
-        r = amdgpu_device_suspend(drm_dev, true);
-        if (r)
-                return r;
+        return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+        struct drm_device *drm_dev = dev_get_drvdata(dev);
+        struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
         if (!adev->in_s0ix)
-                r = amdgpu_asic_reset(adev);
-        return r;
+                return amdgpu_asic_reset(adev);
+
+        return 0;
 }

 static int amdgpu_pmops_resume(struct device *dev)
@@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
         .prepare = amdgpu_pmops_prepare,
         .complete = amdgpu_pmops_complete,
         .suspend = amdgpu_pmops_suspend,
+        .suspend_noirq = amdgpu_pmops_suspend_noirq,
         .resume = amdgpu_pmops_resume,
         .freeze = amdgpu_pmops_freeze,
         .thaw = amdgpu_pmops_thaw,
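For context, the amdgpu change above relies on the PM core running every device's .suspend callback before any .suspend_noirq callback, so deferring the ASIC reset to .suspend_noirq guarantees the HDA audio function on the same board has already been quiesced. A minimal sketch of that split, using hypothetical foo_* names rather than the amdgpu code:

#include <linux/device.h>
#include <linux/pm.h>

/* Work that may still depend on other devices being active. */
static int foo_suspend(struct device *dev)
{
        /* stop queues, save state */
        return 0;
}

/* Runs only after the .suspend phase has completed for all devices and
 * device interrupts have been disabled, so sibling functions (e.g. an
 * HDA controller on the same card) are already quiesced. */
static int foo_suspend_noirq(struct device *dev)
{
        /* last-chance actions such as a controller reset */
        return 0;
}

static int foo_resume(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .suspend       = foo_suspend,
        .suspend_noirq = foo_suspend_noirq,
        .resume        = foo_resume,
};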
@@ -52,7 +52,7 @@
 #define FIRMWARE_ALDEBARAN      "amdgpu/aldebaran_vcn.bin"
 #define FIRMWARE_BEIGE_GOBY     "amdgpu/beige_goby_vcn.bin"
 #define FIRMWARE_YELLOW_CARP    "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2      "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2      "amdgpu/vcn_3_1_2.bin"

 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
         /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+        /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+        { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
         { 0, 0, 0, 0, 0 },
 };

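The quirk table above is keyed on PCI vendor/device and subsystem vendor/device IDs plus a silicon revision. A small, self-contained illustration of how such a zero-terminated table is typically walked (hypothetical names, not the soc15.c code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gfxoff_quirk {
        uint16_t chip_vendor, chip_device;
        uint16_t subsys_vendor, subsys_device;
        uint8_t  revision;
};

/* Zero-terminated table, mirroring the style of amdgpu_gfxoff_quirk_list. */
static const struct gfxoff_quirk quirks[] = {
        { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, /* MacBook Pro Vega 20 */
        { 0, 0, 0, 0, 0 },
};

static bool needs_quirk(uint16_t vendor, uint16_t device,
                        uint16_t sub_vendor, uint16_t sub_device, uint8_t rev)
{
        const struct gfxoff_quirk *q;

        /* Stop at the all-zero terminator entry. */
        for (q = quirks; q->chip_device != 0; q++) {
                if (q->chip_vendor == vendor && q->chip_device == device &&
                    q->subsys_vendor == sub_vendor &&
                    q->subsys_device == sub_device && q->revision == rev)
                        return true;
        }
        return false;
}

int main(void)
{
        printf("%d\n", needs_quirk(0x1002, 0x69af, 0x106b, 0x019a, 0xc0)); /* 1 */
        return 0;
}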
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
                                 clk_mgr_dce->dprefclk_ss_percentage =
                                                 info.spread_spectrum_percentage;
                         }
-                        if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+                        if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
                                 clk_mgr_dce->dprefclk_ss_percentage = 0;
                 }
         }
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
         clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
         clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
         dce_clock_read_ss_info(&clk_mgr->base);
-        clk_mgr->base.dccg->ref_dtbclk_khz =
-                dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+        /*clk_mgr->base.dccg->ref_dtbclk_khz =
+        dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/

         clk_mgr->base.base.bw_params = &dcn316_bw_params;

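Spread-spectrum clocking "downspreads" a reference clock by a small percentage, and the compensation removed above re-derived the DTB clock from that adjusted value. As a rough illustration only (the percentage and divider handling in DC is more involved than this), a 600,000 kHz reference with 0.3% downspread works out to about 598,200 kHz:

#include <stdio.h>

int main(void)
{
        unsigned int ref_khz = 600000;   /* nominal reference clock */
        double ss_percent = 0.3;         /* example downspread percentage */

        /* adjusted = ref * (1 - ss% / 100) */
        unsigned int adjusted = (unsigned int)(ref_khz * (1.0 - ss_percent / 100.0));

        printf("adjusted reference: %u kHz\n", adjusted); /* 598200 */
        return 0;
}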
@@ -340,6 +340,7 @@ struct dc_config {
         bool is_asymmetric_memory;
         bool is_single_rank_dimm;
         bool use_pipe_ctx_sync_logic;
+        bool ignore_dpref_ss;
 };

 enum visual_confirm {
@@ -729,7 +730,6 @@ struct dc_debug_options {
         bool apply_vendor_specific_lttpr_wa;
         bool extended_blank_optimization;
         union aux_wake_wa_options aux_wake_wa;
-        bool ignore_dpref_ss;
         uint8_t psr_power_use_phy_fsm;
 };

@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         struct mpc *mpc = dc->res_pool->mpc;
         struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

-        if (per_pixel_alpha)
-                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-        else
-                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
         blnd_cfg.overlap_only = false;
         blnd_cfg.global_gain = 0xff;

+        if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+                blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+        } else if (per_pixel_alpha) {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+        } else {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+        }
+
         if (pipe_ctx->plane_state->global_alpha)
                 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
         else
@@ -2344,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         struct mpc *mpc = dc->res_pool->mpc;
         struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

-        if (per_pixel_alpha)
-                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-        else
-                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
         blnd_cfg.overlap_only = false;
         blnd_cfg.global_gain = 0xff;

+        if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+                blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+        } else if (per_pixel_alpha) {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+        } else {
+                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+        }
+
         if (pipe_ctx->plane_state->global_alpha)
                 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
         else
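The two hunks above (dcn10 and dcn20 carry the same change) stop discarding the plane's global alpha when the framebuffer also has per-pixel alpha: the global value is now fed to the blender as a gain on top of each pixel's own alpha. A hedged, purely illustrative calculation of that "combined global gain" idea (the MPCC hardware performs the real blend):

#include <stdio.h>

/* Effective alpha when a per-pixel-alpha plane also carries a global
 * alpha: treat the global value as a gain on the per-pixel value.
 * Illustrative only, not DC code. */
static unsigned int effective_alpha(unsigned int pixel_alpha,   /* 0..255 */
                                    unsigned int global_alpha)  /* 0..255 */
{
        return (pixel_alpha * global_alpha + 127) / 255;
}

int main(void)
{
        /* Half-transparent pixel on a plane at ~50% global alpha -> ~25% */
        printf("%u\n", effective_alpha(128, 128)); /* prints 64 */
        return 0;
}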
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
          * mmap ioctl is disallowed for all discrete platforms,
          * and for all platforms with GRAPHICS_VER > 12.
          */
-        if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+        if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
                 return -EOPNOTSUPP;

         if (args->flags & ~(I915_MMAP_WC))
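The i915 fix compares the full (major, release) graphics IP version instead of only the major number, so 12.x variants newer than 12.0 are excluded as well. Packing the two fields into one integer lets an ordinary '>' comparison order versions correctly; a standalone sketch with a hypothetical PACK_VER macro modelled on the idea behind i915's IP_VER:

#include <stdio.h>

/* Pack major/release into one value so integer comparison orders
 * versions: 12.50 > 12.0, while a bare major compare (12 > 12) says no.
 * Hypothetical macro, shown only to illustrate the comparison. */
#define PACK_VER(ver, rel)      (((ver) << 8) | (rel))

int main(void)
{
        printf("%d\n", PACK_VER(12, 50) > PACK_VER(12, 0)); /* 1 */
        printf("%d\n", 12 > 12);                            /* 0 */
        return 0;
}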
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
                 return ERR_CAST(mmu);

         return msm_gem_address_space_create(mmu,
-                "gpu", 0x100000000ULL, 0x1ffffffffULL);
+                "gpu", 0x100000000ULL, SZ_4G);
 }

 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
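This is the "range size vs end confusion" fix from the shortlog: the last argument is a size, and 0x1ffffffffULL is the inclusive end address of a 4 GiB range starting at 0x100000000, not its size. A tiny illustration of the difference:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t start = 0x100000000ULL;
        uint64_t size  = 0x100000000ULL;          /* 4 GiB, i.e. SZ_4G */
        uint64_t end   = start + size - 1;        /* inclusive end */

        printf("size = 0x%" PRIx64 "\n", size);   /* 0x100000000 */
        printf("end  = 0x%" PRIx64 "\n", end);    /* 0x1ffffffff */
        return 0;
}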
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
         {}
 };

-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
 {
         struct msm_gpu *gpu = dev_to_gpu(dev);

         return gpu->funcs->pm_resume(gpu);
 }

-static int active_submits(struct msm_gpu *gpu)
-{
-        int active_submits;
-        mutex_lock(&gpu->active_lock);
-        active_submits = gpu->active_submits;
-        mutex_unlock(&gpu->active_lock);
-        return active_submits;
-}
-
-static int adreno_suspend(struct device *dev)
+static int adreno_runtime_suspend(struct device *dev)
 {
         struct msm_gpu *gpu = dev_to_gpu(dev);
-        int remaining;

-        remaining = wait_event_timeout(gpu->retire_event,
-                                       active_submits(gpu) == 0,
-                                       msecs_to_jiffies(1000));
-        if (remaining == 0) {
-                dev_err(dev, "Timeout waiting for GPU to suspend\n");
-                return -EBUSY;
-        }
+        /*
+         * We should be holding a runpm ref, which will prevent
+         * runtime suspend.  In the system suspend path, we've
+         * already waited for active jobs to complete.
+         */
+        WARN_ON_ONCE(gpu->active_submits);

         return gpu->funcs->pm_suspend(gpu);
 }
-#endif

+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+        int i;
+
+        /*
+         * Shut down the scheduler before we force suspend, so that
+         * suspend isn't racing with scheduler kthread feeding us
+         * more work.
+         *
+         * Note, we just want to park the thread, and let any jobs
+         * that are already on the hw queue complete normally, as
+         * opposed to the drm_sched_stop() path used for handling
+         * faulting/timed-out jobs.  We can't really cancel any jobs
+         * already on the hw queue without racing with the GPU.
+         */
+        for (i = 0; i < gpu->nr_rings; i++) {
+                struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+                kthread_park(sched->thread);
+        }
+}
+
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+        int i;
+
+        for (i = 0; i < gpu->nr_rings; i++) {
+                struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+                kthread_unpark(sched->thread);
+        }
+}
+
+static int adreno_system_suspend(struct device *dev)
+{
+        struct msm_gpu *gpu = dev_to_gpu(dev);
+        int remaining, ret;
+
+        suspend_scheduler(gpu);
+
+        remaining = wait_event_timeout(gpu->retire_event,
+                                       gpu->active_submits == 0,
+                                       msecs_to_jiffies(1000));
+        if (remaining == 0) {
+                dev_err(dev, "Timeout waiting for GPU to suspend\n");
+                ret = -EBUSY;
+                goto out;
+        }
+
+        ret = pm_runtime_force_suspend(dev);
+out:
+        if (ret)
+                resume_scheduler(gpu);
+
+        return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+        resume_scheduler(dev_to_gpu(dev));
+        return pm_runtime_force_resume(dev);
+}
+
 static const struct dev_pm_ops adreno_pm_ops = {
-        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-        SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+        SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+        RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
 };

 static struct platform_driver adreno_driver = {
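The wait_event_timeout() in adreno_system_suspend() only completes because the job-retire path decrements the active-submit count and wakes gpu->retire_event. A hedged sketch of what that producer side generally looks like, using a made-up foo_gpu type rather than the real struct msm_gpu:

#include <linux/mutex.h>
#include <linux/wait.h>

struct foo_gpu {
        struct mutex active_lock;
        int active_submits;
        wait_queue_head_t retire_event;
};

/* Illustrative only: when a job retires, drop the count under the lock,
 * then wake anyone (such as a system-suspend path) waiting for idle. */
static void foo_job_retired(struct foo_gpu *gpu)
{
        mutex_lock(&gpu->active_lock);
        gpu->active_submits--;
        mutex_unlock(&gpu->active_lock);

        wake_up_all(&gpu->retire_event);
}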
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
  * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
-        {
+        [MDP_SSPP_TOP0_INTR] = {
                 MDP_SSPP_TOP0_OFF+INTR_CLEAR,
                 MDP_SSPP_TOP0_OFF+INTR_EN,
                 MDP_SSPP_TOP0_OFF+INTR_STATUS
         },
-        {
+        [MDP_SSPP_TOP0_INTR2] = {
                 MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
                 MDP_SSPP_TOP0_OFF+INTR2_EN,
                 MDP_SSPP_TOP0_OFF+INTR2_STATUS
         },
-        {
+        [MDP_SSPP_TOP0_HIST_INTR] = {
                 MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
                 MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
                 MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
         },
-        {
+        [MDP_INTF0_INTR] = {
                 MDP_INTF_0_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_0_OFF+INTF_INTR_EN,
                 MDP_INTF_0_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF1_INTR] = {
                 MDP_INTF_1_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_1_OFF+INTF_INTR_EN,
                 MDP_INTF_1_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF2_INTR] = {
                 MDP_INTF_2_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_2_OFF+INTF_INTR_EN,
                 MDP_INTF_2_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF3_INTR] = {
                 MDP_INTF_3_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_3_OFF+INTF_INTR_EN,
                 MDP_INTF_3_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF4_INTR] = {
                 MDP_INTF_4_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_4_OFF+INTF_INTR_EN,
                 MDP_INTF_4_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF5_INTR] = {
                 MDP_INTF_5_OFF+INTF_INTR_CLEAR,
                 MDP_INTF_5_OFF+INTF_INTR_EN,
                 MDP_INTF_5_OFF+INTF_INTR_STATUS
         },
-        {
+        [MDP_AD4_0_INTR] = {
                 MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
                 MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
                 MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
         },
-        {
+        [MDP_AD4_1_INTR] = {
                 MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                 MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                 MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
         },
-        {
+        [MDP_INTF0_7xxx_INTR] = {
                 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF1_7xxx_INTR] = {
                 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF2_7xxx_INTR] = {
                 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF3_7xxx_INTR] = {
                 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF4_7xxx_INTR] = {
                 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
         },
-        {
+        [MDP_INTF5_7xxx_INTR] = {
                 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
                 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
                 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
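The conversion above to indexed (designated) array initializers ties each register set to its enum value, so reordering the enum or the table can no longer silently pair an interrupt block with the wrong entry. A small standalone example of the same C feature:

#include <stdio.h>

enum block { BLOCK_A, BLOCK_B, BLOCK_C, BLOCK_MAX };

struct regs { unsigned int clear, enable, status; };

/* Designated initializers: each entry lands at its enum slot no matter
 * what order it is written in. */
static const struct regs reg_map[BLOCK_MAX] = {
        [BLOCK_C] = { 0x300, 0x304, 0x308 },
        [BLOCK_A] = { 0x100, 0x104, 0x108 },
        [BLOCK_B] = { 0x200, 0x204, 0x208 },
};

int main(void)
{
        printf("BLOCK_B status reg: 0x%x\n", reg_map[BLOCK_B].status); /* 0x208 */
        return 0;
}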
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
                 __drm_atomic_helper_plane_destroy_state(plane->state);

         kfree(to_mdp5_plane_state(plane->state));
+        plane->state = NULL;
         mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+        if (!mdp5_state)
+                return;
         __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
 }

@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
         va_list va;

         new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+        if (!new_blk)
+                return;

         va_start(va, fmt);

@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
                         dp->dp_display.connector_type, state);
         mutex_unlock(&dp->event_mutex);

+        /*
+         * add fail safe mode outside event_mutex scope
+         * to avoid potiential circular lock with drm thread
+         */
+        dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
         /* uevent will complete connection part */
         return 0;
 };
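Moving dp_panel_add_fail_safe_mode() outside the event_mutex critical section keeps this path from holding event_mutex while also taking mode_config.mutex, which could form a lock-order cycle with another thread that takes the same two locks in the opposite order. The general hazard, sketched with hypothetical pthread mutexes (nothing here is msm code):

#include <pthread.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mode_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Safe pattern: finish with event_lock before touching mode_lock, so the
 * two locks are never held together and no A->B vs B->A cycle can form. */
static void hotplug_handler(void)
{
        pthread_mutex_lock(&event_lock);
        /* ... update hotplug state ... */
        pthread_mutex_unlock(&event_lock);

        pthread_mutex_lock(&mode_lock);
        /* ... add a fallback display mode ... */
        pthread_mutex_unlock(&mode_lock);
}

/* If hotplug_handler() instead held event_lock while taking mode_lock,
 * and another thread took mode_lock and then waited on event_lock, the
 * two would deadlock. */

int main(void)
{
        hotplug_handler();
        return 0;
}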
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
         return rc;
 }

+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+        /* fail safe edid */
+        mutex_lock(&connector->dev->mode_config.mutex);
+        if (drm_add_modes_noedid(connector, 640, 480))
+                drm_set_preferred_mode(connector, 640, 480);
+        mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
         struct drm_connector *connector)
 {
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                         goto end;
                 }
-
-                /* fail safe edid */
-                mutex_lock(&connector->dev->mode_config.mutex);
-                if (drm_add_modes_noedid(connector, 640, 480))
-                        drm_set_preferred_mode(connector, 640, 480);
-                mutex_unlock(&connector->dev->mode_config.mutex);
         } else {
-                /* always add fail-safe mode as backup mode */
-                mutex_lock(&connector->dev->mode_config.mutex);
-                drm_add_modes_noedid(connector, 640, 480);
-                mutex_unlock(&connector->dev->mode_config.mutex);
+                dp_panel_add_fail_safe_mode(connector);
         }

         if (panel->aux_cfg_update_done) {
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
 int dp_panel_deinit(struct dp_panel *dp_panel);
 int dp_panel_timing_cfg(struct dp_panel *dp_panel);
 void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                 struct drm_connector *connector);
 u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
         return connector;

 fail:
-        connector->funcs->destroy(msm_dsi->connector);
+        connector->funcs->destroy(connector);
         return ERR_PTR(ret);
 }

@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
         struct msm_drm_private *priv = dev->dev_private;

         /* a2xx comes with its own MMU */
-        return priv->is_a2xx || iommu_present(&platform_bus_type);
+        return priv->is_a2xx || device_iommu_mapped(dev->dev);
 }

 static int msm_init_vram(struct drm_device *dev)
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                                 get_pid_task(aspace->pid, PIDTYPE_PID);
                 if (task) {
                         comm = kstrdup(task->comm, GFP_KERNEL);
+                        put_task_struct(task);
                 } else {
                         comm = NULL;
                 }
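get_pid_task() returns the task with an elevated reference count, so every successful lookup must be balanced by put_task_struct() once the caller is done with it; the hunk above adds the missing release on this debugfs path. A hedged sketch of the general pattern (hypothetical helper, not the msm code):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/string.h>

/* Copy a process name out of a struct pid, dropping the task reference
 * taken by get_pid_task() on every path. Illustrative sketch only. */
static void copy_task_comm(struct pid *pid, char *buf, size_t len)
{
        struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

        if (!task) {
                buf[0] = '\0';
                return;
        }

        strscpy(buf, task->comm, len);
        put_task_struct(task);
}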