Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

The big things this time around are:
1) support for hw cursor on newer mdp5 devices (snapdragon 820+,
tested on db820c)
2) dsi encoder cleanup
3) gpu dt bindings cleanup so we can get the gpu nodes merged upstream

* 'msm-next' of git://people.freedesktop.org/~robclark/linux: (32 commits)
  drm/msm: return -EFAULT if copy_from_user() fails
  drm/msm/dsi: Add PHY/PLL for 8x96
  drm/msm/dsi: Add new method to calculate 14nm PHY timings
  drm/msm/dsi: Move PHY operations out of host
  drm/msm/dsi: Reset both PHYs before clock operation for dual DSI
  drm/msm/dsi: Pass down use case to PHY
  drm/msm/dsi: Return more timings from PHY to host
  drm/msm/dsi: Add a PHY op that initializes version specific stuff
  drm/msm/dsi: Add 8x96 info in dsi_cfg
  drm/msm/dsi: Don't error if a DSI host doesn't have a device connected
  drm/msm/mdp5: Add support for legacy cursor updates
  drm/msm/mdp5: Refactor mdp5_plane_atomic_check
  drm/msm/mdp5: Add cursor planes
  drm/msm/mdp5: Misc cursor plane bits
  drm/msm/mdp5: Configure COLOR3_OUT propagation
  drm/msm/mdp5: Use plane helpers to configure src/dst rectangles
  drm/msm/mdp5: Prepare CRTC/LM for empty stages
  drm/msm/mdp5: Create only as many CRTCs as we need
  drm/msm/mdp5: cfg: Change count to unsigned int
  drm/msm/mdp5: Create single encoder per interface (INTF)
  ...
Dave Airlie 2017-02-07 11:05:42 +10:00
commit 26d7f34cae
45 changed files with 2710 additions and 554 deletions

View File

@@ -1,23 +1,19 @@
 Qualcomm adreno/snapdragon GPU
 
 Required properties:
-- compatible: "qcom,adreno-3xx"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+    for example: "qcom,adreno-306.0", "qcom,adreno"
+  Note that you need to list the less specific "qcom,adreno" (since this
+  is what the device is matched on), in addition to the more specific
+  with the chip-id.
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the gpu.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
 - clock-names: the following clocks are required:
-  * "core_clk"
-  * "iface_clk"
-  * "mem_iface_clk"
+  * "core"
+  * "iface"
+  * "mem_iface"
-- qcom,chipid: gpu chip-id. Note this may become optional for future
-  devices if we can reliably read the chipid from hw
-- qcom,gpu-pwrlevels: list of operating points
-  - compatible: "qcom,gpu-pwrlevels"
-  - for each qcom,gpu-pwrlevel:
-    - qcom,gpu-freq: requested gpu clock speed
-    - NOTE: downstream android driver defines additional parameters to
-      configure memory bandwidth scaling per OPP.
 
 Example:
@@ -25,28 +21,18 @@ Example:
 	...
 
 	gpu: qcom,kgsl-3d0@4300000 {
-		compatible = "qcom,adreno-3xx";
+		compatible = "qcom,adreno-320.2", "qcom,adreno";
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
-		    "core_clk",
-		    "iface_clk",
-		    "mem_iface_clk";
+		    "core",
+		    "iface",
+		    "mem_iface";
 		clocks =
 		    <&mmcc GFX3D_CLK>,
 		    <&mmcc GFX3D_AHB_CLK>,
 		    <&mmcc MMSS_IMEM_AHB_CLK>;
-		qcom,chipid = <0x03020100>;
-
-		qcom,gpu-pwrlevels {
-			compatible = "qcom,gpu-pwrlevels";
-			qcom,gpu-pwrlevel@0 {
-				qcom,gpu-freq = <450000000>;
-			};
-			qcom,gpu-pwrlevel@1 {
-				qcom,gpu-freq = <27000000>;
-			};
-		};
 	};
 };

View File

@@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY
 	help
 	  Choose this option if the 28nm DSI PHY 8960 variant is used on the
 	  platform.
+
+config DRM_MSM_DSI_14NM_PHY
+	bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if DSI PHY on 8996 is used on the platform.
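For reference, a minimal config fragment that ends up with the new PHY driver built in might look like this (illustrative only; with default y the symbol already follows DRM_MSM_DSI):

	CONFIG_DRM_MSM=y
	CONFIG_DRM_MSM_DSI=y
	CONFIG_DRM_MSM_DSI_14NM_PHY=y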

View File

@@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 
 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
 msm-y += dsi/pll/dsi_pll.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
 endif
 
 obj-$(CONFIG_DRM_MSM) += msm.o

View File

@@ -12,6 +12,7 @@
  */
 
 #include "msm_gem.h"
+#include "msm_mmu.h"
 #include "a5xx_gpu.h"
 
 extern bool hang_debug;
@@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	/* Enable RBBM error reporting bits */
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
 
-	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
 		/*
 		 * Mask out the activity signals from RB1-3 to avoid false
 		 * positives
@@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
 
-	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
 
 	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
@@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu)
 	return true;
 }
 
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+	struct msm_gpu *gpu = arg;
+
+	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+			iova, flags,
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
+
+	return -EFAULT;
+}
+
 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
 {
 	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
@@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
 		return ERR_PTR(ret);
 	}
 
+	if (gpu->aspace)
+		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
 	return gpu;
 }

View File

@@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = {
 		.gmem  = (SZ_1M + SZ_512K),
 		.init  = a4xx_gpu_init,
 	}, {
-		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
+		.rev = ADRENO_REV(5, 3, 0, 2),
 		.revn = 530,
 		.name = "A530",
 		.pm4fw = "a530_pm4.fw",
 		.pfpfw = "a530_pfp.fw",
 		.gmem = SZ_1M,
+		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+			ADRENO_QUIRK_FAULT_DETECT_MASK,
 		.init = a5xx_gpu_init,
 		.gpmufw = "a530v3_gpmu.fw2",
 	},
@@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev,
 	priv->gpu_pdev = pdev;
 }
 
-static const struct {
-	const char *str;
-	uint32_t flag;
-} quirks[] = {
-	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
-	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
-};
+static int find_chipid(struct device *dev, u32 *chipid)
+{
+	struct device_node *node = dev->of_node;
+	const char *compat;
+	int ret;
+
+	/* first search the compat strings for qcom,adreno-XYZ.W: */
+	ret = of_property_read_string_index(node, "compatible", 0, &compat);
+	if (ret == 0) {
+		unsigned rev, patch;
+
+		if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
+			*chipid = 0;
+			*chipid |= (rev / 100) << 24;  /* core */
+			rev %= 100;
+			*chipid |= (rev / 10) << 16;   /* major */
+			rev %= 10;
+			*chipid |= rev << 8;           /* minor */
+			*chipid |= patch;
+
+			return 0;
+		}
+	}
+
+	/* and if that fails, fall back to legacy "qcom,chipid" property: */
+	ret = of_property_read_u32(node, "qcom,chipid", chipid);
+	if (ret)
+		return ret;
+
+	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+		(*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
+		(*chipid >> 8) & 0xff, *chipid & 0xff);
+
+	return 0;
+}
 
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
 	struct device_node *child, *node = dev->of_node;
 	u32 val;
-	int ret, i;
+	int ret;
 
-	ret = of_property_read_u32(node, "qcom,chipid", &val);
+	ret = find_chipid(dev, &val);
 	if (ret) {
 		dev_err(dev, "could not find chipid: %d\n", ret);
 		return ret;
@@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	if (!config.fast_rate) {
-		dev_err(dev, "could not find clk rates\n");
-		return -ENXIO;
+		dev_warn(dev, "could not find clk rates\n");
+		/* This is a safe low speed for all devices: */
+		config.fast_rate = 200000000;
+		config.slow_rate = 27000000;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(quirks); i++)
-		if (of_property_read_bool(node, quirks[i].str))
-			config.quirks |= quirks[i].flag;
-
 	dev->platform_data = &config;
 	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
 	return 0;
@@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,adreno-3xx" },
 	/* for backwards compat w/ downstream kgsl DT files: */
 	{ .compatible = "qcom,kgsl-3d0" },
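As a worked example of the chipid packing in the find_chipid() hunk above (hypothetical input, not part of the patch): the compatible "qcom,adreno-530.2" splits into rev 530 and patch 2, giving core 5, major 3, minor 0, patch 2, i.e. chipid 0x05030002. A standalone sketch of the same arithmetic, not driver code:

	#include <stdio.h>

	int main(void)
	{
		unsigned rev = 530, patch = 2, chipid = 0;

		chipid |= (rev / 100) << 24;	/* core:  5 */
		rev %= 100;
		chipid |= (rev / 10) << 16;	/* major: 3 */
		rev %= 10;
		chipid |= rev << 8;		/* minor: 0 */
		chipid |= patch;		/* patch: 2 */

		printf("chipid = 0x%08x\n", chipid);	/* prints 0x05030002 */
		return 0;
	}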

View File

@@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->gmem = adreno_gpu->info->gmem;
 	adreno_gpu->revn = adreno_gpu->info->revn;
 	adreno_gpu->rev = config->rev;
-	adreno_gpu->quirks = config->quirks;
 
 	gpu->fast_rate = config->fast_rate;
 	gpu->slow_rate = config->slow_rate;

View File

@@ -75,6 +75,7 @@ struct adreno_info {
 	const char *pm4fw, *pfpfw;
 	const char *gpmufw;
 	uint32_t gmem;
+	enum adreno_quirks quirks;
 	struct msm_gpu *(*init)(struct drm_device *dev);
 };
 
@@ -116,8 +117,6 @@ struct adreno_gpu {
 	 * code (a3xx_gpu.c) and stored in this common location.
 	 */
 	const unsigned int *reg_offsets;
-
-	uint32_t quirks;
 };
 
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -128,7 +127,6 @@ struct adreno_platform_config {
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
-	uint32_t quirks;
 };
 
 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

View File

@@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;
 
-	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
-		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
-		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+	return msm_dsi->encoder;
 }
 
 static int dsi_get_phy(struct msm_dsi *msm_dsi)
@@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void)
 }
 
 int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
-			 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+			 struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_bridge *ext_bridge;
-	int ret, i;
+	int ret;
 
-	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
-		!encoders[MSM_DSI_CMD_ENCODER_ID]))
+	if (WARN_ON(!encoder))
 		return -EINVAL;
 
 	msm_dsi->dev = dev;
@@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}
 
+	msm_dsi->encoder = encoder;
+
 	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 	if (IS_ERR(msm_dsi->bridge)) {
 		ret = PTR_ERR(msm_dsi->bridge);
@@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		goto fail;
 	}
 
-	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-		encoders[i]->bridge = msm_dsi->bridge;
-		msm_dsi->encoders[i] = encoders[i];
-	}
-
 	/*
 	 * check if the dsi encoder output is connected to a panel or an
 	 * external bridge. We create a connector only if we're connected to a

View File

@ -27,14 +27,24 @@
#define DSI_1 1 #define DSI_1 1
#define DSI_MAX 2 #define DSI_MAX 2
struct msm_dsi_phy_shared_timings;
struct msm_dsi_phy_clk_request;
enum msm_dsi_phy_type { enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM, MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP, MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_20NM, MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
MSM_DSI_PHY_MAX MSM_DSI_PHY_MAX
}; };
enum msm_dsi_phy_usecase {
MSM_DSI_PHY_STANDALONE,
MSM_DSI_PHY_MASTER,
MSM_DSI_PHY_SLAVE,
};
#define DSI_DEV_REGULATOR_MAX 8 #define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4 #define DSI_BUS_CLK_MAX 4
@ -73,8 +83,8 @@ struct msm_dsi {
struct device *phy_dev; struct device *phy_dev;
bool phy_enabled; bool phy_enabled;
/* the encoders we are hooked to (outside of dsi block) */ /* the encoder we are hooked to (outside of dsi block) */
struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]; struct drm_encoder *encoder;
int id; int id;
}; };
@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id); struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_phy_enable(int id,
const unsigned long bit_rate, const unsigned long esc_rate,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi); int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
struct clk **byte_clk_provider, struct clk **pixel_clk_provider); struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc);
#else #else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) { enum msm_dsi_phy_type type, int id) {
@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
{ {
return 0; return 0;
} }
static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc)
{
return -ENODEV;
}
#endif #endif
/* dsi host */ /* dsi host */
@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 dma_base, u32 len); u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host); int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings);
int msm_dsi_host_power_off(struct mipi_dsi_host *host); int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode); struct drm_display_mode *mode);
@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host); void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll); struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_host_destroy(struct mipi_dsi_host *host); void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev); struct drm_device *dev);
@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
/* dsi phy */ /* dsi phy */
struct msm_dsi_phy; struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings {
u32 clk_post;
u32 clk_pre;
bool clk_pre_inc_by_2;
};
struct msm_dsi_phy_clk_request {
unsigned long bitclk_rate;
unsigned long escclk_rate;
};
void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void); void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate); struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
u32 *clk_pre, u32 *clk_post); struct msm_dsi_phy_shared_timings *shared_timing);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
#endif /* __DSI_CONNECTOR_H__ */ #endif /* __DSI_CONNECTOR_H__ */

View File

@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2017 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x0000000f
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 0
static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
{
return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
}
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
{
return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
}
#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
{
return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
}
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
{
return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
}
static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
#endif /* DSI_XML */ #endif /* DSI_XML */

View File

@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
.num_dsi = 2, .num_dsi = 2,
}; };
/*
* TODO: core_mmss_clk fails to enable for some reason, but things work fine
* without it too. Figure out why it doesn't enable and uncomment below
*/
static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
{"vddio", 100000, 100 },/* 1.8 V */
},
},
.bus_clk_names = dsi_8996_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
.io_start = { 0x994000, 0x996000 },
.num_dsi = 2,
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8974_apq8084_dsi_cfg}, &msm8974_apq8084_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
}; };
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)

View File

@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0 #define MSM_DSI_V2_VER_MINOR_8064 0x0

View File

@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
return 0; return 0;
} }
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
DBG("");
dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
/* Make sure fully reset */
wmb();
udelay(1000);
dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
udelay(100);
}
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{ {
u32 intr; u32 intr;
@ -756,7 +745,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
} }
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
u32 clk_pre, u32 clk_post) struct msm_dsi_phy_shared_timings *phy_shared_timings)
{ {
u32 flags = msm_host->mode_flags; u32 flags = msm_host->mode_flags;
enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
@ -819,10 +808,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
(cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
phy_shared_timings->clk_pre_inc_by_2)
dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
data = 0; data = 0;
if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
@ -1482,6 +1477,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->format = dsi->format; msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags; msm_host->mode_flags = dsi->mode_flags;
msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
/* Some gpios defined in panel DT need to be controlled by host */ /* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret) if (ret)
@ -1557,8 +1554,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len); prop = of_find_property(ep, "data-lanes", &len);
if (!prop) { if (!prop) {
dev_dbg(dev, "failed to find data lane mapping\n"); dev_dbg(dev,
return -EINVAL; "failed to find data lane mapping, using default\n");
return 0;
} }
num_lanes = len / sizeof(u32); num_lanes = len / sizeof(u32);
@ -1615,7 +1613,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
struct device *dev = &msm_host->pdev->dev; struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
struct device_node *endpoint, *device_node; struct device_node *endpoint, *device_node;
int ret; int ret = 0;
/* /*
* Get the endpoint of the output port of the DSI host. In our case, * Get the endpoint of the output port of the DSI host. In our case,
@ -1639,8 +1637,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */ /* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_port_parent(endpoint); device_node = of_graph_get_remote_port_parent(endpoint);
if (!device_node) { if (!device_node) {
dev_err(dev, "%s: no valid device\n", __func__); dev_dbg(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err; goto err;
} }
@ -2118,6 +2115,28 @@ exit:
return ret; return ret;
} }
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
DBG("");
dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
/* Make sure fully reset */
wmb();
udelay(1000);
dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
udelay(100);
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
int msm_dsi_host_enable(struct mipi_dsi_host *host) int msm_dsi_host_enable(struct mipi_dsi_host *host)
{ {
struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@ -2165,10 +2184,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
SFPB_GPREG_MASTER_PORT_EN(en)); SFPB_GPREG_MASTER_PORT_EN(en));
} }
int msm_dsi_host_power_on(struct mipi_dsi_host *host) int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings)
{ {
struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
u32 clk_pre = 0, clk_post = 0;
int ret = 0; int ret = 0;
mutex_lock(&msm_host->dev_mutex); mutex_lock(&msm_host->dev_mutex);
@ -2179,12 +2198,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
msm_dsi_sfpb_config(msm_host, true); msm_dsi_sfpb_config(msm_host, true);
ret = dsi_calc_clk_rate(msm_host);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
goto unlock_ret;
}
ret = dsi_host_regulator_enable(msm_host); ret = dsi_host_regulator_enable(msm_host);
if (ret) { if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n", pr_err("%s:Failed to enable vregs.ret=%d\n",
@ -2192,23 +2205,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto unlock_ret; goto unlock_ret;
} }
ret = dsi_bus_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
goto fail_disable_reg;
}
dsi_phy_sw_reset(msm_host);
ret = msm_dsi_manager_phy_enable(msm_host->id,
msm_host->byte_clk_rate * 8,
msm_host->esc_clk_rate,
&clk_pre, &clk_post);
dsi_bus_clk_disable(msm_host);
if (ret) {
pr_err("%s: failed to enable phy, %d\n", __func__, ret);
goto fail_disable_reg;
}
ret = dsi_clk_ctrl(msm_host, 1); ret = dsi_clk_ctrl(msm_host, 1);
if (ret) { if (ret) {
pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret); pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
@ -2224,7 +2220,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_timing_setup(msm_host); dsi_timing_setup(msm_host);
dsi_sw_reset(msm_host); dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, clk_pre, clk_post); dsi_ctrl_config(msm_host, true, phy_shared_timings);
if (msm_host->disp_en_gpio) if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1); gpiod_set_value(msm_host->disp_en_gpio, 1);
@ -2253,15 +2249,13 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
goto unlock_ret; goto unlock_ret;
} }
dsi_ctrl_config(msm_host, false, 0, 0); dsi_ctrl_config(msm_host, false, NULL);
if (msm_host->disp_en_gpio) if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0); gpiod_set_value(msm_host->disp_en_gpio, 0);
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
msm_dsi_manager_phy_disable(msm_host->id);
dsi_clk_ctrl(msm_host, 0); dsi_clk_ctrl(msm_host, 0);
dsi_host_regulator_disable(msm_host); dsi_host_regulator_disable(msm_host);
@ -2281,6 +2275,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
if (msm_host->mode) { if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode); drm_mode_destroy(msm_host->dev, msm_host->mode);
@ -2293,6 +2288,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM; return -ENOMEM;
} }
ret = dsi_calc_clk_rate(msm_host);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return ret;
}
return 0; return 0;
} }
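As a quick sanity check on the values msm_dsi_host_get_phy_clk_req() (added above) hands to the PHY: the DSI byte clock is the per-lane bit clock divided by 8, so a host running at byte_clk_rate = 125 MHz requests bitclk_rate = 125 MHz * 8 = 1 GHz, along with its escape clock rate (illustrative numbers, not taken from this patch).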

View File

@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
return 0; return 0;
} }
static int dsi_mgr_host_register(int id) static int dsi_mgr_setup_components(int id)
{ {
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_pll *src_pll; struct msm_dsi_pll *src_pll;
int ret; int ret;
@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id)
if (ret) if (ret)
return ret; return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
} else if (!other_dsi) { } else if (!other_dsi) {
ret = 0; ret = 0;
} else { } else {
struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ? struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi; msm_dsi : other_dsi;
struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ? struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi; other_dsi : msm_dsi;
/* Register slave host first, so that slave DSI device /* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master * has a chance to probe, and do not block the master
* DSI device's probe. * DSI device's probe.
@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id)
* because only master DSI device adds the panel to global * because only master DSI device adds the panel to global
* panel list. The panel's device is the master DSI device. * panel list. The panel's device is the master DSI device.
*/ */
ret = msm_dsi_host_register(sdsi->host, false); ret = msm_dsi_host_register(slave_link_dsi->host, false);
if (ret) if (ret)
return ret; return ret;
ret = msm_dsi_host_register(mdsi->host, true); ret = msm_dsi_host_register(master_link_dsi->host, true);
if (ret) if (ret)
return ret; return ret;
/* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */ /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
msm_dsi_phy_set_usecase(clk_master_dsi->phy,
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
if (ret) if (ret)
@ -119,6 +125,84 @@ static int dsi_mgr_host_register(int id)
return ret; return ret;
} }
static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
return ret;
}
static int
dsi_mgr_phy_enable(int id,
struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
int ret;
/* In case of dual DSI, some registers in PHY1 have been programmed
* during PLL0 clock's set_rate. The PHY1 reset called by host1 here
* will silently reset those PHY1 registers. Therefore we need to reset
* and enable both PHYs before any PLL clock operation.
*/
if (IS_DUAL_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_host_reset_phy(mdsi->host);
msm_dsi_host_reset_phy(sdsi->host);
ret = enable_phy(mdsi, src_pll_id,
&shared_timings[DSI_CLOCK_MASTER]);
if (ret)
return ret;
ret = enable_phy(sdsi, src_pll_id,
&shared_timings[DSI_CLOCK_SLAVE]);
if (ret) {
msm_dsi_phy_disable(mdsi->phy);
return ret;
}
}
} else {
msm_dsi_host_reset_phy(mdsi->host);
ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
if (ret)
return ret;
}
msm_dsi->phy_enabled = true;
return 0;
}
static void dsi_mgr_phy_disable(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
/* disable DSI phy
* In dual-dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
if (IS_DUAL_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
}
} else {
msm_dsi_phy_disable(msm_dsi->phy);
}
}
struct dsi_connector { struct dsi_connector {
struct drm_connector base; struct drm_connector base;
int id; int id;
@ -168,6 +252,16 @@ static enum drm_connector_status dsi_mgr_connector_detect(
msm_dsi->panel = msm_dsi_host_get_panel( msm_dsi->panel = msm_dsi_host_get_panel(
other_dsi->host, NULL); other_dsi->host, NULL);
if (msm_dsi->panel && kms->funcs->set_encoder_mode) {
bool cmd_mode = !(msm_dsi->device_flags &
MIPI_DSI_MODE_VIDEO);
struct drm_encoder *encoder =
msm_dsi_get_encoder(msm_dsi);
kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
if (msm_dsi->panel && IS_DUAL_DSI()) if (msm_dsi->panel && IS_DUAL_DSI())
drm_object_attach_property(&connector->base, drm_object_attach_property(&connector->base,
connector->dev->mode_config.tile_property, 0); connector->dev->mode_config.tile_property, 0);
@ -344,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host; struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel; struct drm_panel *panel = msm_dsi->panel;
struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
bool is_dual_dsi = IS_DUAL_DSI(); bool is_dual_dsi = IS_DUAL_DSI();
int ret; int ret;
DBG("id=%d", id); DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi) || if (!msm_dsi_device_connected(msm_dsi))
(is_dual_dsi && (DSI_1 == id)))
return; return;
ret = msm_dsi_host_power_on(host); ret = dsi_mgr_phy_enable(id, phy_shared_timings);
if (ret)
goto phy_en_fail;
/* Do nothing with the host if it is DSI 1 in case of dual DSI */
if (is_dual_dsi && (DSI_1 == id))
return;
ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
if (ret) { if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail; goto host_on_fail;
} }
if (is_dual_dsi && msm_dsi1) { if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host); ret = msm_dsi_host_power_on(msm_dsi1->host,
&phy_shared_timings[DSI_1]);
if (ret) { if (ret) {
pr_err("%s: power on host1 failed, %d\n", pr_err("%s: power on host1 failed, %d\n",
__func__, ret); __func__, ret);
@ -418,6 +521,8 @@ panel_prep_fail:
host1_on_fail: host1_on_fail:
msm_dsi_host_power_off(host); msm_dsi_host_power_off(host);
host_on_fail: host_on_fail:
dsi_mgr_phy_disable(id);
phy_en_fail:
return; return;
} }
@ -443,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
DBG("id=%d", id); DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi) || if (!msm_dsi_device_connected(msm_dsi))
(is_dual_dsi && (DSI_1 == id)))
return; return;
/*
* Do nothing with the host if it is DSI 1 in case of dual DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
* won't be disabled until both PHYs request disable.
*/
if (is_dual_dsi && (DSI_1 == id))
goto disable_phy;
if (panel) { if (panel) {
ret = drm_panel_disable(panel); ret = drm_panel_disable(panel);
if (ret) if (ret)
@ -481,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 power off failed, %d\n", pr_err("%s: host1 power off failed, %d\n",
__func__, ret); __func__, ret);
} }
disable_phy:
dsi_mgr_phy_disable(id);
} }
static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
@ -540,7 +655,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_connector *connector = NULL; struct drm_connector *connector = NULL;
struct dsi_connector *dsi_connector; struct dsi_connector *dsi_connector;
int ret, i; int ret;
dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
if (!dsi_connector) if (!dsi_connector)
@ -566,9 +681,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0; connector->interlace_allowed = 0;
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
drm_mode_connector_attach_encoder(connector,
msm_dsi->encoders[i]);
return connector; return connector;
} }
@ -591,13 +704,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
dsi_bridge->id = id; dsi_bridge->id = id;
/* encoder = msm_dsi->encoder;
* HACK: we may not know the external DSI bridge device's mode
* flags here. We'll get to know them only when the device
* attaches to the dsi host. For now, assume the bridge supports
* DSI video mode
*/
encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
bridge = &dsi_bridge->base; bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs; bridge->funcs = &dsi_mgr_bridge_funcs;
@ -628,13 +735,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ext_bridge = msm_dsi->external_bridge = ext_bridge = msm_dsi->external_bridge =
msm_dsi_host_get_bridge(msm_dsi->host); msm_dsi_host_get_bridge(msm_dsi->host);
/* encoder = msm_dsi->encoder;
* HACK: we may not know the external DSI bridge device's mode
* flags here. We'll get to know them only when the device
* attaches to the dsi host. For now, assume the bridge supports
* DSI video mode
*/
encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
/* link the internal dsi bridge to the external bridge */ /* link the internal dsi bridge to the external bridge */
drm_bridge_attach(encoder, ext_bridge, int_bridge); drm_bridge_attach(encoder, ext_bridge, int_bridge);
@ -662,68 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{ {
} }
int msm_dsi_manager_phy_enable(int id,
const unsigned long bit_rate, const unsigned long esc_rate,
u32 *clk_pre, u32 *clk_post)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi_phy *phy = msm_dsi->phy;
int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
int ret;
ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
if (ret)
return ret;
/*
* Reset DSI PHY silently changes its PLL registers to reset status,
* which will confuse clock driver and result in wrong output rate of
* link clocks. Restore PLL status if its PLL is being used as clock
* source.
*/
if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
ret = msm_dsi_pll_restore_state(pll);
if (ret) {
pr_err("%s: failed to restore pll state\n", __func__);
msm_dsi_phy_disable(phy);
return ret;
}
}
msm_dsi->phy_enabled = true;
msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
return 0;
}
void msm_dsi_manager_phy_disable(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
struct msm_dsi_phy *phy = msm_dsi->phy;
struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
/* Save PLL status if it is a clock source */
if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
msm_dsi_pll_save_state(pll);
/* disable DSI phy
* In dual-dsi configuration, the phy should be disabled for the
* first controller only when the second controller is disabled.
*/
msm_dsi->phy_enabled = false;
if (IS_DUAL_DSI() && mdsi && sdsi) {
if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
msm_dsi_phy_disable(sdsi->phy);
msm_dsi_phy_disable(mdsi->phy);
}
} else {
msm_dsi_phy_disable(phy);
}
}
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{ {
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@ -787,6 +826,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
return true; return true;
} }
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
* path. If mipi_dsi_attach() happens in DSI driver's probe path
* (generally the case when we're connected to a drm_panel of the type
* mipi_dsi_device), this would be NULL. In such cases, try to set the
* encoder mode in the DSI connector's detect() op.
*/
if (!dev)
return;
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
if (encoder && kms->funcs->set_encoder_mode)
if (!(device_flags & MIPI_DSI_MODE_VIDEO))
kms->funcs->set_encoder_mode(kms, encoder, true);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi) int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
{ {
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
@ -811,7 +877,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
goto fail; goto fail;
} }
ret = dsi_mgr_host_register(id); ret = dsi_mgr_setup_components(id);
if (ret) { if (ret) {
pr_err("%s: failed to register mipi dsi host for DSI %d\n", pr_err("%s: failed to register mipi dsi host for DSI %d\n",
__func__, id); __func__, id);


@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
} }
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
const unsigned long bit_rate, const unsigned long esc_rate) struct msm_dsi_phy_clk_request *clk_req)
{ {
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, lpx; s32 ui, lpx;
s32 tmax, tmin; s32 tmax, tmin;
s32 pcnt0 = 10; s32 pcnt0 = 10;
@ -115,8 +117,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
temp = 60 * coeff + 52 * ui - 24 * ui - temp; temp = 60 * coeff + 52 * ui - 24 * ui - temp;
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
false);
tmax = 63; tmax = 63;
temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
@ -124,17 +126,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
if (tmin > tmax) { if (tmin > tmax) {
temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
timing->clk_pre = temp >> 1; timing->shared_timings.clk_pre = temp >> 1;
timing->shared_timings.clk_pre_inc_by_2 = true;
} else { } else {
timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = false;
} }
timing->ta_go = 3; timing->ta_go = 3;
timing->ta_sure = 0; timing->ta_sure = 0;
timing->ta_get = 4; timing->ta_get = 4;
DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->clk_pre, timing->clk_post, timing->clk_zero, timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst); timing->hs_rqst);
@ -142,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
return 0; return 0;
} }
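All of the timing fields above are chosen with linear_inter(), which predates this series and is not shown in the diff. Below is a stand-alone sketch of the idea, assuming the helper simply picks a point percent% of the way from tmin toward tmax (the real helper also takes an offset and an "even" flag), together with the clk_pre halving trick used when tmin exceeds tmax:

#include <stdio.h>

typedef int s32;

/* Illustrative stand-in for linear_inter(); offset/"even" handling omitted. */
static s32 linear_inter_sketch(s32 tmax, s32 tmin, s32 percent)
{
	if (tmax <= tmin)
		return tmin;
	return tmin + ((tmax - tmin) * percent) / 100;
}

int main(void)
{
	s32 tmax = 63, tmin = 10, pcnt2 = 10;

	/* Usual case: land 10% of the way into the [tmin, tmax] window. */
	printf("clk_pre = %d\n", linear_inter_sketch(tmax, tmin, pcnt2));

	/*
	 * tmin > tmax case from the hunk above: interpolate over a doubled
	 * range, halve the result, and set clk_pre_inc_by_2 so the host can
	 * make up for the precision lost by the shift.
	 */
	tmin = 100;
	printf("clk_pre (halved) = %d\n",
	       linear_inter_sketch(2 * tmax, tmin, pcnt2) >> 1);
	return 0;
}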
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req)
{
const unsigned long bit_rate = clk_req->bitclk_rate;
const unsigned long esc_rate = clk_req->escclk_rate;
s32 ui, ui_x8, lpx;
s32 tmax, tmin;
s32 pcnt0 = 50;
s32 pcnt1 = 50;
s32 pcnt2 = 10;
s32 pcnt3 = 30;
s32 pcnt4 = 10;
s32 pcnt5 = 2;
s32 coeff = 1000; /* Precision, should avoid overflow */
s32 hb_en, hb_en_ckln, pd_ckln, pd;
s32 val, val_ckln;
s32 temp;
if (!bit_rate || !esc_rate)
return -EINVAL;
timing->hs_halfbyte_en = 0;
hb_en = 0;
timing->hs_halfbyte_en_ckln = 0;
hb_en_ckln = 0;
timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
pd_ckln = timing->hs_prep_dly_ckln;
timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
pd = timing->hs_prep_dly;
val = (hb_en << 2) + (pd << 1);
val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
ui_x8 = ui << 3;
lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (95 * coeff - val_ckln * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
tmax = (tmin > 255) ? 511 : 255;
timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
tmin = max_t(s32, temp, 0);
temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
tmax = max_t(s32, temp, 0);
timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
tmax = 255;
timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
temp = 105 * coeff + 12 * ui - 20 * coeff;
tmax = (temp + 3 * ui) / ui_x8;
timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
tmax = 255;
timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
temp = 60 * coeff + 52 * ui - 43 * ui;
tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
timing->shared_timings.clk_post =
linear_inter(tmax, tmin, pcnt2, 0, false);
temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
(((timing->hs_rqst_ckln << 3) + 8) * ui);
tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
tmax = 63;
if (tmin > tmax) {
temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre = temp >> 1;
timing->shared_timings.clk_pre_inc_by_2 = 1;
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
timing->clk_trail, timing->clk_prepare, timing->hs_exit,
timing->hs_zero, timing->hs_prepare, timing->hs_trail,
timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
timing->hs_prep_dly_ckln);
return 0;
}
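To get a feel for the fixed-point arithmetic in the v2 calculation, here is the ui/lpx step worked through with hypothetical rates (a 1.5 Gbit/s link and a 19.2 MHz escape clock); mult_frac() is written out long-hand since its only job here is x * n / d without losing precision:

#include <stdio.h>

#define NSEC_PER_MSEC 1000000L

int main(void)
{
	long bit_rate = 1500000000L;	/* hypothetical: 1.5 Gbit/s per lane */
	long esc_rate = 19200000L;	/* hypothetical: 19.2 MHz escape clock */
	long coeff = 1000;		/* same precision factor as the driver */
	long ui, ui_x8, lpx;

	/* ui = one bit period in ns, scaled up by coeff */
	ui = NSEC_PER_MSEC * coeff / (bit_rate / 1000);	/* 666 -> 0.666 ns */
	ui_x8 = ui << 3;				/* 5328 -> 5.328 ns */

	/* lpx = one escape-clock period in ns, scaled up by coeff */
	lpx = NSEC_PER_MSEC * coeff / (esc_rate / 1000);	/* 52083 -> 52.083 ns */

	printf("ui=%ld ui_x8=%ld lpx=%ld (ns * %ld)\n", ui, ui_x8, lpx, coeff);
	return 0;
}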
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask) u32 bit_mask)
{ {
@ -267,6 +390,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY #ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
{ .compatible = "qcom,dsi-phy-28nm-8960", { .compatible = "qcom,dsi-phy-28nm-8960",
.data = &dsi_phy_28nm_8960_cfgs }, .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
#endif #endif
{} {}
}; };
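The new compatible string slots into the standard of_device_id flow: the DT node's compatible selects the per-version cfg attached as .data. The sketch below models that lookup in plain C; the struct and helper names are made up, and how the driver actually fetches the match data (e.g. via of_device_get_match_data()) is not part of this hunk.

#include <stdio.h>
#include <string.h>

struct phy_cfg_sketch { const char *name; };

static const struct phy_cfg_sketch cfg_28nm_8960 = { "28nm (8960)" };
static const struct phy_cfg_sketch cfg_14nm = { "14nm" };

struct of_id_sketch {
	const char *compatible;
	const struct phy_cfg_sketch *data;
};

/* Compatible strings mirror the table above; everything else is illustrative. */
static const struct of_id_sketch match[] = {
	{ "qcom,dsi-phy-28nm-8960", &cfg_28nm_8960 },
	{ "qcom,dsi-phy-14nm",      &cfg_14nm },
	{ NULL, NULL },
};

static const struct phy_cfg_sketch *lookup(const char *compat)
{
	const struct of_id_sketch *id;

	for (id = match; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct phy_cfg_sketch *cfg = lookup("qcom,dsi-phy-14nm");

	printf("matched cfg: %s\n", cfg ? cfg->name : "none");
	return 0;
}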
@ -295,6 +422,24 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy)
return -EINVAL; return -EINVAL;
} }
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
int ret = 0;
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
}
fail:
return ret;
}
static int dsi_phy_driver_probe(struct platform_device *pdev) static int dsi_phy_driver_probe(struct platform_device *pdev)
{ {
struct msm_dsi_phy *phy; struct msm_dsi_phy *phy;
@ -331,15 +476,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail; goto fail;
} }
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
dev_err(dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy); ret = dsi_phy_regulator_init(phy);
if (ret) { if (ret) {
dev_err(dev, "%s: failed to init regulator\n", __func__); dev_err(dev, "%s: failed to init regulator\n", __func__);
@ -353,6 +489,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail; goto fail;
} }
if (phy->cfg->ops.init) {
ret = phy->cfg->ops.init(phy);
if (ret)
goto fail;
}
/* PLL init will call into clk_register which requires /* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock. * register access, so we need to enable power and ahb clock.
*/ */
@ -410,7 +552,7 @@ void __exit msm_dsi_phy_driver_unregister(void)
} }
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate) struct msm_dsi_phy_clk_request *clk_req)
{ {
struct device *dev = &phy->pdev->dev; struct device *dev = &phy->pdev->dev;
int ret; int ret;
@ -418,21 +560,52 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (!phy || !phy->cfg->ops.enable) if (!phy || !phy->cfg->ops.enable)
return -EINVAL; return -EINVAL;
ret = dsi_phy_enable_resource(phy);
if (ret) {
dev_err(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy); ret = dsi_phy_regulator_enable(phy);
if (ret) { if (ret) {
dev_err(dev, "%s: regulator enable failed, %d\n", dev_err(dev, "%s: regulator enable failed, %d\n",
__func__, ret); __func__, ret);
return ret; goto reg_en_fail;
} }
ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) { if (ret) {
dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
dsi_phy_regulator_disable(phy); goto phy_en_fail;
return ret; }
/*
* Resetting the DSI PHY silently changes its PLL registers to their reset
* values, which will confuse the clock driver and result in a wrong output
* rate for the link clocks. Restore the PLL state if this PHY's PLL is
* used as a clock source.
*/
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
dev_err(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
} }
return 0; return 0;
pll_restor_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
dsi_phy_regulator_disable(phy);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
return ret;
} }
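The enable path now unwinds through a label ladder, so each failure only undoes the steps that already succeeded, in reverse order. A minimal stand-alone sketch of the pattern, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* simulate a late failure */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int enable_all(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto a_fail;
	ret = step_b();
	if (ret)
		goto b_fail;
	ret = step_c();
	if (ret)
		goto c_fail;

	return 0;

c_fail:
	undo_b();	/* only the steps that succeeded get undone */
b_fail:
	undo_a();
a_fail:
	return ret;
}

int main(void)
{
	printf("enable_all() = %d\n", enable_all());
	return 0;
}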
void msm_dsi_phy_disable(struct msm_dsi_phy *phy) void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
@ -440,21 +613,21 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
if (!phy || !phy->cfg->ops.disable) if (!phy || !phy->cfg->ops.disable)
return; return;
/* Save PLL status if it is a clock source */
if (phy->usecase != MSM_DSI_PHY_SLAVE)
msm_dsi_pll_save_state(phy->pll);
phy->cfg->ops.disable(phy); phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy); dsi_phy_regulator_disable(phy);
dsi_phy_disable_resource(phy);
} }
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
u32 *clk_pre, u32 *clk_post) struct msm_dsi_phy_shared_timings *shared_timings)
{ {
if (!phy) memcpy(shared_timings, &phy->timing.shared_timings,
return; sizeof(*shared_timings));
if (clk_pre)
*clk_pre = phy->timing.clk_pre;
if (clk_post)
*clk_post = phy->timing.clk_post;
} }
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
@ -465,3 +638,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
return phy->pll; return phy->pll;
} }
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc)
{
if (phy)
phy->usecase = uc;
}


@ -22,8 +22,9 @@
#define dsi_phy_write(offset, data) msm_writel((data), (offset)) #define dsi_phy_write(offset, data) msm_writel((data), (offset))
struct msm_dsi_phy_ops { struct msm_dsi_phy_ops {
int (*init) (struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate); struct msm_dsi_phy_clk_request *clk_req);
void (*disable)(struct msm_dsi_phy *phy); void (*disable)(struct msm_dsi_phy *phy);
}; };
@ -46,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
struct msm_dsi_dphy_timing { struct msm_dsi_dphy_timing {
u32 clk_pre; u32 clk_pre;
@ -61,12 +63,22 @@ struct msm_dsi_dphy_timing {
u32 ta_go; u32 ta_go;
u32 ta_sure; u32 ta_sure;
u32 ta_get; u32 ta_get;
struct msm_dsi_phy_shared_timings shared_timings;
/* For PHY v2 only */
u32 hs_rqst_ckln;
u32 hs_prep_dly;
u32 hs_prep_dly_ckln;
u8 hs_halfbyte_en;
u8 hs_halfbyte_en_ckln;
}; };
struct msm_dsi_phy { struct msm_dsi_phy {
struct platform_device *pdev; struct platform_device *pdev;
void __iomem *base; void __iomem *base;
void __iomem *reg_base; void __iomem *reg_base;
void __iomem *lane_base;
int id; int id;
struct clk *ahb_clk; struct clk *ahb_clk;
@ -75,6 +87,7 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing; struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg; const struct msm_dsi_phy_cfg *cfg;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode; bool regulator_ldo_mode;
struct msm_dsi_pll *pll; struct msm_dsi_pll *pll;
@ -84,9 +97,12 @@ struct msm_dsi_phy {
* PHY internal functions * PHY internal functions
*/ */
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
const unsigned long bit_rate, const unsigned long esc_rate); struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask); u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
#endif /* __DSI_PHY_H__ */ #endif /* __DSI_PHY_H__ */


@ -0,0 +1,169 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dsi_phy.h"
#include "dsi.xml.h"
#define PHY_14NM_CKLN_IDX 4
static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
struct msm_dsi_dphy_timing *timing,
int lane_idx)
{
void __iomem *base = phy->lane_base;
bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
timing->hs_halfbyte_en;
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
}
static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_dphy_timing *timing = &phy->timing;
u32 data;
int i;
int ret;
void __iomem *base = phy->base;
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
data = 0x1c;
if (phy->usecase != MSM_DSI_PHY_STANDALONE)
data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
/* 4 data lanes + 1 clk lane configuration */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
0x1d);
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
(i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
(i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
0);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
0x88);
dsi_14nm_dphy_set_timing(phy, timing, i);
}
/* Make sure the PLL is not started */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
wmb(); /* make sure everything is written before reset and enable */
/* reset digital block */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
wmb(); /* ensure reset is asserted */
udelay(100);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
msm_dsi_phy_set_src_pll(phy, src_pll_id,
REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL,
DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL);
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
/* Remove power down from PLL and all lanes */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
return 0;
}
static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
/* ensure that the phy is completely disabled */
wmb();
}
static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
{
struct platform_device *pdev = phy->pdev;
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
return 0;
}
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.type = MSM_DSI_PHY_14NM,
.src_pll_truthtable = { {false, false}, {true, false} },
.reg_cfg = {
.num = 1,
.regs = {
{"vcca", 17000, 32},
},
},
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.init = dsi_14nm_phy_init,
},
.io_start = { 0x994400, 0x996400 },
.num_dsi_phy = 2,
};


@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
} }
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate) struct msm_dsi_phy_clk_request *clk_req)
{ {
struct msm_dsi_dphy_timing *timing = &phy->timing; struct msm_dsi_dphy_timing *timing = &phy->timing;
int i; int i;
@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG(""); DBG("");
if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev, dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__); "%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL; return -EINVAL;
@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = { .ops = {
.enable = dsi_20nm_phy_enable, .enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable, .disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
}, },
.io_start = { 0xfd998300, 0xfd9a0300 }, .io_start = { 0xfd998300, 0xfd9a0300 },
.num_dsi_phy = 2, .num_dsi_phy = 2,


@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
} }
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate) struct msm_dsi_phy_clk_request *clk_req)
{ {
struct msm_dsi_dphy_timing *timing = &phy->timing; struct msm_dsi_dphy_timing *timing = &phy->timing;
int i; int i;
@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG(""); DBG("");
if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev, dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__); "%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL; return -EINVAL;
@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.ops = { .ops = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
.init = msm_dsi_phy_init_common,
}, },
.io_start = { 0xfd922b00, 0xfd923100 }, .io_start = { 0xfd922b00, 0xfd923100 },
.num_dsi_phy = 2, .num_dsi_phy = 2,
@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.ops = { .ops = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
.init = msm_dsi_phy_init_common,
}, },
.io_start = { 0x1a98500 }, .io_start = { 0x1a98500 },
.num_dsi_phy = 1, .num_dsi_phy = 1,


@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
} }
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
const unsigned long bit_rate, const unsigned long esc_rate) struct msm_dsi_phy_clk_request *clk_req)
{ {
struct msm_dsi_dphy_timing *timing = &phy->timing; struct msm_dsi_dphy_timing *timing = &phy->timing;
void __iomem *base = phy->base; void __iomem *base = phy->base;
DBG(""); DBG("");
if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
dev_err(&phy->pdev->dev, dev_err(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__); "%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL; return -EINVAL;
@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.ops = { .ops = {
.enable = dsi_28nm_phy_enable, .enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable, .disable = dsi_28nm_phy_disable,
.init = msm_dsi_phy_init_common,
}, },
.io_start = { 0x4700300, 0x5800300 }, .io_start = { 0x4700300, 0x5800300 },
.num_dsi_phy = 2, .num_dsi_phy = 2,


@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
return 0; return 0;
} }
int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc)
{
if (pll->set_usecase)
return pll->set_usecase(pll, uc);
return 0;
}
struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id) enum msm_dsi_phy_type type, int id)
{ {
@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_28NM_8960: case MSM_DSI_PHY_28NM_8960:
pll = msm_dsi_pll_28nm_8960_init(pdev, id); pll = msm_dsi_pll_28nm_8960_init(pdev, id);
break; break;
case MSM_DSI_PHY_14NM:
pll = msm_dsi_pll_14nm_init(pdev, id);
break;
default: default:
pll = ERR_PTR(-ENXIO); pll = ERR_PTR(-ENXIO);
break; break;


@ -41,6 +41,8 @@ struct msm_dsi_pll {
void (*destroy)(struct msm_dsi_pll *pll); void (*destroy)(struct msm_dsi_pll *pll);
void (*save_state)(struct msm_dsi_pll *pll); void (*save_state)(struct msm_dsi_pll *pll);
int (*restore_state)(struct msm_dsi_pll *pll); int (*restore_state)(struct msm_dsi_pll *pll);
int (*set_usecase)(struct msm_dsi_pll *pll,
enum msm_dsi_phy_usecase uc);
}; };
#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
} }
#endif #endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id);
#else
static inline struct msm_dsi_pll *
msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
{
return ERR_PTR(-ENODEV);
}
#endif
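The #ifdef/#else stub above keeps callers free of preprocessor checks: with the 14nm PLL compiled out, msm_dsi_pll_14nm_init() collapses to an inline that returns ERR_PTR(-ENODEV), and msm_dsi_pll_init() treats that like any other failure. A rough userspace model of that contract (ERR_PTR/PTR_ERR/IS_ERR are approximated here, not the kernel's definitions):

#include <stdio.h>
#include <errno.h>

struct msm_dsi_pll;

static inline void *ERR_PTR_sketch(long err) { return (void *)err; }
static inline long PTR_ERR_sketch(const void *p) { return (long)p; }
static inline int IS_ERR_sketch(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

/* Stand-in for the stub built when CONFIG_DRM_MSM_DSI_14NM_PHY is not set. */
static inline struct msm_dsi_pll *pll_14nm_init_stub(void)
{
	return ERR_PTR_sketch(-ENODEV);
}

int main(void)
{
	struct msm_dsi_pll *pll = pll_14nm_init_stub();

	if (IS_ERR_sketch(pll))
		printf("pll init failed: %ld\n", PTR_ERR_sketch(pll));
	return 0;
}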
#endif /* __DSI_PLL_H__ */ #endif /* __DSI_PLL_H__ */

File diff suppressed because it is too large


@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct drm_encoder *encoder; struct drm_encoder *encoder;
struct drm_connector *connector; struct drm_connector *connector;
struct device_node *panel_node; struct device_node *panel_node;
struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; int dsi_id;
int i, dsi_id;
int ret; int ret;
switch (intf_type) { switch (intf_type) {
@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
if (!priv->dsi[dsi_id]) if (!priv->dsi[dsi_id])
break; break;
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { encoder = mdp4_dsi_encoder_init(dev);
dsi_encs[i] = mdp4_dsi_encoder_init(dev); if (IS_ERR(encoder)) {
if (IS_ERR(dsi_encs[i])) { ret = PTR_ERR(encoder);
ret = PTR_ERR(dsi_encs[i]); dev_err(dev->dev,
dev_err(dev->dev, "failed to construct DSI encoder: %d\n", ret);
"failed to construct DSI encoder: %d\n", return ret;
ret);
return ret;
}
/* TODO: Add DMA_S later? */
dsi_encs[i]->possible_crtcs = 1 << DMA_P;
priv->encoders[priv->num_encoders++] = dsi_encs[i];
} }
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); /* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
priv->encoders[priv->num_encoders++] = encoder;
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) { if (ret) {
dev_err(dev->dev, "failed to initialize DSI: %d\n", dev_err(dev->dev, "failed to initialize DSI: %d\n",
ret); ret);

View File

@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are: The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) - /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) - /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) - /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2016 by the following authors: Copyright (C) 2013-2017 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark) - Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@ -65,16 +57,19 @@ enum mdp5_intfnum {
}; };
enum mdp5_pipe { enum mdp5_pipe {
SSPP_VIG0 = 0, SSPP_NONE = 0,
SSPP_VIG1 = 1, SSPP_VIG0 = 1,
SSPP_VIG2 = 2, SSPP_VIG1 = 2,
SSPP_RGB0 = 3, SSPP_VIG2 = 3,
SSPP_RGB1 = 4, SSPP_RGB0 = 4,
SSPP_RGB2 = 5, SSPP_RGB1 = 5,
SSPP_DMA0 = 6, SSPP_RGB2 = 6,
SSPP_DMA1 = 7, SSPP_DMA0 = 7,
SSPP_VIG3 = 8, SSPP_DMA1 = 8,
SSPP_RGB3 = 9, SSPP_VIG3 = 9,
SSPP_RGB3 = 10,
SSPP_CURSOR0 = 11,
SSPP_CURSOR1 = 12,
}; };
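The renumbering above is not cosmetic: with SSPP_NONE as value 0, a partially initialized stage array zero-fills to "no pipe", which is what the empty-stage handling in blend_setup()/mdp5_ctl_blend() later in this series relies on. A small illustration (enum values mirror the header, STAGE_MAX is only a stand-in):

#include <stdio.h>

enum mdp5_pipe_sketch {
	SSPP_NONE = 0,
	SSPP_VIG0 = 1,
	SSPP_RGB0 = 4,
	SSPP_CURSOR0 = 11,
};

#define STAGE_MAX 7

int main(void)
{
	/* Element 0 is set explicitly; the rest zero-fill to SSPP_NONE. */
	enum mdp5_pipe_sketch stage[STAGE_MAX + 1] = { SSPP_NONE };
	int i;

	stage[0] = SSPP_RGB0;			/* base layer */
	stage[STAGE_MAX] = SSPP_CURSOR0;	/* cursor always on top */

	for (i = 0; i <= STAGE_MAX; i++) {
		if (stage[i] == SSPP_NONE)
			continue;	/* empty stage, nothing to blend */
		printf("stage %d -> pipe %d\n", i, stage[i]);
	}
	return 0;
}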
enum mdp5_ctl_mode { enum mdp5_ctl_mode {
@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va
static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
{ {
switch (idx) { switch (idx) {
case SSPP_NONE: return (INVALID_IDX(idx));
case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
default: return INVALID_IDX(idx); default: return INVALID_IDX(idx);
} }
} }
@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000


@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = {
MDP_PIPE_CAP_SW_PIX_EXT | MDP_PIPE_CAP_SW_PIX_EXT |
0, 0,
}, },
.pipe_cursor = {
.count = 2,
.base = { 0x34000, 0x36000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = { .lm = {
.count = 6, .count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },


@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg;
typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
#define MDP5_SUB_BLOCK_DEFINITION \ #define MDP5_SUB_BLOCK_DEFINITION \
int count; \ unsigned int count; \
uint32_t base[MAX_BASES] uint32_t base[MAX_BASES]
struct mdp5_sub_block { struct mdp5_sub_block {
@ -85,6 +85,7 @@ struct mdp5_cfg_hw {
struct mdp5_pipe_block pipe_vig; struct mdp5_pipe_block pipe_vig;
struct mdp5_pipe_block pipe_rgb; struct mdp5_pipe_block pipe_rgb;
struct mdp5_pipe_block pipe_dma; struct mdp5_pipe_block pipe_dma;
struct mdp5_pipe_block pipe_cursor;
struct mdp5_lm_block lm; struct mdp5_lm_block lm;
struct mdp5_sub_block dspp; struct mdp5_sub_block dspp;
struct mdp5_sub_block ad; struct mdp5_sub_block ad;


@ -16,16 +16,6 @@
#include "drm_crtc.h" #include "drm_crtc.h"
#include "drm_crtc_helper.h" #include "drm_crtc_helper.h"
struct mdp5_cmd_encoder {
struct drm_encoder base;
struct mdp5_interface intf;
bool enabled;
uint32_t bsc;
struct mdp5_ctl *ctl;
};
#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
static struct mdp5_kms *get_kms(struct drm_encoder *encoder) static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{ {
struct msm_drm_private *priv = encoder->dev->dev_private; struct msm_drm_private *priv = encoder->dev->dev_private;
@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
#include <mach/board.h> #include <mach/board.h>
#include <linux/msm-bus.h> #include <linux/msm-bus.h>
#include <linux/msm-bus-board.h> #include <linux/msm-bus-board.h>
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_MDP_PORT0, \
.dst = MSM_BUS_SLAVE_EBI_CH0, \
.ab = (ab_val), \
.ib = (ib_val), \
}
static struct msm_bus_vectors mdp_bus_vectors[] = { static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
MDP_BUS_VECTOR_ENTRY(0, 0),
MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
};
static struct msm_bus_paths mdp_bus_usecases[] = { {
.num_paths = 1,
.vectors = &mdp_bus_vectors[0],
}, {
.num_paths = 1,
.vectors = &mdp_bus_vectors[1],
} };
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
.usecase = mdp_bus_usecases,
.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
.name = "mdss_mdp",
};
static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
{
mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
&mdp_bus_scale_table);
DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
}
static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
{
if (mdp5_cmd_enc->bsc) {
msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
mdp5_cmd_enc->bsc = 0;
}
}
static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
{ {
if (mdp5_cmd_enc->bsc) { if (mdp5_cmd_enc->bsc) {
DBG("set bus scaling: %d", idx); DBG("set bus scaling: %d", idx);
@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
} }
} }
#else #else
static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {}
static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {}
#endif #endif
#define VSYNC_CLK_RATE 19200000 #define VSYNC_CLK_RATE 19200000
static int pingpong_tearcheck_setup(struct drm_encoder *encoder, static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev; struct device *dev = encoder->dev->dev;
@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp5_kms->vsync_clk); clk_disable_unprepare(mdp5_kms->vsync_clk);
} }
static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder) void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{ {
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
bs_fini(mdp5_cmd_enc);
drm_encoder_cleanup(encoder);
kfree(mdp5_cmd_enc);
}
static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
.destroy = mdp5_cmd_encoder_destroy,
};
static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
mode = adjusted_mode; mode = adjusted_mode;
@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mdp5_cmd_enc->ctl); mdp5_cmd_enc->ctl);
} }
static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{ {
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf; struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
mdp5_cmd_enc->enabled = false; mdp5_cmd_enc->enabled = false;
} }
static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{ {
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
struct mdp5_interface *intf = &mdp5_cmd_enc->intf; struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
mdp5_cmd_enc->enabled = true; mdp5_cmd_enc->enabled = true;
} }
static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
.mode_set = mdp5_cmd_encoder_mode_set,
.disable = mdp5_cmd_encoder_disable,
.enable = mdp5_cmd_encoder_enable,
};
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder) struct drm_encoder *slave_encoder)
{ {
struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms; struct mdp5_kms *mdp5_kms;
int intf_num; int intf_num;
u32 data = 0; u32 data = 0;
@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return 0; return 0;
} }
/* initialize command mode encoder */
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_cmd_encoder *mdp5_cmd_enc;
int ret;
if (WARN_ON((intf->type != INTF_DSI) &&
(intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) {
ret = -EINVAL;
goto fail;
}
mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL);
if (!mdp5_cmd_enc) {
ret = -ENOMEM;
goto fail;
}
memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
encoder = &mdp5_cmd_enc->base;
mdp5_cmd_enc->ctl = ctl;
drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
bs_init(mdp5_cmd_enc);
return encoder;
fail:
if (encoder)
mdp5_cmd_encoder_destroy(encoder);
return ERR_PTR(ret);
}


@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp5_crtc); kfree(mdp5_crtc);
} }
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
switch (stage) {
case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
default:
return 0;
}
}
/* /*
* blend_setup() - blend all the planes of a CRTC * blend_setup() - blend all the planes of a CRTC
* *
@ -195,8 +210,10 @@ static void blend_setup(struct drm_crtc *crtc)
uint32_t lm = mdp5_crtc->lm; uint32_t lm = mdp5_crtc->lm;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags; unsigned long flags;
uint8_t stage[STAGE_MAX + 1]; enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
int i, plane_cnt = 0; int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
#define blender(stage) ((stage) - STAGE0) #define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc)
if (!pstates[STAGE_BASE]) { if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled"); DBG("Border Color is enabled");
} else if (plane_cnt) {
format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
if (format->alpha_enable)
bg_alpha_enabled = true;
} }
/* The reset for blending */ /* The reset for blending */
@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc)
MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
fg_alpha = pstates[i]->alpha; fg_alpha = pstates[i]->alpha;
bg_alpha = 0xFF - pstates[i]->alpha; bg_alpha = 0xFF - pstates[i]->alpha;
if (!format->alpha_enable && bg_alpha_enabled)
mixer_op_mode = 0;
else
mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
if (format->alpha_enable && pstates[i]->premultiplied) { if (format->alpha_enable && pstates[i]->premultiplied) {
@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc)
blender(i)), bg_alpha); blender(i)), bg_alpha);
} }
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
out: out:
@ -370,6 +400,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct plane_state pstates[STAGE_MAX + 1]; struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg; const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate; const struct drm_plane_state *pstate;
bool cursor_plane = false;
int cnt = 0, base = 0, i; int cnt = 0, base = 0, i;
DBG("%s: check", crtc->name); DBG("%s: check", crtc->name);
@ -379,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
pstates[cnt].state = to_mdp5_plane_state(pstate); pstates[cnt].state = to_mdp5_plane_state(pstate);
cnt++; cnt++;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
cursor_plane = true;
} }
/* assign a stage based on sorted zpos property */ /* assign a stage based on sorted zpos property */
@ -390,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
base++; base++;
/* trigger a warning if cursor isn't the highest zorder */
WARN_ON(cursor_plane &&
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
/* verify that there are not too many planes attached to crtc /* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages: * and that we don't have conflicting mixer stages:
*/ */
@ -401,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
} }
for (i = 0; i < cnt; i++) { for (i = 0; i < cnt; i++) {
pstates[i].state->stage = STAGE_BASE + i + base; if (cursor_plane && (i == (cnt - 1)))
pstates[i].state->stage = hw_cfg->lm.nb_stages;
else
pstates[i].state->stage = STAGE_BASE + i + base;
DBG("%s: assign pipe %s on stage=%d", crtc->name, DBG("%s: assign pipe %s on stage=%d", crtc->name,
pstates[i].plane->name, pstates[i].plane->name,
pstates[i].state->stage); pstates[i].state->stage);
@ -612,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_move = mdp5_crtc_cursor_move, .cursor_move = mdp5_crtc_cursor_move,
}; };
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.set_property = drm_atomic_helper_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb, .mode_set_nofb = mdp5_crtc_mode_set_nofb,
.disable = mdp5_crtc_disable, .disable = mdp5_crtc_disable,
@ -727,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
mdp5_ctl_set_pipeline(ctl, intf, lm); mdp5_ctl_set_pipeline(ctl, intf, lm);
} }
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
return mdp5_crtc->ctl;
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc) int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{ {
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@ -745,7 +803,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
/* initialize crtc */ /* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id) struct drm_plane *plane,
struct drm_plane *cursor_plane, int id)
{ {
struct drm_crtc *crtc = NULL; struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc; struct mdp5_crtc *mdp5_crtc;
@ -766,8 +825,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq;
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, if (cursor_plane)
NULL); drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
&mdp5_crtc_no_lm_cursor_funcs, NULL);
else
drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work, drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker); "unref cursor", unref_cursor_worker);


@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
case SSPP_CURSOR0:
case SSPP_CURSOR1:
default: return 0; default: return 0;
} }
} }
@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
enum mdp_mixer_stage_id stage) enum mdp_mixer_stage_id stage)
{ {
if (stage < STAGE6) if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
return 0; return 0;
switch (pipe) { switch (pipe) {
@ -347,12 +349,14 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
default: return 0; default: return 0;
} }
} }
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
u32 ctl_blend_op_flags) u32 ctl_blend_op_flags)
{ {
unsigned long flags; unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0; u32 blend_cfg = 0, blend_ext_cfg = 0;
@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
start_stage = STAGE_BASE; start_stage = STAGE_BASE;
} }
for (i = start_stage; i < start_stage + stage_cnt; i++) { for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
blend_cfg |= mdp_ctl_blend_mask(stage[i], i); blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
} }
@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
default: return 0; default: return 0;
} }
} }


@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/ */
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
u32 ctl_blend_op_flags); u32 ctl_blend_op_flags);
/** /**
* mdp_ctl_flush_mask...() - Register FLUSH masks * mdp_ctl_flush_mask...() - Register FLUSH masks


@ -21,17 +21,6 @@
#include "drm_crtc.h" #include "drm_crtc.h"
#include "drm_crtc_helper.h" #include "drm_crtc_helper.h"
struct mdp5_encoder {
struct drm_encoder base;
struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
static struct mdp5_kms *get_kms(struct drm_encoder *encoder) static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{ {
struct msm_drm_private *priv = encoder->dev->dev_private; struct msm_drm_private *priv = encoder->dev->dev_private;
@ -112,9 +101,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy, .destroy = mdp5_encoder_destroy,
}; };
static void mdp5_encoder_mode_set(struct drm_encoder *encoder, static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder);
@ -221,7 +210,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
mdp5_encoder->ctl); mdp5_encoder->ctl);
} }
static void mdp5_encoder_disable(struct drm_encoder *encoder) static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
{ {
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder);
@ -256,7 +245,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
mdp5_encoder->enabled = false; mdp5_encoder->enabled = false;
} }
static void mdp5_encoder_enable(struct drm_encoder *encoder) static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
{ {
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder);
@ -279,6 +268,41 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
mdp5_encoder->enabled = true; mdp5_encoder->enabled = true;
} }
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
else
mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
}
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
else
mdp5_vid_encoder_disable(encoder);
}
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_set = mdp5_encoder_mode_set, .mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable, .disable = mdp5_encoder_disable,
@ -303,8 +327,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
} }
int mdp5_encoder_set_split_display(struct drm_encoder *encoder, int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder) struct drm_encoder *slave_encoder)
{ {
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
@ -342,6 +366,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
return 0; return 0;
} }
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = &mdp5_encoder->intf;
/* TODO: Expand this to set writeback modes too */
if (cmd_mode) {
WARN_ON(intf->type != INTF_DSI);
intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
} else {
if (intf->type == INTF_DSI)
intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
else
intf->mode = MDP5_INTF_MODE_NONE;
}
}
/* initialize encoder */ /* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl) struct mdp5_interface *intf, struct mdp5_ctl *ctl)


@ -148,7 +148,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
return mdp5_cmd_encoder_set_split_display(encoder, return mdp5_cmd_encoder_set_split_display(encoder,
slave_encoder); slave_encoder);
else else
return mdp5_encoder_set_split_display(encoder, slave_encoder); return mdp5_vid_encoder_set_split_display(encoder,
slave_encoder);
}
static void mdp5_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
{
mdp5_encoder_set_intf_mode(encoder, cmd_mode);
} }
static void mdp5_kms_destroy(struct msm_kms *kms) static void mdp5_kms_destroy(struct msm_kms *kms)
@ -230,6 +238,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format, .get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk, .round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display, .set_split_display = mdp5_set_split_display,
.set_encoder_mode = mdp5_set_encoder_mode,
.destroy = mdp5_kms_destroy, .destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
.debugfs_init = mdp5_kms_debugfs_init, .debugfs_init = mdp5_kms_debugfs_init,
@ -267,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
enum mdp5_intf_type intf_type, int intf_num, enum mdp5_intf_type intf_type, int intf_num,
enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) struct mdp5_ctl *ctl)
{ {
struct drm_device *dev = mdp5_kms->dev; struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
@ -275,21 +284,15 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
struct mdp5_interface intf = { struct mdp5_interface intf = {
.num = intf_num, .num = intf_num,
.type = intf_type, .type = intf_type,
.mode = intf_mode, .mode = MDP5_INTF_MODE_NONE,
}; };
if ((intf_type == INTF_DSI) && encoder = mdp5_encoder_init(dev, &intf, ctl);
(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
else
encoder = mdp5_encoder_init(dev, &intf, ctl);
if (IS_ERR(encoder)) { if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n"); dev_err(dev->dev, "failed to construct encoder\n");
return encoder; return encoder;
} }
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder; priv->encoders[priv->num_encoders++] = encoder;
return encoder; return encoder;
@ -338,8 +341,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break; break;
} }
encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) { if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder); ret = PTR_ERR(encoder);
break; break;
@ -357,8 +359,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break; break;
} }
encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
MDP5_INTF_MODE_NONE, ctl);
if (IS_ERR(encoder)) { if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder); ret = PTR_ERR(encoder);
break; break;
@ -369,9 +370,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
case INTF_DSI: case INTF_DSI:
{ {
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
enum mdp5_intf_mode mode;
int i;
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n", dev_err(dev->dev, "failed to find dsi from intf %d\n",
@ -389,19 +387,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break; break;
} }
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
mode = (i == MSM_DSI_CMD_ENCODER_ID) ? if (IS_ERR(encoder)) {
MDP5_INTF_DSI_MODE_COMMAND : ret = PTR_ERR(encoder);
MDP5_INTF_DSI_MODE_VIDEO; break;
dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
intf_num, mode, ctl);
if (IS_ERR(dsi_encs[i])) {
ret = PTR_ERR(dsi_encs[i]);
break;
}
} }
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
break; break;
} }
default: default:
@ -418,20 +410,48 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
struct drm_device *dev = mdp5_kms->dev; struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private; struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg; const struct mdp5_cfg_hw *hw_cfg;
int i, ret; unsigned int num_crtcs;
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/* Construct planes equaling the number of hw pipes, and CRTCs /*
* for the N layer-mixers (LM). The first N planes become primary * Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
ret = modeset_init_intf(mdp5_kms, i);
if (ret)
goto fail;
}
/*
* We should ideally have fewer encoders (set up by parsing the MDP5
* interfaces) than layer mixers present in HW, but take the smaller of
* the two to be safe anyway
*/
num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
* N encoders set up by the driver. The first N planes become primary
* planes for the CRTCs, with the remainder as overlay planes: * planes for the CRTCs, with the remainder as overlay planes:
*/ */
for (i = 0; i < mdp5_kms->num_hwpipes; i++) { for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
bool primary = i < mdp5_cfg->lm.count; struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
struct drm_plane *plane; struct drm_plane *plane;
struct drm_crtc *crtc; enum drm_plane_type type;
plane = mdp5_plane_init(dev, primary); if (i < num_crtcs)
type = DRM_PLANE_TYPE_PRIMARY;
else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
type = DRM_PLANE_TYPE_CURSOR;
else
type = DRM_PLANE_TYPE_OVERLAY;
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) { if (IS_ERR(plane)) {
ret = PTR_ERR(plane); ret = PTR_ERR(plane);
dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
@ -439,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
} }
priv->planes[priv->num_planes++] = plane; priv->planes[priv->num_planes++] = plane;
if (!primary) if (type == DRM_PLANE_TYPE_PRIMARY)
continue; primary[pi++] = plane;
if (type == DRM_PLANE_TYPE_CURSOR)
cursor[ci++] = plane;
}
crtc = mdp5_crtc_init(dev, plane, i); for (i = 0; i < num_crtcs; i++) {
struct drm_crtc *crtc;
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) { if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc); ret = PTR_ERR(crtc);
dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
@ -451,13 +477,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
priv->crtcs[priv->num_crtcs++] = crtc; priv->crtcs[priv->num_crtcs++] = crtc;
} }
/* Construct encoders and modeset initialize connector devices /*
* for each external display interface. * Now that we know the number of crtcs we've created, set the possible
* crtcs for the encoders
*/ */
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { for (i = 0; i < priv->num_encoders; i++) {
ret = modeset_init_intf(mdp5_kms, i); struct drm_encoder *encoder = priv->encoders[i];
if (ret)
goto fail; encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
} }
return 0; return 0;
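To make the allocation rules in the comments above concrete, here is a small standalone sketch (plain C with made-up encoder/mixer/pipe counts, not driver code): num_crtcs is the smaller of the encoder count and the layer-mixer count, the first num_crtcs hw pipes back primary planes, cursor-capable pipes become cursor planes, the rest become overlays, and each encoder ends up with a possible_crtcs mask covering every CRTC created.

#include <stdio.h>

#define MDP_PIPE_CAP_CURSOR	(1u << 6)	/* mirrors the cap bit added in this series */

int main(void)
{
	/* hypothetical counts: 3 encoders probed, 4 layer mixers, 8 hw pipes */
	unsigned int num_encoders = 3, num_lm = 4, num_hwpipes = 8;
	/* per-pipe caps; only the last two (the SSPP_CURSORn pipes) have the cursor cap */
	unsigned int caps[8] = { 0, 0, 0, 0, 0, 0, MDP_PIPE_CAP_CURSOR, MDP_PIPE_CAP_CURSOR };
	unsigned int num_crtcs, i;

	/* we can drive at most one CRTC per encoder, and at most one per layer mixer */
	num_crtcs = num_encoders < num_lm ? num_encoders : num_lm;

	for (i = 0; i < num_hwpipes; i++) {
		const char *type;

		if (i < num_crtcs)
			type = "primary";
		else if (caps[i] & MDP_PIPE_CAP_CURSOR)
			type = "cursor";
		else
			type = "overlay";
		printf("pipe %u -> %s plane\n", i, type);
	}

	/* every encoder may drive any of the CRTCs we ended up creating */
	printf("possible_crtcs mask = 0x%x\n", (1u << num_crtcs) - 1);
	return 0;
}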
@ -773,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
static const enum mdp5_pipe dma_planes[] = { static const enum mdp5_pipe dma_planes[] = {
SSPP_DMA0, SSPP_DMA1, SSPP_DMA0, SSPP_DMA1,
}; };
static const enum mdp5_pipe cursor_planes[] = {
SSPP_CURSOR0, SSPP_CURSOR1,
};
const struct mdp5_cfg_hw *hw_cfg; const struct mdp5_cfg_hw *hw_cfg;
int ret; int ret;
@ -796,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
if (ret) if (ret)
return ret; return ret;
/* Construct cursor pipes: */
ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
cursor_planes, hw_cfg->pipe_cursor.base,
hw_cfg->pipe_cursor.caps);
if (ret)
return ret;
return 0; return 0;
} }

View File

@ -126,6 +126,17 @@ struct mdp5_interface {
enum mdp5_intf_mode mode; enum mdp5_intf_mode mode;
}; };
struct mdp5_encoder {
struct drm_encoder base;
struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{ {
msm_writel(data, mdp5_kms->mmio + reg); msm_writel(data, mdp5_kms->mmio + reg);
@ -156,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
NAME(RGB0), NAME(RGB1), NAME(RGB2), NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1), NAME(DMA0), NAME(DMA1),
NAME(VIG3), NAME(RGB3), NAME(VIG3), NAME(RGB3),
NAME(CURSOR0), NAME(CURSOR1),
#undef NAME #undef NAME
}; };
return names[pipe]; return names[pipe];
@ -231,8 +243,10 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane); uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type);
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc);
@ -240,25 +254,36 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl); struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id); struct drm_plane *plane,
struct drm_plane *cursor_plane, int id);
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl); struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_encoder_set_split_display(struct drm_encoder *encoder, int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder); struct drm_encoder *slave_encoder);
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
int mdp5_encoder_get_linecount(struct drm_encoder *encoder); int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_MSM_DSI #ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_interface *intf, struct mdp5_ctl *ctl); struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder); struct drm_encoder *slave_encoder);
#else #else
static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_interface *intf, struct mdp5_ctl *ctl) struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
}
static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{ {
return ERR_PTR(-EINVAL);
} }
static inline int mdp5_cmd_encoder_set_split_display( static inline int mdp5_cmd_encoder_set_split_display(
struct drm_encoder *encoder, struct drm_encoder *slave_encoder) struct drm_encoder *encoder, struct drm_encoder *slave_encoder)

View File

@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
if (caps & ~cur->caps) if (caps & ~cur->caps)
continue; continue;
/*
* don't assign a cursor pipe to a plane that isn't going to
* be used as a cursor
*/
if (cur->caps & MDP_PIPE_CAP_CURSOR &&
plane->type != DRM_PLANE_TYPE_CURSOR)
continue;
/* possible candidate, take the one with the /* possible candidate, take the one with the
* fewest unneeded caps bits set: * fewest unneeded caps bits set:
*/ */

View File

@ -29,6 +29,11 @@ struct mdp5_plane {
static int mdp5_plane_mode_set(struct drm_plane *plane, static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest);
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h, unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_x, uint32_t src_y,
@ -45,7 +50,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane)
static bool plane_enabled(struct drm_plane_state *state) static bool plane_enabled(struct drm_plane_state *state)
{ {
return state->fb && state->crtc; return state->visible;
} }
static void mdp5_plane_destroy(struct drm_plane *plane) static void mdp5_plane_destroy(struct drm_plane *plane)
@ -246,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state, .atomic_print_state = mdp5_plane_atomic_print_state,
}; };
static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
.update_plane = mdp5_update_cursor_plane_legacy,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
.set_property = drm_atomic_helper_plane_set_property,
.atomic_set_property = mdp5_plane_atomic_set_property,
.atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
.atomic_duplicate_state = mdp5_plane_duplicate_state,
.atomic_destroy_state = mdp5_plane_destroy_state,
.atomic_print_state = mdp5_plane_atomic_print_state,
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane, static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state) struct drm_plane_state *new_state)
{ {
@ -272,15 +290,20 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
msm_framebuffer_cleanup(fb, mdp5_kms->id); msm_framebuffer_cleanup(fb, mdp5_kms->id);
} }
static int mdp5_plane_atomic_check(struct drm_plane *plane, #define FRAC_16_16(mult, div) (((mult) << 16) / (div))
struct drm_plane_state *state) static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *state)
{ {
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_plane *plane = state->plane;
struct drm_plane_state *old_state = plane->state; struct drm_plane_state *old_state = plane->state;
struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
bool new_hwpipe = false; bool new_hwpipe = false;
uint32_t max_width, max_height; uint32_t max_width, max_height;
uint32_t caps = 0; uint32_t caps = 0;
struct drm_rect clip;
int min_scale, max_scale;
int ret;
DBG("%s: check (%d -> %d)", plane->name, DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state)); plane_enabled(old_state), plane_enabled(state));
@ -296,6 +319,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return -ERANGE; return -ERANGE;
} }
clip.x1 = 0;
clip.y1 = 0;
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
ret = drm_plane_helper_check_state(state, &clip, min_scale,
max_scale, true, true);
if (ret)
return ret;
if (plane_enabled(state)) { if (plane_enabled(state)) {
unsigned int rotation; unsigned int rotation;
const struct mdp_format *format; const struct mdp_format *format;
@ -321,6 +356,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (rotation & DRM_REFLECT_Y) if (rotation & DRM_REFLECT_Y)
caps |= MDP_PIPE_CAP_VFLIP; caps |= MDP_PIPE_CAP_VFLIP;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
caps |= MDP_PIPE_CAP_CURSOR;
/* (re)allocate hw pipe if we don't have one or caps-mismatch: */ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
new_hwpipe = true; new_hwpipe = true;
@ -356,6 +394,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return 0; return 0;
} }
static int mdp5_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
crtc = state->crtc ? state->crtc : plane->state->crtc;
if (!crtc)
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
return mdp5_plane_atomic_check_with_state(crtc_state, state);
}
static void mdp5_plane_atomic_update(struct drm_plane *plane, static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state) struct drm_plane_state *old_state)
{ {
@ -368,10 +423,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
ret = mdp5_plane_mode_set(plane, ret = mdp5_plane_mode_set(plane,
state->crtc, state->fb, state->crtc, state->fb,
state->crtc_x, state->crtc_y, &state->src, &state->dst);
state->crtc_w, state->crtc_h,
state->src_x, state->src_y,
state->src_w, state->src_h);
/* atomic_check should have ensured that this doesn't fail */ /* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0); WARN_ON(ret < 0);
} }
@ -664,10 +716,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
static int mdp5_plane_mode_set(struct drm_plane *plane, static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y, struct drm_rect *src, struct drm_rect *dest)
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{ {
struct drm_plane_state *pstate = plane->state; struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
@ -683,6 +732,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t pix_format; uint32_t pix_format;
unsigned int rotation; unsigned int rotation;
bool vflip, hflip; bool vflip, hflip;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned long flags; unsigned long flags;
int ret; int ret;
@ -695,6 +748,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb)); format = to_mdp_format(msm_framebuffer_format(fb));
pix_format = format->base.pixel_format; pix_format = format->base.pixel_format;
src_x = src->x1;
src_y = src->y1;
src_w = drm_rect_width(src);
src_h = drm_rect_height(src);
crtc_x = dest->x1;
crtc_y = dest->y1;
crtc_w = drm_rect_width(dest);
crtc_h = drm_rect_height(dest);
/* src values are in Q16 fixed point, convert to integer: */ /* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16; src_x = src_x >> 16;
src_y = src_y >> 16; src_y = src_y >> 16;
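The shifts above work because DRM plane source coordinates are Q16.16 fixed point, which is also why the scale limits earlier in this file are written as FRAC_16_16(1, 8) and FRAC_16_16(8, 1). A quick standalone illustration of both conversions (arbitrary example values, not driver code):

#include <stdint.h>
#include <stdio.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

int main(void)
{
	uint32_t src_x = 100u << 16;	/* 100.0 in Q16.16, as stored in drm_plane_state */
	uint32_t src_w = 1920u << 16;	/* 1920.0 in Q16.16 */

	/* the driver converts to integer pixels before programming the hardware */
	printf("src_x = %u px, src_w = %u px\n", src_x >> 16, src_w >> 16);

	/* scale limits: 1/8 downscale .. 8x upscale, in the same 16.16 format */
	printf("min_scale = 0x%x (1/8), max_scale = 0x%x (8x)\n",
	       FRAC_16_16(1, 8), FRAC_16_16(8, 1));
	return 0;
}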
@ -818,12 +881,88 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret; return ret;
} }
static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_plane_state *plane_state, *new_plane_state;
struct mdp5_plane_state *mdp5_pstate;
struct drm_crtc_state *crtc_state = crtc->state;
int ret;
if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
goto slow;
plane_state = plane->state;
mdp5_pstate = to_mdp5_plane_state(plane_state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_pstate->hwpipe)
goto slow;
/* only allow changing position (crtc x/y or src x/y) in the fast path */
if (plane_state->crtc != crtc ||
plane_state->src_w != src_w ||
plane_state->src_h != src_h ||
plane_state->crtc_w != crtc_w ||
plane_state->crtc_h != crtc_h ||
!plane_state->fb ||
plane_state->fb != fb)
goto slow;
new_plane_state = mdp5_plane_duplicate_state(plane);
if (!new_plane_state)
return -ENOMEM;
new_plane_state->src_x = src_x;
new_plane_state->src_y = src_y;
new_plane_state->src_w = src_w;
new_plane_state->src_h = src_h;
new_plane_state->crtc_x = crtc_x;
new_plane_state->crtc_y = crtc_y;
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
if (ret)
goto slow_free;
if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
&new_plane_state->dst);
WARN_ON(ret < 0);
ctl = mdp5_crtc_get_ctl(crtc);
mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane_state) =
*to_mdp5_plane_state(new_plane_state);
mdp5_plane_destroy_state(plane, new_plane_state);
return 0;
slow_free:
mdp5_plane_destroy_state(plane, new_plane_state);
slow:
return drm_atomic_helper_update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
}
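Summarising the fast-path conditions above: the legacy cursor ioctl bypasses a full atomic commit only when the CRTC is active with no modeset pending, the plane already has a hw pipe, and nothing but the position changes; anything else falls back to drm_atomic_helper_update_plane(). Below is a hedged standalone sketch of just that eligibility test — the struct and field names are trimmed stand-ins for the DRM state involved, not the driver function itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* trimmed stand-ins for the DRM state involved in the decision */
struct plane_state {
	bool has_hwpipe;
	int crtc_id, fb_id;
	uint32_t src_w, src_h;
	unsigned int crtc_w, crtc_h;
};

struct crtc_state {
	bool active, needs_modeset;
};

static bool cursor_fast_path_ok(const struct crtc_state *cs,
				const struct plane_state *ps,
				int crtc_id, int fb_id,
				uint32_t src_w, uint32_t src_h,
				unsigned int crtc_w, unsigned int crtc_h)
{
	if (!cs->active || cs->needs_modeset)
		return false;		/* a full commit is needed anyway */
	if (!ps->has_hwpipe)
		return false;		/* no hw pipe allocated yet */
	/* only position (crtc x/y, src x/y) may change on the fast path */
	return ps->crtc_id == crtc_id && ps->fb_id == fb_id && fb_id != 0 &&
	       ps->src_w == src_w && ps->src_h == src_h &&
	       ps->crtc_w == crtc_w && ps->crtc_h == crtc_h;
}

int main(void)
{
	struct crtc_state cs = { .active = true, .needs_modeset = false };
	struct plane_state ps = { .has_hwpipe = true, .crtc_id = 1, .fb_id = 7,
				  .src_w = 64u << 16, .src_h = 64u << 16,
				  .crtc_w = 64, .crtc_h = 64 };

	/* same fb and size, new position -> fast path */
	printf("move only: %s\n", cursor_fast_path_ok(&cs, &ps, 1, 7,
	       64u << 16, 64u << 16, 64, 64) ? "fast" : "slow");
	/* new framebuffer -> fall back to a full atomic update */
	printf("new fb:    %s\n", cursor_fast_path_ok(&cs, &ps, 1, 9,
	       64u << 16, 64u << 16, 64, 64) ? "fast" : "slow");
	return 0;
}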
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{ {
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
if (WARN_ON(!pstate->hwpipe)) if (WARN_ON(!pstate->hwpipe))
return 0; return SSPP_NONE;
return pstate->hwpipe->pipe; return pstate->hwpipe->pipe;
} }
@ -839,12 +978,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
} }
/* initialize plane */ /* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type)
{ {
struct drm_plane *plane = NULL; struct drm_plane *plane = NULL;
struct mdp5_plane *mdp5_plane; struct mdp5_plane *mdp5_plane;
int ret; int ret;
enum drm_plane_type type;
mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
if (!mdp5_plane) { if (!mdp5_plane) {
@ -857,10 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false); ARRAY_SIZE(mdp5_plane->formats), false);
type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; if (type == DRM_PLANE_TYPE_CURSOR)
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, ret = drm_universal_plane_init(dev, plane, 0xff,
mdp5_plane->formats, mdp5_plane->nformats, &mdp5_cursor_plane_funcs,
type, NULL); mdp5_plane->formats, mdp5_plane->nformats,
type, NULL);
else
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
type, NULL);
if (ret) if (ret)
goto fail; goto fail;

View File

@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_PIPE_CAP_CSC BIT(3) #define MDP_PIPE_CAP_CSC BIT(3)
#define MDP_PIPE_CAP_DECIMATION BIT(4) #define MDP_PIPE_CAP_DECIMATION BIT(4)
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
#define MDP_PIPE_CAP_CURSOR BIT(6)
static inline bool pipe_supports_yuv(uint32_t pipe_caps) static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{ {

View File

@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!crtc->state->enable) if (!crtc->state->enable)
continue; continue;
/* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates). */
if (old_state->legacy_cursor_update)
continue;
kms->funcs->wait_for_crtc_commit_done(kms, crtc); kms->funcs->wait_for_crtc_commit_done(kms, crtc);
} }
} }
@ -151,20 +146,29 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true); complete_commit(container_of(work, struct msm_commit, work), true);
} }
/*
* This function is identical to drm_atomic_helper_check(), but we keep it
* because we might eventually need a more fine-grained check sequence
* without using the atomic helpers.
*
* In the past, we first called drm_atomic_helper_check_planes, and then
* drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
* ->atomic_check could update ->mode_changed for pixel format changes.
* This, however, isn't needed now because if there is a pixel format change,
* we just assign a new hwpipe for it with a new SMP allocation. We might
* eventually hit a condition where we would need to do a full modeset if
* we run out of planes. There, we'd probably need to set mode_changed.
*/
int msm_atomic_check(struct drm_device *dev, int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
int ret; int ret;
/* ret = drm_atomic_helper_check_modeset(dev, state);
* msm ->atomic_check can update ->mode_changed for pixel format
* changes, hence must be run before we check the modeset changes.
*/
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) if (ret)
return ret; return ret;
ret = drm_atomic_helper_check_modeset(dev, state); ret = drm_atomic_helper_check_planes(dev, state);
if (ret) if (ret)
return ret; return ret;

View File

@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600);
* Util/helpers: * Util/helpers:
*/ */
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
char name2[32];
clk = devm_clk_get(&pdev->dev, name);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
snprintf(name2, sizeof(name2), "%s_clk", name);
clk = devm_clk_get(&pdev->dev, name2);
if (!IS_ERR(clk))
dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
"\"%s\" instead of \"%s\"\n", name, name2);
return clk;
}
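msm_clk_get() asks for the new-style clock name first and only then retries with the legacy "_clk" suffix, warning when the fallback is used, so device trees written against either naming keep working. A standalone mock of that lookup order follows; the clock-name table is hypothetical, standing in for an old DT that still uses the suffixed names.

#include <stdio.h>
#include <string.h>

/* pretend clock-names from an old DT that still uses the *_clk suffix */
static const char *dt_clock_names[] = { "core_clk", "iface_clk", "mem_iface_clk" };

static int dt_has_clock(const char *name)
{
	unsigned int i;

	for (i = 0; i < sizeof(dt_clock_names) / sizeof(dt_clock_names[0]); i++)
		if (!strcmp(dt_clock_names[i], name))
			return 1;
	return 0;
}

/* mirrors the lookup order above: new name first, then "<name>_clk" with a warning */
static void lookup(const char *name)
{
	char legacy[32];

	if (dt_has_clock(name)) {
		printf("%-10s found under the new-style name\n", name);
		return;
	}
	snprintf(legacy, sizeof(legacy), "%s_clk", name);
	if (dt_has_clock(legacy))
		printf("%-10s found via legacy \"%s\" (driver would warn)\n", name, legacy);
	else
		printf("%-10s not found\n", name);
}

int main(void)
{
	lookup("core");
	lookup("iface");
	lookup("rbbmtimer");
	return 0;
}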
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname) const char *dbgname)
{ {
@ -985,6 +1004,7 @@ static int add_display_components(struct device *dev,
* as components. * as components.
*/ */
static const struct of_device_id msm_gpu_match[] = { static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" }, { .compatible = "qcom,adreno-3xx" },
{ .compatible = "qcom,kgsl-3d0" }, { .compatible = "qcom,kgsl-3d0" },
{ }, { },

View File

@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder); struct drm_encoder *encoder);
struct msm_dsi; struct msm_dsi;
enum msm_dsi_encoder_id {
MSM_DSI_VIDEO_ENCODER_ID = 0,
MSM_DSI_CMD_ENCODER_ID = 1,
MSM_DSI_ENCODER_NUM = 2
};
#ifdef CONFIG_DRM_MSM_DSI #ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void); void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void); void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]); struct drm_encoder *encoder);
#else #else
static inline void __init msm_dsi_register(void) static inline void __init msm_dsi_register(void)
{ {
@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void)
{ {
} }
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
struct drm_device *dev, struct drm_device *dev,
struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) struct drm_encoder *encoder)
{ {
return -EINVAL; return -EINVAL;
} }
@ -318,6 +313,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
#endif #endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname); const char *dbgname);
void msm_writel(u32 data, void __iomem *addr); void msm_writel(u32 data, void __iomem *addr);

View File

@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/ */
submit->bos[i].flags = 0; submit->bos[i].flags = 0;
ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
if (unlikely(ret)) {
pagefault_enable(); pagefault_enable();
spin_unlock(&file->table_lock); spin_unlock(&file->table_lock);
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
if (ret) ret = -EFAULT;
goto out; goto out;
}
spin_lock(&file->table_lock); spin_lock(&file->table_lock);
pagefault_disable(); pagefault_disable();
} }
@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint64_t iova; uint64_t iova;
bool valid; bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
if (ret) ret = -EFAULT;
goto out; goto out;
}
if (submit_reloc.submit_offset % 4) { if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n", DRM_ERROR("non-aligned reloc offset: %u\n",

View File

@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data)
} }
static const char *clk_names[] = { static const char *clk_names[] = {
"core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
"mem_iface_clk", "alt_mem_iface_clk",
}; };
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Acquire clocks: */ /* Acquire clocks: */
for (i = 0; i < ARRAY_SIZE(clk_names); i++) { for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]); gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
if (IS_ERR(gpu->grp_clks[i])) if (IS_ERR(gpu->grp_clks[i]))
gpu->grp_clks[i] = NULL; gpu->grp_clks[i] = NULL;
} }
gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk); DBG("ebi1_clk: %p", gpu->ebi1_clk);
if (IS_ERR(gpu->ebi1_clk)) if (IS_ERR(gpu->ebi1_clk))
gpu->ebi1_clk = NULL; gpu->ebi1_clk = NULL;

View File

@ -24,9 +24,12 @@ struct msm_iommu {
}; };
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base) #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg) unsigned long iova, int flags, void *arg)
{ {
struct msm_iommu *iommu = arg;
if (iommu->base.handler)
return iommu->base.handler(iommu->base.arg, iova, flags);
pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
return 0; return 0;
} }
@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
iommu->domain = domain; iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs); msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev); iommu_set_fault_handler(domain, msm_fault_handler, iommu);
return &iommu->base; return &iommu->base;
} }

View File

@ -56,6 +56,9 @@ struct msm_kms_funcs {
struct drm_encoder *encoder, struct drm_encoder *encoder,
struct drm_encoder *slave_encoder, struct drm_encoder *slave_encoder,
bool is_cmd_mode); bool is_cmd_mode);
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
/* cleanup: */ /* cleanup: */
void (*destroy)(struct msm_kms *kms); void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS

View File

@ -33,6 +33,8 @@ struct msm_mmu_funcs {
struct msm_mmu { struct msm_mmu {
const struct msm_mmu_funcs *funcs; const struct msm_mmu_funcs *funcs;
struct device *dev; struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags);
void *arg;
}; };
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags))
{
mmu->arg = arg;
mmu->handler = handler;
}
#endif /* __MSM_MMU_H__ */ #endif /* __MSM_MMU_H__ */
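With the handler/arg pair now stored on struct msm_mmu and msm_iommu forwarding IOMMU faults to it, a consumer can register a per-MMU fault callback instead of relying on the generic ratelimited warning. A minimal sketch of such a registration, assuming a caller that already holds the struct msm_mmu pointer — the fake_gpu type and gpu_fault_handler() below are hypothetical, not part of this series:

#include <stdio.h>

/* trimmed copy of the hook storage from msm_mmu.h, enough to exercise the callback */
struct msm_mmu {
	int (*handler)(void *arg, unsigned long iova, int flags);
	void *arg;
};

static void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
		int (*handler)(void *arg, unsigned long iova, int flags))
{
	mmu->arg = arg;
	mmu->handler = handler;
}

/* hypothetical consumer that wants to log faults with its own context */
struct fake_gpu { const char *name; };

static int gpu_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct fake_gpu *gpu = arg;

	printf("%s: pagefault at iova=0x%08lx flags=%d\n", gpu->name, iova, flags);
	return 0;	/* value is handed straight back to the IOMMU layer */
}

int main(void)
{
	struct fake_gpu gpu = { .name = "adreno" };
	struct msm_mmu mmu = { 0 };

	msm_mmu_set_fault_handler(&mmu, &gpu, gpu_fault_handler);

	/* what msm_fault_handler() now does when the IOMMU reports a fault */
	if (mmu.handler)
		mmu.handler(mmu.arg, 0x1000, 0);
	return 0;
}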