Merge tag 'drm-msm-next-2024-09-02' of https://gitlab.freedesktop.org/drm/msm into drm-next

Updates for v6.12

DPU:
- Fix/implement DP/PHY mapping on SC8180X
- Enable writeback on SM8150, SC8180X, SM6125, SM6350

DP:
- Enable widebus on all relevant chipsets

DSI:
- Fix PHY programming on SM8350 / SM8450

HDMI:
- Add support for HDMI on MSM8998

MDP5:
- NULL string fix

GPU:
- A642L speedbin support
- A615 support
- A306 support
- A621 support
- Expand UBWC uapi
- A7xx GPU devcoredump fixes
- A5xx preemption fixes
- cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGudK7YMiKDhtvYgp=bY64OZZt0UQSkEkSxLo4rLmeVd9g@mail.gmail.com
This commit is contained in:
Dave Airlie 2024-09-07 16:43:06 +10:00
commit af04e65f6b
37 changed files with 2526 additions and 129 deletions

View File

@ -19,14 +19,15 @@ properties:
- qcom,hdmi-tx-8974
- qcom,hdmi-tx-8994
- qcom,hdmi-tx-8996
- qcom,hdmi-tx-8998
clocks:
minItems: 1
maxItems: 5
maxItems: 8
clock-names:
minItems: 1
maxItems: 5
maxItems: 8
reg:
minItems: 1
@ -142,6 +143,7 @@ allOf:
properties:
clocks:
minItems: 5
maxItems: 5
clock-names:
items:
- const: mdp_core
@ -151,6 +153,28 @@ allOf:
- const: extp
hdmi-mux-supplies: false
- if:
properties:
compatible:
contains:
enum:
- qcom,hdmi-tx-8998
then:
properties:
clocks:
minItems: 8
maxItems: 8
clock-names:
items:
- const: mdp_core
- const: iface
- const: core
- const: alt_iface
- const: extp
- const: bus
- const: mnoc
- const: iface_mmss
additionalProperties: false
examples:

View File

@ -14,6 +14,7 @@ properties:
compatible:
enum:
- qcom,hdmi-phy-8996
- qcom,hdmi-phy-8998
reg:
maxItems: 6

View File

@ -37,6 +37,7 @@ msm-display-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8998.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \

View File

@ -41,6 +41,17 @@ static const struct adreno_info a3xx_gpus[] = {
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000620),
.family = ADRENO_3XX,
.revn = 308,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,

View File

@ -145,6 +145,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
} else if (adreno_is_a306a(adreno_gpu)) {
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010);
} else if (adreno_is_a320(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@ -237,7 +241,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
if (adreno_is_a305b(adreno_gpu) ||
adreno_is_a306(adreno_gpu) ||
adreno_is_a306a(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@ -334,8 +340,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
adreno_is_a320(adreno_gpu)) {
if (adreno_is_a305(adreno_gpu) ||
adreno_is_a306(adreno_gpu) ||
adreno_is_a306a(adreno_gpu) ||
adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |

View File

@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
struct drm_gem_object *obj;
uint32_t *ptr, dwords;
@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
a5xx_gpu->last_seqno[ring->id] = submit->seqno;
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
/*
* Disable local preemption by default because it requires
* user-space to be aware of it and provide additional handling
* to restore rendering state or do various flushes on switch.
*/
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x1);
OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
@ -1793,5 +1801,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->ubwc_config.highest_bank_bit = 14;
/* a5xx only supports UBWC 1.0, these are not configurable */
adreno_gpu->ubwc_config.macrotile_mode = 0;
adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
return gpu;
}

View File

@ -34,8 +34,10 @@ struct a5xx_gpu {
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
spinlock_t preempt_start_lock;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;

View File

@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
int i;
@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
if (!empty && ring == a5xx_gpu->cur_ring)
empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
if (gpu->nr_rings == 1)
return;
/*
* Serialize preemption start to ensure that we always make
* decision on latest state. Otherwise we can get stuck in
* lower priority or empty ring.
*/
spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
/*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
return;
goto out;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
goto out;
}
spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
return;
out:
spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/*
* Try to trigger preemption again in case there was a submit or
* retire during ring switch
*/
a5xx_preempt_trigger(gpu);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
return;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->data = 0;
a5xx_gpu->preempt[i]->info = 0;
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
spin_lock_init(&a5xx_gpu->preempt_start_lock);
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}

View File

@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = {
{},
};
static const struct adreno_reglist a620_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{},
};
static const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@ -448,7 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
@ -491,7 +543,6 @@ static const u32 a630_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a630_protect, 32);
/* These are for a620 and a650 */
static const u32 a650_protect_regs[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@ -636,6 +687,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00080000,
},
/*
* There are (at least) three SoCs implementing A610: SM6125
@ -651,6 +704,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 157, 3 },
{ 127, 4 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010500),
.family = ADRENO_6XX_GEN1,
.revn = 615,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x0018000,
},
.speedbins = ADRENO_SPEEDBINS(
/*
* The default speed bin (0) has the same values as
* speed bin 90 which goes up to 432 MHz.
*/
{ 0, 0 },
{ 90, 0 },
{ 105, 1 },
{ 146, 2 },
{ 163, 3 },
),
}, {
.machine = "qcom,sm7150",
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
@ -667,6 +749,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -689,6 +773,8 @@ static const struct adreno_info a6xx_gpus[] = {
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -711,6 +797,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -733,6 +821,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -755,6 +845,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -763,6 +855,30 @@ static const struct adreno_info a6xx_gpus[] = {
{ 169, 2 },
{ 180, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06020100),
.family = ADRENO_6XX_GEN3,
.fw = {
[ADRENO_FW_SQE] = "a650_sqe.fw",
[ADRENO_FW_GMU] = "a621_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a620_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a620_hwcg,
.protect = &a650_protect,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 137, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@ -782,6 +898,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00180000,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
@ -799,6 +917,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -821,6 +941,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
@ -846,6 +968,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
}, {
@ -864,11 +988,14 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
{ 129, 4 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
@ -888,6 +1015,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
@ -905,6 +1034,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
.address_space_size = SZ_16G,
}
@ -1165,6 +1296,8 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x0000c000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@ -1188,6 +1321,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
.gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
}, {
@ -1207,6 +1341,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}, {
@ -1225,6 +1360,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
}, {
@ -1243,6 +1379,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}

View File

@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
/*
* GEMNoC can power collapse whilst the GPU is being powered down, resulting
* in the power down sequence not being fully executed. That in turn can
* prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
*/
if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
a6xx_gemnoc_workaround(gmu);
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@ -525,8 +541,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
if (adreno_is_a650(adreno_gpu) ||
adreno_is_a660_family(adreno_gpu) ||
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
@ -946,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);
a6xx_gemnoc_workaround(gmu);
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);

View File

@ -402,7 +402,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
u32 val, clock_cntl_on, cgc_mode;
u32 cgc_delay, cgc_hyst;
u32 val, clock_cntl_on;
if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
return;
@ -416,16 +417,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
if (adreno_is_a7xx(adreno_gpu)) {
cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
state ? cgc_mode : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
state ? 0x10111 : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
state ? 0x5555 : 0);
}
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
state ? cgc_delay : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
state ? cgc_hyst : 0);
if (!adreno_gpu->info->a6xx->hwcg) {
gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
@ -493,24 +493,17 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
gpu->ubwc_config.rgb565_predicator = 0;
/* Unknown, introduced with A650 family */
gpu->ubwc_config.uavflagprd_inv = 0;
/* Whether the minimum access length is 64 bits */
gpu->ubwc_config.min_acc_len = 0;
/* Entirely magic, per-GPU-gen value */
gpu->ubwc_config.ubwc_mode = 0;
/*
* The Highest Bank Bit value represents the bit of the highest DDR bank.
* This should ideally use DRAM type detection.
*/
gpu->ubwc_config.ubwc_swizzle = 0x6;
gpu->ubwc_config.macrotile_mode = 0;
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
gpu->ubwc_config.ubwc_mode = 1;
gpu->ubwc_config.ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
@ -523,9 +516,18 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
if (adreno_is_a621(gpu)) {
gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
}
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
if (adreno_is_a680(gpu))
gpu->ubwc_config.macrotile_mode = 1;
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
@ -536,6 +538,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_7c3(gpu)) {
@ -543,12 +546,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_a702(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.min_acc_len = 1;
gpu->ubwc_config.ubwc_mode = 0;
}
}
@ -564,21 +567,26 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
level2_swizzling_dis << 12 |
adreno_gpu->ubwc_config.rgb565_predicator << 11 |
hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
level2_swizzling_dis << 6 | hbb_hi << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
hbb_lo << 1 | ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
level2_swizzling_dis << 12 | hbb_hi << 10 |
adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
@ -586,6 +594,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
adreno_gpu->ubwc_config.macrotile_mode);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
@ -976,25 +987,11 @@ static int hw_init(struct msm_gpu *gpu)
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
if (adreno_is_a702(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000);
else if (adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
/* Set the default primFifo threshold values */
if (adreno_gpu->info->a6xx->prim_fifo_threshold)
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
adreno_gpu->info->a6xx->prim_fifo_threshold);
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

View File

@ -22,6 +22,8 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
};
struct a6xx_gpu {

View File

@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
const u32 *debugbus_blocks, *gbif_debugbus_blocks;
int i;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
debugbus_blocks = gen7_0_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
} else if (adreno_is_a740_family(adreno_gpu)) {
} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
debugbus_blocks = gen7_2_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
} else {
BUG_ON(!adreno_is_a750(adreno_gpu));
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
debugbus_blocks = gen7_9_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
const struct a6xx_debugbus_block *cx_debugbus_blocks;
if (adreno_is_a7xx(adreno_gpu)) {
BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
} else {
@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
const struct gen7_sptp_cluster_registers *dbgahb_clusters;
unsigned dbgahb_clusters_size;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
dbgahb_clusters = gen7_0_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
} else {
BUG_ON(!adreno_is_a740_family(adreno_gpu));
} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
dbgahb_clusters = gen7_2_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
} else {
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
dbgahb_clusters = gen7_9_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters);
}
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
const struct gen7_cluster_registers *clusters;
unsigned clusters_size;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
clusters = gen7_0_0_clusters;
clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
} else if (adreno_is_a740_family(adreno_gpu)) {
} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
clusters = gen7_2_0_clusters;
clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
} else {
BUG_ON(!adreno_is_a750(adreno_gpu));
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
clusters = gen7_9_0_clusters;
clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
}
@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
}
@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
datasize);
out:
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
}
}
@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
unsigned num_shader_blocks;
int i;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
shader_blocks = gen7_0_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
} else if (adreno_is_a740_family(adreno_gpu)) {
} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
shader_blocks = gen7_2_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
} else {
BUG_ON(!adreno_is_a750(adreno_gpu));
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
shader_blocks = gen7_9_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
}
@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
const u32 *pre_crashdumper_regs;
const struct gen7_reg_list *reglist;
if (adreno_is_a730(adreno_gpu)) {
if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
reglist = gen7_0_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
} else if (adreno_is_a740_family(adreno_gpu)) {
} else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
reglist = gen7_2_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
} else {
BUG_ON(!adreno_is_a750(adreno_gpu));
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
reglist = gen7_9_0_reg_list;
pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
}
@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs;
BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
adreno_is_a750(adreno_gpu)));
BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu,
@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
const struct a6xx_indexed_registers *indexed_regs;
int i, indexed_count, mempool_count;
if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) {
indexed_regs = a7xx_indexed_reglist;
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
} else {
BUG_ON(!adreno_is_a750(adreno_gpu));
BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
indexed_regs = gen7_9_0_cp_indexed_reg_list;
indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
}

View File

@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
{ "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
{ "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},

View File

@ -379,6 +379,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
case MSM_PARAM_UBWC_SWIZZLE:
*value = adreno_gpu->ubwc_config.ubwc_swizzle;
return 0;
case MSM_PARAM_MACROTILE_MODE:
*value = adreno_gpu->ubwc_config.macrotile_mode;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@ -478,7 +484,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
@ -688,11 +694,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
if (state->ring[i].data)
state->ring[i].data_size = size << 2;
}
}
}
@ -1083,6 +1087,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
gpu->pdev = pdev;
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||

View File

@ -191,12 +191,42 @@ struct adreno_gpu {
const struct firmware *fw[ADRENO_FW_MAX];
struct {
/**
* @rgb565_predicator: Unknown, introduced with A650 family,
* related to UBWC mode/ver 4
*/
u32 rgb565_predicator;
/** @uavflagprd_inv: Unknown, introduced with A650 family */
u32 uavflagprd_inv;
/** @min_acc_len: Whether the minimum access length is 64 bits */
u32 min_acc_len;
u32 ubwc_mode;
/**
* @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
*
* UBWC 1.0 always enables all three levels.
* UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
* UBWC 4.0 adds the optional ability to disable levels 2 & 3.
*
* This is a bitmask where BIT(0) enables level 1, BIT(1)
* controls level 2, and BIT(2) enables level 3.
*/
u32 ubwc_swizzle;
/**
* @highest_bank_bit: Highest Bank Bit
*
* The Highest Bank Bit value represents the bit of the highest
* DDR bank. This should ideally use DRAM type detection.
*/
u32 highest_bank_bit;
u32 amsbc;
/**
* @macrotile_mode: Macrotile Mode
*
* Whether to use 4-channel macrotiling mode or the newer
* 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
* 4-channel and 1 is 8-channel.
*/
u32 macrotile_mode;
} ubwc_config;
/*
@ -294,6 +324,12 @@ static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 307);
}
static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
{
/* a306a (marketing name is a308) */
return adreno_is_revn(gpu, 308);
}
static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 320);
@ -384,6 +420,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}
static inline int adreno_is_a621(const struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x06020100;
}
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
@ -433,7 +474,13 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}
/* check for a615, a616, a618, a619 or any a630 derivatives */
/* TODO: 615/616 */
static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
{
return adreno_is_a618(gpu) ||
adreno_is_a619(gpu);
}
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))

View File

@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@ -290,6 +291,21 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
},
};
static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -384,6 +400,8 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
.wb_count = ARRAY_SIZE(sm8150_wb),
.wb = sm8150_wb,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),

View File

@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@ -297,6 +298,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
},
};
static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -410,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.pingpong = sc8180x_pp,
.merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
.merge_3d = sc8180x_merge_3d,
.wb_count = ARRAY_SIZE(sc8180x_wb),
.wb = sc8180x_wb,
.intf_count = ARRAY_SIZE(sc8180x_intf),
.intf = sc8180x_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),

View File

@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6125_mdp = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@ -139,6 +140,21 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
},
};
static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SDM845_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 2160,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -210,6 +226,8 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.dspp = sm6125_dspp,
.pingpong_count = ARRAY_SIZE(sm6125_pp),
.pingpong = sm6125_pp,
.wb_count = ARRAY_SIZE(sm6125_wb),
.wb = sm6125_wb,
.intf_count = ARRAY_SIZE(sm6125_intf),
.intf = sm6125_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),

View File

@ -26,6 +26,7 @@ static const struct dpu_mdp_cfg sm6350_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@ -145,6 +146,21 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
},
};
static const struct dpu_wb_cfg sm6350_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 1920,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -218,6 +234,8 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.dsc = sm6350_dsc,
.pingpong_count = ARRAY_SIZE(sm6350_pp),
.pingpong = sm6350_pp,
.wb_count = ARRAY_SIZE(sm6350_wb),
.wb = sm6350_wb,
.intf_count = ARRAY_SIZE(sm6350_intf),
.intf = sm6350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),

View File

@ -96,14 +96,16 @@
#define INTF_SC7280_MASK (INTF_SC7180_MASK)
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
BIT(DPU_WB_CDP) | \
BIT(DPU_WB_CDP))
#define WB_SM8250_MASK (WB_SDM845_MASK | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)

View File

@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <drm/drm_managed.h>
#include "dpu_hwio.h"
@ -231,8 +233,38 @@ static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
enum dpu_dp_phy_sel phys[2])
{
struct dpu_hw_blk_reg_map *c = &mdp->hw;
unsigned int intf;
u32 sel = 0;
sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]);
sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]);
for (intf = 0; intf < 2; intf++) {
switch (phys[intf]) {
case DPU_DP_PHY_0:
sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1);
break;
case DPU_DP_PHY_1:
sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1);
break;
case DPU_DP_PHY_2:
sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1);
break;
default:
/* ignore */
break;
}
}
DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel);
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
@ -245,6 +277,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_safe_status = dpu_hw_get_safe_status;
if (mdss_rev->core_major_ver >= 5)
ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@ -252,7 +287,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_mdp *mdp;
@ -270,7 +305,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
_setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
return mdp;
}

View File

@ -67,6 +67,13 @@ struct dpu_vsync_source_cfg {
enum dpu_vsync_source vsync_source;
};
enum dpu_dp_phy_sel {
DPU_DP_PHY_NONE,
DPU_DP_PHY_0,
DPU_DP_PHY_1,
DPU_DP_PHY_2,
};
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
@ -125,6 +132,13 @@ struct dpu_hw_mdp_ops {
void (*get_safe_status)(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status);
/**
* dp_phy_intf_sel - configure intf to phy mapping
* @mdp: mdp top context driver
* @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
*/
void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
/**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
@ -148,12 +162,12 @@ struct dpu_hw_mdp {
* @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
* @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
const struct dpu_mdss_version *mdss_rev);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);

View File

@ -60,6 +60,13 @@
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define DCE_SEL 0x450
#define MDP_DP_PHY_INTF_SEL 0x460
#define MDP_DP_PHY_INTF_SEL_INTF0 GENMASK(2, 0)
#define MDP_DP_PHY_INTF_SEL_INTF1 GENMASK(5, 3)
#define MDP_DP_PHY_INTF_SEL_PHY0 GENMASK(8, 6)
#define MDP_DP_PHY_INTF_SEL_PHY1 GENMASK(11, 9)
#define MDP_DP_PHY_INTF_SEL_PHY2 GENMASK(14, 12)
#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
#define MDP_PERIPH_TOP0_END CLK_CTRL3

View File

@ -1146,7 +1146,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
dpu_kms->catalog->mdp,
dpu_kms->mmio,
dpu_kms->catalog);
dpu_kms->catalog->mdss_ver);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
@ -1181,6 +1181,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
/*
* We need to program DP <-> PHY relationship only for SC8180X since it
* has fewer DP controllers than DP PHYs.
* If any other platform requires the same kind of programming, or if
* the INTF <->DP relationship isn't static anymore, this needs to be
* configured through the DT.
*/
if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });
dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);

View File

@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
plane ? plane->name : NULL);
plane ? plane->name : "(null)");
total += inuse;
}

View File

@ -119,7 +119,7 @@ struct msm_dp_desc {
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = {
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 },
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
};
static const struct msm_dp_desc sm8650_dp_descs[] = {
{ .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 },
{ .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};

View File

@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
} else {
} else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
} else {
/* 4.2, 4.3 */
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
config->pll_clock_inverters = 0x20;
else if (pll_freq <= 3500000000ULL)
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
}
config->decimal_div_start = dec;

View File

@ -549,6 +549,7 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },

View File

@ -137,6 +137,7 @@ enum hdmi_phy_type {
MSM_HDMI_PHY_8960,
MSM_HDMI_PHY_8x74,
MSM_HDMI_PHY_8996,
MSM_HDMI_PHY_8998,
MSM_HDMI_PHY_MAX,
};
@ -154,6 +155,7 @@ extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg;
struct hdmi_phy {
struct platform_device *pdev;
@ -184,6 +186,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
#ifdef CONFIG_COMMON_CLK
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
int msm_hdmi_pll_8998_init(struct platform_device *pdev);
#else
static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
@ -194,6 +197,11 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
return -ENODEV;
}
static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
{
return -ENODEV;
}
#endif
/*

View File

@ -118,6 +118,9 @@ static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
case MSM_HDMI_PHY_8996:
ret = msm_hdmi_pll_8996_init(pdev);
break;
case MSM_HDMI_PHY_8998:
ret = msm_hdmi_pll_8998_init(pdev);
break;
/*
* we don't have PLL support for these, don't report an error for now
*/
@ -193,6 +196,8 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8996",
.data = &msm_hdmi_phy_8996_cfg },
{ .compatible = "qcom,hdmi-phy-8998",
.data = &msm_hdmi_phy_8998_cfg },
{}
};

View File

@ -0,0 +1,779 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Freebox SAS
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include "hdmi.h"
#define HDMI_VCO_MAX_FREQ 12000000000UL
#define HDMI_VCO_MIN_FREQ 8000000000UL
#define HDMI_PCLK_MAX_FREQ 600000000
#define HDMI_PCLK_MIN_FREQ 25000000
#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
#define HDMI_CORECLK_DIV 5
#define HDMI_DEFAULT_REF_CLOCK 19200000
#define HDMI_PLL_CMP_CNT 1024
#define HDMI_PLL_POLL_MAX_READS 100
#define HDMI_PLL_POLL_TIMEOUT_US 150
#define HDMI_NUM_TX_CHANNEL 4
struct hdmi_pll_8998 {
struct platform_device *pdev;
struct clk_hw clk_hw;
unsigned long rate;
/* pll mmio base */
void __iomem *mmio_qserdes_com;
/* tx channel base */
void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
};
#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8998, clk_hw)
struct hdmi_8998_phy_pll_reg_cfg {
u32 com_svs_mode_clk_sel;
u32 com_hsclk_sel;
u32 com_pll_cctrl_mode0;
u32 com_pll_rctrl_mode0;
u32 com_cp_ctrl_mode0;
u32 com_dec_start_mode0;
u32 com_div_frac_start1_mode0;
u32 com_div_frac_start2_mode0;
u32 com_div_frac_start3_mode0;
u32 com_integloop_gain0_mode0;
u32 com_integloop_gain1_mode0;
u32 com_lock_cmp_en;
u32 com_lock_cmp1_mode0;
u32 com_lock_cmp2_mode0;
u32 com_lock_cmp3_mode0;
u32 com_core_clk_en;
u32 com_coreclk_div_mode0;
u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_pre_driver_1[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_pre_driver_2[HDMI_NUM_TX_CHANNEL];
u32 tx_lx_res_code_offset[HDMI_NUM_TX_CHANNEL];
u32 phy_mode;
};
struct hdmi_8998_post_divider {
u64 vco_freq;
int hsclk_divsel;
int vco_ratio;
int tx_band_sel;
int half_rate_mode;
};
static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8998 *pll)
{
return platform_get_drvdata(pll->pdev);
}
static inline void hdmi_pll_write(struct hdmi_pll_8998 *pll, int offset,
u32 data)
{
writel(data, pll->mmio_qserdes_com + offset);
}
static inline u32 hdmi_pll_read(struct hdmi_pll_8998 *pll, int offset)
{
return readl(pll->mmio_qserdes_com + offset);
}
static inline void hdmi_tx_chan_write(struct hdmi_pll_8998 *pll, int channel,
int offset, int data)
{
writel(data, pll->mmio_qserdes_tx[channel] + offset);
}
static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return 0x8;
return 0x30;
}
static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return 0x16;
return 0x18;
}
static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
{
if ((frac_start != 0) || gen_ssc)
return 0x34;
return 0x2;
}
static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
bool gen_ssc)
{
int digclk_divsel = bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
u64 base;
if ((frac_start != 0) || gen_ssc)
base = 0x3F;
else
base = 0xC4;
base <<= (digclk_divsel == 2 ? 1 : 0);
return (base <= 2046 ? base : 2046);
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
{
u64 dividend = HDMI_PLL_CMP_CNT * fdata;
u32 divisor = ref_clk * 10;
u32 rem;
rem = do_div(dividend, divisor);
if (rem > (divisor >> 1))
dividend++;
return dividend - 1;
}
static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
{
u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
do_div(fdata, HDMI_PLL_CMP_CNT);
return fdata;
}
#define HDMI_REF_CLOCK_HZ ((u64)19200000)
#define HDMI_MHZ_TO_HZ ((u64)1000000)
static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
{
u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
9, 10, 12, 15, 25};
u32 const band_list[] = {0, 1, 2, 3};
u32 const sz_ratio = ARRAY_SIZE(ratio_list);
u32 const sz_band = ARRAY_SIZE(band_list);
u32 const cmp_cnt = 1024;
u32 const th_min = 500, th_max = 1000;
u32 half_rate_mode = 0;
u32 list_elements;
int optimal_index;
u32 i, j, k;
u32 found_hsclk_divsel = 0, found_vco_ratio;
u32 found_tx_band_sel;
u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ;
u64 freq_list[ARRAY_SIZE(ratio_list) * ARRAY_SIZE(band_list)];
u64 found_vco_freq;
u64 freq_optimal;
find_optimal_index:
freq_optimal = max_freq;
optimal_index = -1;
list_elements = 0;
for (i = 0; i < sz_ratio; i++) {
for (j = 0; j < sz_band; j++) {
u64 freq = div_u64(bclk, (1 << half_rate_mode));
freq *= (ratio_list[i] * (1 << band_list[j]));
freq_list[list_elements++] = freq;
}
}
for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
u32 const clks_pll_div = 2, core_clk_div = 5;
u32 const rng1 = 16, rng2 = 8;
u32 th1, th2;
u64 core_clk, rvar1, rem;
core_clk = div_u64(freq_list[k],
ratio_list[k / sz_band] * clks_pll_div *
core_clk_div);
rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ;
rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
if (rem > ((cmp_cnt * core_clk) >> 1))
rvar1++;
th1 = rvar1;
rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ;
rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
if (rem > ((cmp_cnt * core_clk) >> 1))
rvar1++;
th2 = rvar1;
if (freq_list[k] >= min_freq &&
freq_list[k] <= max_freq) {
if ((th1 >= th_min && th1 <= th_max) ||
(th2 >= th_min && th2 <= th_max)) {
if (freq_list[k] <= freq_optimal) {
freq_optimal = freq_list[k];
optimal_index = k;
}
}
}
}
if (optimal_index == -1) {
if (!half_rate_mode) {
half_rate_mode = 1;
goto find_optimal_index;
} else {
return -EINVAL;
}
} else {
found_vco_ratio = ratio_list[optimal_index / sz_band];
found_tx_band_sel = band_list[optimal_index % sz_band];
found_vco_freq = freq_optimal;
}
switch (found_vco_ratio) {
case 1:
found_hsclk_divsel = 15;
break;
case 2:
found_hsclk_divsel = 0;
break;
case 3:
found_hsclk_divsel = 4;
break;
case 4:
found_hsclk_divsel = 8;
break;
case 5:
found_hsclk_divsel = 12;
break;
case 6:
found_hsclk_divsel = 1;
break;
case 9:
found_hsclk_divsel = 5;
break;
case 10:
found_hsclk_divsel = 2;
break;
case 12:
found_hsclk_divsel = 9;
break;
case 15:
found_hsclk_divsel = 13;
break;
case 25:
found_hsclk_divsel = 14;
break;
};
pd->vco_freq = found_vco_freq;
pd->tx_band_sel = found_tx_band_sel;
pd->vco_ratio = found_vco_ratio;
pd->hsclk_divsel = found_hsclk_divsel;
return 0;
}
static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
struct hdmi_8998_phy_pll_reg_cfg *cfg)
{
struct hdmi_8998_post_divider pd;
u64 bclk;
u64 dec_start;
u64 frac_start;
u64 fdata;
u32 pll_divisor;
u32 rem;
u32 cpctrl;
u32 rctrl;
u32 cctrl;
u32 integloop_gain;
u32 pll_cmp;
int i, ret;
/* bit clk = 10 * pix_clk */
bclk = ((u64)pix_clk) * 10;
ret = pll_get_post_div(&pd, bclk);
if (ret)
return ret;
dec_start = pd.vco_freq;
pll_divisor = 4 * ref_clk;
do_div(dec_start, pll_divisor);
frac_start = pd.vco_freq * (1 << 20);
rem = do_div(frac_start, pll_divisor);
frac_start -= dec_start * (1 << 20);
if (rem > (pll_divisor >> 1))
frac_start++;
cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
rctrl = pll_get_rctrl(frac_start, false);
cctrl = pll_get_cctrl(frac_start, false);
integloop_gain = pll_get_integloop_gain(frac_start, bclk,
ref_clk, false);
fdata = pd.vco_freq;
do_div(fdata, pd.vco_ratio);
pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
/* Convert these values to register specific values */
if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
cfg->com_svs_mode_clk_sel = 1;
else
cfg->com_svs_mode_clk_sel = 2;
cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
cfg->com_pll_cctrl_mode0 = cctrl;
cfg->com_pll_rctrl_mode0 = rctrl;
cfg->com_cp_ctrl_mode0 = cpctrl;
cfg->com_dec_start_mode0 = dec_start;
cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
cfg->com_lock_cmp_en = 0x0;
cfg->com_core_clk_en = 0x2c;
cfg->com_coreclk_div_mode0 = HDMI_CORECLK_DIV;
cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x5 : 0x4;
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
cfg->tx_lx_tx_band[i] = pd.tx_band_sel;
if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
cfg->tx_lx_tx_emp_post1_lvl[1] = 0x02;
cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
cfg->tx_lx_pre_driver_1[0] = 0x00;
cfg->tx_lx_pre_driver_1[1] = 0x00;
cfg->tx_lx_pre_driver_1[2] = 0x00;
cfg->tx_lx_pre_driver_1[3] = 0x00;
cfg->tx_lx_pre_driver_2[0] = 0x1C;
cfg->tx_lx_pre_driver_2[1] = 0x1C;
cfg->tx_lx_pre_driver_2[2] = 0x1C;
cfg->tx_lx_pre_driver_2[3] = 0x00;
cfg->tx_lx_res_code_offset[0] = 0x03;
cfg->tx_lx_res_code_offset[1] = 0x00;
cfg->tx_lx_res_code_offset[2] = 0x00;
cfg->tx_lx_res_code_offset[3] = 0x03;
} else if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) {
cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
cfg->tx_lx_tx_emp_post1_lvl[1] = 0x03;
cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
cfg->tx_lx_pre_driver_1[0] = 0x00;
cfg->tx_lx_pre_driver_1[1] = 0x00;
cfg->tx_lx_pre_driver_1[2] = 0x00;
cfg->tx_lx_pre_driver_1[3] = 0x00;
cfg->tx_lx_pre_driver_2[0] = 0x16;
cfg->tx_lx_pre_driver_2[1] = 0x16;
cfg->tx_lx_pre_driver_2[2] = 0x16;
cfg->tx_lx_pre_driver_2[3] = 0x18;
cfg->tx_lx_res_code_offset[0] = 0x03;
cfg->tx_lx_res_code_offset[1] = 0x00;
cfg->tx_lx_res_code_offset[2] = 0x00;
cfg->tx_lx_res_code_offset[3] = 0x00;
} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
cfg->tx_lx_tx_emp_post1_lvl[0] = 0x05;
cfg->tx_lx_tx_emp_post1_lvl[1] = 0x05;
cfg->tx_lx_tx_emp_post1_lvl[2] = 0x05;
cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
cfg->tx_lx_pre_driver_1[0] = 0x00;
cfg->tx_lx_pre_driver_1[1] = 0x00;
cfg->tx_lx_pre_driver_1[2] = 0x00;
cfg->tx_lx_pre_driver_1[3] = 0x00;
cfg->tx_lx_pre_driver_2[0] = 0x0E;
cfg->tx_lx_pre_driver_2[1] = 0x0E;
cfg->tx_lx_pre_driver_2[2] = 0x0E;
cfg->tx_lx_pre_driver_2[3] = 0x0E;
cfg->tx_lx_res_code_offset[0] = 0x00;
cfg->tx_lx_res_code_offset[1] = 0x00;
cfg->tx_lx_res_code_offset[2] = 0x00;
cfg->tx_lx_res_code_offset[3] = 0x00;
} else {
cfg->tx_lx_tx_drv_lvl[0] = 0x01;
cfg->tx_lx_tx_drv_lvl[1] = 0x01;
cfg->tx_lx_tx_drv_lvl[2] = 0x01;
cfg->tx_lx_tx_drv_lvl[3] = 0x00;
cfg->tx_lx_tx_emp_post1_lvl[0] = 0x00;
cfg->tx_lx_tx_emp_post1_lvl[1] = 0x00;
cfg->tx_lx_tx_emp_post1_lvl[2] = 0x00;
cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
cfg->tx_lx_pre_driver_1[0] = 0x00;
cfg->tx_lx_pre_driver_1[1] = 0x00;
cfg->tx_lx_pre_driver_1[2] = 0x00;
cfg->tx_lx_pre_driver_1[3] = 0x00;
cfg->tx_lx_pre_driver_2[0] = 0x16;
cfg->tx_lx_pre_driver_2[1] = 0x16;
cfg->tx_lx_pre_driver_2[2] = 0x16;
cfg->tx_lx_pre_driver_2[3] = 0x18;
cfg->tx_lx_res_code_offset[0] = 0x00;
cfg->tx_lx_res_code_offset[1] = 0x00;
cfg->tx_lx_res_code_offset[2] = 0x00;
cfg->tx_lx_res_code_offset[3] = 0x00;
}
return 0;
}
/*
 * hdmi_8998_pll_set_clk_rate() - program the QSERDES PLL and TX lanes
 * @hw: clk_hw embedded in struct hdmi_pll_8998
 * @rate: requested HDMI pixel clock rate, in Hz
 * @parent_rate: rate of the reference (XO) parent clock, in Hz
 *
 * Derives the PLL/lane settings for @rate via pll_calculate() and writes
 * them to the PHY common block, the QSERDES COM block and each of the
 * HDMI_NUM_TX_CHANNEL TX lanes.  NOTE(review): the write sequence below
 * appears to follow a required hardware power-up ordering — do not
 * reorder the register writes.
 *
 * Return: 0 on success, or the error returned by pll_calculate().
 */
static int hdmi_8998_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long parent_rate)
{
	struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
	struct hdmi_phy *phy = pll_get_phy(pll);
	struct hdmi_8998_phy_pll_reg_cfg cfg = {};
	int i, ret;

	/* Compute all register values for the requested rate up front. */
	ret = pll_calculate(rate, parent_rate, &cfg);
	if (ret) {
		DRM_ERROR("PLL calculation failed\n");
		return ret;
	}

	/* Initially shut down PHY */
	hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x0);
	udelay(500);

	/* Power up sequence */
	hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x1);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
	hdmi_phy_write(phy, REG_HDMI_8998_PHY_CMN_CTRL, 0x6);

	/* Per-lane frequency band select and clock buffer / lane mode setup. */
	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_INTERFACE_SELECT_TX_BAND,
				   cfg.tx_lx_tx_band[i]);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_CLKBUF_TERM_ENABLE,
				   0x1);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_LANE_MODE,
				   0x20);
	}

	/* QSERDES common block: clock buffers, bias and sysclk selection. */
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x02);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0B);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);

	/* Bypass VCO calibration */
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
		       cfg.com_svs_mode_clk_sel);

	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_IVCO, 0x07);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_CTRL, 0x00);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_SEL, 0x30);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_HSCLK_SEL,
		       cfg.com_hsclk_sel);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP_EN,
		       cfg.com_lock_cmp_en);

	/* Loop filter, divider and lock-comparator values from pll_calculate(). */
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
		       cfg.com_pll_cctrl_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
		       cfg.com_pll_rctrl_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CP_CTRL_MODE0,
		       cfg.com_cp_ctrl_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DEC_START_MODE0,
		       cfg.com_dec_start_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
		       cfg.com_div_frac_start1_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
		       cfg.com_div_frac_start2_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
		       cfg.com_div_frac_start3_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
		       cfg.com_integloop_gain0_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
		       cfg.com_integloop_gain1_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
		       cfg.com_lock_cmp1_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
		       cfg.com_lock_cmp2_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
		       cfg.com_lock_cmp3_mode0);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORE_CLK_EN,
		       cfg.com_core_clk_en);
	hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORECLK_DIV_MODE0,
		       cfg.com_coreclk_div_mode0);

	/* TX lanes setup (TX 0/1/2/3) */
	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_DRV_LVL,
				   cfg.tx_lx_tx_drv_lvl[i]);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_EMP_POST1_LVL,
				   cfg.tx_lx_tx_emp_post1_lvl[i]);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_PRE_DRIVER_1,
				   cfg.tx_lx_pre_driver_1[i]);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_PRE_DRIVER_2,
				   cfg.tx_lx_pre_driver_2[i]);
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_DRV_LVL_RES_CODE_OFFSET,
				   cfg.tx_lx_res_code_offset[i]);
	}

	hdmi_phy_write(phy, REG_HDMI_8998_PHY_MODE, cfg.phy_mode);

	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
		hdmi_tx_chan_write(pll, i,
				   REG_HDMI_8998_PHY_TXn_LANE_CONFIG,
				   0x10);
	}

	/*
	 * Ensure that vco configuration gets flushed to hardware before
	 * enabling the PLL
	 */
	wmb();

	/* Cache the rate for recalc_rate(). */
	pll->rate = rate;

	return 0;
}
/*
 * Poll the PHY status register until the ready bit (bit 0) is set or the
 * read budget is exhausted.  Returns non-zero when ready, 0 on timeout.
 */
static int hdmi_8998_phy_ready_status(struct hdmi_phy *phy)
{
	u32 reads_left;

	for (reads_left = HDMI_PLL_POLL_MAX_READS; reads_left; reads_left--) {
		if (hdmi_phy_read(phy, REG_HDMI_8998_PHY_STATUS) & BIT(0))
			return 1;
		udelay(HDMI_PLL_POLL_TIMEOUT_US);
	}

	return 0;
}
/*
 * Poll the QSERDES C_READY status register until the lock bit (bit 0) is
 * set or the read budget is exhausted.  Returns non-zero when locked,
 * 0 on timeout.
 */
static int hdmi_8998_pll_lock_status(struct hdmi_pll_8998 *pll)
{
	int attempts = HDMI_PLL_POLL_MAX_READS;

	while (attempts-- > 0) {
		u32 val = hdmi_pll_read(pll,
				REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);

		if (val & BIT(0))
			return 1;
		udelay(HDMI_PLL_POLL_TIMEOUT_US);
	}

	return 0;
}
static int hdmi_8998_pll_prepare(struct clk_hw *hw)
{
struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
struct hdmi_phy *phy = pll_get_phy(pll);
int i, ret = 0;
hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x1);
udelay(100);
hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
udelay(100);
ret = hdmi_8998_pll_lock_status(pll);
if (!ret)
return ret;
for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
hdmi_tx_chan_write(pll, i,
REG_HDMI_8998_PHY_TXn_LANE_CONFIG, 0x1F);
}
/* Ensure all registers are flushed to hardware */
wmb();
ret = hdmi_8998_phy_ready_status(phy);
if (!ret)
return ret;
/* Restart the retiming buffer */
hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x58);
udelay(1);
hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
/* Ensure all registers are flushed to hardware */
wmb();
return 0;
}
/*
 * clk_ops .round_rate: clamp the requested rate into the PLL's supported
 * pixel-clock range.  @hw and @parent_rate are unused.
 */
static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long *parent_rate)
{
	if (rate > HDMI_PCLK_MAX_FREQ)
		return HDMI_PCLK_MAX_FREQ;
	if (rate < HDMI_PCLK_MIN_FREQ)
		return HDMI_PCLK_MIN_FREQ;

	return rate;
}
static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
return pll->rate;
}
/* clk_ops .unprepare: power the PHY down and let it settle. */
static void hdmi_8998_pll_unprepare(struct clk_hw *hw)
{
	struct hdmi_phy *phy = pll_get_phy(hw_clk_to_pll(hw));

	hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0);
	usleep_range(100, 150);
}
/* clk_ops .is_enabled: non-zero iff the PLL lock bit (bit 0) is set. */
static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
{
	struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);

	return hdmi_pll_read(pll,
			REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS) & BIT(0);
}
/* Common-clock-framework operations for the MSM8998 HDMI PHY PLL. */
static const struct clk_ops hdmi_8998_pll_ops = {
	.set_rate = hdmi_8998_pll_set_clk_rate,
	.round_rate = hdmi_8998_pll_round_rate,
	.recalc_rate = hdmi_8998_pll_recalc_rate,
	.prepare = hdmi_8998_pll_prepare,
	.unprepare = hdmi_8998_pll_unprepare,
	.is_enabled = hdmi_8998_pll_is_enabled,
};
/*
 * Registration data for the "hdmipll" clock: parented to the XO reference
 * (looked up by fw_name "xo", legacy global name "xo_board").
 */
static const struct clk_init_data pll_init = {
	.name = "hdmipll",
	.ops = &hdmi_8998_pll_ops,
	.parent_data = (const struct clk_parent_data[]){
		{ .fw_name = "xo", .name = "xo_board" },
	},
	.num_parents = 1,
	.flags = CLK_IGNORE_UNUSED,
};
/*
 * msm_hdmi_pll_8998_init() - map the PLL/TX register regions and register
 * the "hdmipll" clock for the MSM8998 HDMI PHY.
 * @pdev: the HDMI PHY platform device
 *
 * All resources are devm-managed, so there is no teardown counterpart.
 *
 * Improvement: propagate the actual error code from msm_ioremap() via
 * PTR_ERR() instead of collapsing every mapping failure into -ENOMEM.
 *
 * Return: 0 on success, negative errno on failure.
 */
int msm_hdmi_pll_8998_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdmi_pll_8998 *pll;
	int ret, i;

	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return -ENOMEM;

	pll->pdev = pdev;

	/* QSERDES common (PLL) block. */
	pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
	if (IS_ERR(pll->mmio_qserdes_com)) {
		DRM_DEV_ERROR(dev, "failed to map pll base\n");
		return PTR_ERR(pll->mmio_qserdes_com);
	}

	/* One register block per TX lane: "hdmi_tx_l0" .. "hdmi_tx_l3". */
	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
		char name[32];

		snprintf(name, sizeof(name), "hdmi_tx_l%d", i);

		pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
		if (IS_ERR(pll->mmio_qserdes_tx[i])) {
			DRM_DEV_ERROR(dev, "failed to map pll base\n");
			return PTR_ERR(pll->mmio_qserdes_tx[i]);
		}
	}

	pll->clk_hw.init = &pll_init;

	ret = devm_clk_hw_register(dev, &pll->clk_hw);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to register pll clock\n");
		return ret;
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Regulator supplies required by the 8998 HDMI PHY. */
static const char * const hdmi_phy_8998_reg_names[] = {
	"vddio",
	"vcca",
};

/* Clocks required by the 8998 HDMI PHY. */
static const char * const hdmi_phy_8998_clk_names[] = {
	"iface", "ref", "xo",
};
/* PHY configuration entry for MSM8998, referenced by the common HDMI PHY driver. */
const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg = {
	.type = MSM_HDMI_PHY_8998,
	.reg_names = hdmi_phy_8998_reg_names,
	.num_regs = ARRAY_SIZE(hdmi_phy_8998_reg_names),
	.clk_names = hdmi_phy_8998_clk_names,
	.num_clks = ARRAY_SIZE(hdmi_phy_8998_clk_names),
};

View File

@ -215,8 +215,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
struct drm_atomic_state *pm_state;
/**
* hangcheck_period: For hang detection, in ms
*
@ -254,8 +252,6 @@ void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);

View File

@ -931,7 +931,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);

File diff suppressed because it is too large Load Diff

View File

@ -1012,4 +1012,93 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00110" name="TX_ALOG_INTF_OBSV"/>
</domain>
<!-- MSM8998 HDMI PHY: top-level common (CMN) control/status registers -->
<domain name="HDMI_8998_PHY" width="32">
<reg32 offset="0x00000" name="CFG"/>
<reg32 offset="0x00004" name="PD_CTL"/>
<reg32 offset="0x00010" name="MODE"/>
<reg32 offset="0x0005C" name="CLOCK"/>
<reg32 offset="0x00068" name="CMN_CTRL"/>
<reg32 offset="0x000B4" name="STATUS"/>
</domain>
<!-- MSM8998 HDMI PHY: QSERDES common (PLL) register block -->
<domain name="HDMI_8998_PHY_QSERDES_COM" width="32">
<reg32 offset="0x0000" name="ATB_SEL1"/>
<reg32 offset="0x0004" name="ATB_SEL2"/>
<reg32 offset="0x0008" name="FREQ_UPDATE"/>
<reg32 offset="0x000C" name="BG_TIMER"/>
<reg32 offset="0x0010" name="SSC_EN_CENTER"/>
<reg32 offset="0x0014" name="SSC_ADJ_PER1"/>
<reg32 offset="0x0018" name="SSC_ADJ_PER2"/>
<reg32 offset="0x001C" name="SSC_PER1"/>
<reg32 offset="0x0020" name="SSC_PER2"/>
<reg32 offset="0x0024" name="SSC_STEP_SIZE1"/>
<reg32 offset="0x0028" name="SSC_STEP_SIZE2"/>
<reg32 offset="0x002C" name="POST_DIV"/>
<reg32 offset="0x0030" name="POST_DIV_MUX"/>
<reg32 offset="0x0034" name="BIAS_EN_CLKBUFLR_EN"/>
<reg32 offset="0x0038" name="CLK_ENABLE1"/>
<reg32 offset="0x003C" name="SYS_CLK_CTRL"/>
<reg32 offset="0x0040" name="SYSCLK_BUF_ENABLE"/>
<reg32 offset="0x0044" name="PLL_EN"/>
<reg32 offset="0x0048" name="PLL_IVCO"/>
<reg32 offset="0x004C" name="CMN_IETRIM"/>
<reg32 offset="0x0050" name="CMN_IPTRIM"/>
<reg32 offset="0x0060" name="CP_CTRL_MODE0"/>
<reg32 offset="0x0064" name="CP_CTRL_MODE1"/>
<reg32 offset="0x0068" name="PLL_RCTRL_MODE0"/>
<reg32 offset="0x006C" name="PLL_RCTRL_MODE1"/>
<reg32 offset="0x0070" name="PLL_CCTRL_MODE0"/>
<reg32 offset="0x0074" name="PLL_CCTRL_MODE1"/>
<reg32 offset="0x0078" name="PLL_CNTRL"/>
<reg32 offset="0x007C" name="BIAS_EN_CTRL_BY_PSM"/>
<reg32 offset="0x0080" name="SYSCLK_EN_SEL"/>
<reg32 offset="0x0084" name="CML_SYSCLK_SEL"/>
<reg32 offset="0x0088" name="RESETSM_CNTRL"/>
<reg32 offset="0x008C" name="RESETSM_CNTRL2"/>
<reg32 offset="0x0090" name="LOCK_CMP_EN"/>
<reg32 offset="0x0094" name="LOCK_CMP_CFG"/>
<reg32 offset="0x0098" name="LOCK_CMP1_MODE0"/>
<reg32 offset="0x009C" name="LOCK_CMP2_MODE0"/>
<reg32 offset="0x00A0" name="LOCK_CMP3_MODE0"/>
<reg32 offset="0x00B0" name="DEC_START_MODE0"/>
<reg32 offset="0x00B4" name="DEC_START_MODE1"/>
<reg32 offset="0x00B8" name="DIV_FRAC_START1_MODE0"/>
<reg32 offset="0x00BC" name="DIV_FRAC_START2_MODE0"/>
<reg32 offset="0x00C0" name="DIV_FRAC_START3_MODE0"/>
<reg32 offset="0x00C4" name="DIV_FRAC_START1_MODE1"/>
<reg32 offset="0x00C8" name="DIV_FRAC_START2_MODE1"/>
<reg32 offset="0x00CC" name="DIV_FRAC_START3_MODE1"/>
<reg32 offset="0x00D0" name="INTEGLOOP_INITVAL"/>
<reg32 offset="0x00D4" name="INTEGLOOP_EN"/>
<reg32 offset="0x00D8" name="INTEGLOOP_GAIN0_MODE0"/>
<reg32 offset="0x00DC" name="INTEGLOOP_GAIN1_MODE0"/>
<reg32 offset="0x00E0" name="INTEGLOOP_GAIN0_MODE1"/>
<reg32 offset="0x00E4" name="INTEGLOOP_GAIN1_MODE1"/>
<reg32 offset="0x00E8" name="VCOCAL_DEADMAN_CTRL"/>
<reg32 offset="0x00EC" name="VCO_TUNE_CTRL"/>
<reg32 offset="0x00F0" name="VCO_TUNE_MAP"/>
<reg32 offset="0x0124" name="CMN_STATUS"/>
<reg32 offset="0x0128" name="RESET_SM_STATUS"/>
<reg32 offset="0x0138" name="CLK_SEL"/>
<reg32 offset="0x013C" name="HSCLK_SEL"/>
<reg32 offset="0x0148" name="CORECLK_DIV_MODE0"/>
<reg32 offset="0x0150" name="SW_RESET"/>
<reg32 offset="0x0154" name="CORE_CLK_EN"/>
<reg32 offset="0x0158" name="C_READY_STATUS"/>
<reg32 offset="0x015C" name="CMN_CONFIG"/>
<reg32 offset="0x0164" name="SVS_MODE_CLK_SEL"/>
</domain>
<!-- MSM8998 HDMI PHY: per-TX-lane register block (one instance per lane) -->
<domain name="HDMI_8998_PHY_TXn" width="32">
<reg32 offset="0x0000" name="EMP_POST1_LVL"/>
<reg32 offset="0x0008" name="INTERFACE_SELECT_TX_BAND"/>
<reg32 offset="0x000C" name="CLKBUF_TERM_ENABLE"/>
<reg32 offset="0x0014" name="DRV_LVL_RES_CODE_OFFSET"/>
<reg32 offset="0x0018" name="DRV_LVL"/>
<reg32 offset="0x001C" name="LANE_CONFIG"/>
<reg32 offset="0x0024" name="PRE_DRIVER_1"/>
<reg32 offset="0x0028" name="PRE_DRIVER_2"/>
<reg32 offset="0x002C" name="LANE_MODE"/>
</domain>
</database>

View File

@ -88,6 +88,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #