Merge tag 'drm-fixes-2022-09-02' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes pull. One core dma-buf fix, then two weeks of i915
  fixes, a lot of amdgpu fixes mostly for new IP, and a bunch of msm
  fixes, mostly modesetting ones.

  Nothing seems too bad at this point.

  dma-buf/dma-resv:
   - Fence-handling fix

  i915:
   - GVT fixes including a fix for a Comet Lake regression in the mmio
     table, and misc doc and typo fixes
   - Fix CCS handling
   - Fix for guc requests after reset
   - Display DSI related fixes
   - Display backlight related fixes
   - Fix for a null pointer dereference
   - HDMI related quirk for ECS Liva Q2 with GLK graphics
   - Skip wm/ddb readout for disabled pipes

  amdgpu:
   - FRU error message fix
   - MES 11 updates
   - DCN 3.2.x fixes
   - DCN 3.1.4 fixes
   - Fix possible use after free in CS IOCTL
   - SMU 13.0.x fixes
   - Fix iolink reporting on devices with direct connections to CPU
   - GFX10 tap delay firmware fixes

  msm:
   - Fix for inconsistent indenting in msm_dsi_dphy_timing_calc_v3().
   - Fix to make eDP the first connector in the connected list.
   - Fix to populate intf_cfg correctly before calling reset_intf_cfg().
   - Specify the correct number of DSI regulators for SDM660.
   - Specify the correct number of DSI regulators for MSM8996.
   - Fix for removing DP_RECOVERED_CLOCK_OUT_EN bit for tps4 link training
   - Fix probe-deferral crash in gpu devfreq
   - Fix gpu debugfs deadlock"

* tag 'drm-fixes-2022-09-02' of git://anongit.freedesktop.org/drm/drm: (51 commits)
  drm/amd/amdgpu: skip ucode loading if ucode_size == 0
  drm/amdgpu: only init tap_delay ucode when it's included in ucode binary
  drm/amd/display: Fix black flash when switching from ODM2to1 to ODMBypass
  drm/amd/display: Fix check for stream and plane
  drm/amd/display: Re-initialize viewport after pipe merge
  drm/amd/display: Use correct plane for CAB cursor size allocation
  drm/amdgpu: ensure no PCIe peer access for CPU XGMI iolinks
  drm/amd/pm: bump SMU 13.0.0 driver_if header version
  drm/amd/pm: use vbios carried pptable for all SMU13.0.7 SKUs
  drm/amd/pm: use vbios carried pptable for those supported SKUs
  drm/amd/display: fix wrong register access
  drm/amd/display: use actual cursor size instead of max for CAB allocation
  drm/amd/display: disable display fresh from MALL on an edge case for DCN321
  drm/amd/display: Fix CAB cursor size allocation for DCN32/321
  drm/amd/display: Missing HPO instance added
  drm/amd/display: set dig fifo read start level to 7 before dig fifo reset
  drm/amdgpu: Fix use-after-free in amdgpu_cs_ioctl
  drm/amd/display: Fix OTG H timing reset for dcn314
  drm/amd/display: Fix DCN32 DPSTREAMCLK_CNTL programming
  drm/amdgpu: Update mes_v11_api_def.h
  ...
Linus Torvalds 2022-09-02 14:56:09 -07:00
commit 1e8e515edd
46 changed files with 348 additions and 162 deletions

View File

@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
 		enum dma_resv_usage old_usage;
 
 		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
-		if ((old->context == fence->context && old_usage >= usage) ||
+		if ((old->context == fence->context && old_usage >= usage &&
+		     dma_fence_is_later(fence, old)) ||
 		    dma_fence_is_signaled(old)) {
 			dma_resv_list_set(fobj, i, fence, usage);
 			dma_fence_put(old);

View File

@@ -5524,7 +5524,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
 	resource_size_t aper_limit =
 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
-	bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
+	bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
+			!(pci_p2pdma_distance_many(adev->pdev,
 					&peer_adev->dev, 1, true) < 0);
 
 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&

View File

@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 		return true;
 	case CHIP_SIENNA_CICHLID:
 		if (strnstr(atom_ctx->vbios_version, "D603",
-			    sizeof(atom_ctx->vbios_version)))
-			return true;
-		else
+			    sizeof(atom_ctx->vbios_version))) {
+			if (strnstr(atom_ctx->vbios_version, "D603GLXE",
+				    sizeof(atom_ctx->vbios_version)))
+				return false;
+			else
+				return true;
+		} else {
 			return false;
+		}
 	default:
 		return false;
 	}

View File

@@ -159,6 +159,9 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
 
-	dma_fence_put(&job->hw_fence);
+	if (!job->hw_fence.ops)
+		kfree(job);
+	else
+		dma_fence_put(&job->hw_fence);
 }

View File

@@ -2401,7 +2401,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
 static bool fw_load_skip_check(struct psp_context *psp,
 			       struct amdgpu_firmware_info *ucode)
 {
-	if (!ucode->fw)
+	if (!ucode->fw || !ucode->ucode_size)
 		return true;
 
 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&

View File

@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		}
 
+		if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
 			info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+		}
 
+		if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
 			info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+		}
 
+		if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
 			info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+		}
 
+		if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
 			info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+		}
 
+		if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
 			info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
 			info->fw = adev->gfx.rlc_fw;
 			adev->firmware.fw_size +=
 				ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+		}
 
 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;

View File

@@ -183,6 +183,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
 	mes_add_queue_pkt.tma_addr = input->tma_addr;
 	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+	mes_add_queue_pkt.trap_en = 1;
 
 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
 			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),

View File

@@ -1094,7 +1094,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 			dc->current_state->stream_count != context->stream_count)
 			should_disable = true;
 
-		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
 			struct pipe_ctx *old_pipe, *new_pipe;
 
 			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

View File

@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
 {
 	struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
 
+	if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+		return true;
+
 	/* Supports development firmware and firmware >= 4.0.11 */
 	return dc_dmub_srv &&
 	       !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&

View File

@@ -317,6 +317,7 @@ static void enc314_stream_encoder_dp_unblank(
 		/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
 		 * that it overflows during mode transition, and sometimes doesn't recover.
 		 */
+		REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
 		REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
 		udelay(10);

View File

@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
 	REG_UPDATE(OPTC_WIDTH_CONTROL,
 			OPTC_SEGMENT_WIDTH, mpcc_hactive);
 
-	REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+	REG_UPDATE(OTG_H_TIMING_CNTL,
+			OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
 
 	optc1->opp_count = opp_cnt;
 }

View File

@@ -454,6 +454,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
 	hpo_dp_stream_encoder_reg_list(0),
 	hpo_dp_stream_encoder_reg_list(1),
 	hpo_dp_stream_encoder_reg_list(2),
+	hpo_dp_stream_encoder_reg_list(3)
 };
 
 static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {

View File

@@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
 	case 0:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL,
 			     DPSTREAMCLK0_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
 		break;
 	case 1:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
 		break;
 	case 2:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
 		break;
 	case 3:
 		REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
-			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+			     (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
 		break;
 	default:
 		BREAK_TO_DEBUGGER();

View File

@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
 		// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
 		REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
 
+		/* read start level = 0 will bring underflow / overflow and DIG_FIFO_ERROR = 1
+		 * so set it to 1/2 full = 7 before reset as suggested by hardware team.
+		 */
+		REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
 		REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
 		REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);

View File

@@ -295,8 +295,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 			}
 
 			// Include cursor size for CAB allocation
-			if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
-				cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
+			for (j = 0; j < dc->res_pool->pipe_count; j++) {
+				struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+				struct hubp *hubp = pipe->plane_res.hubp;
+
+				if (pipe->stream && pipe->plane_state && hubp)
+					/* Find the cursor plane and use the exact size instead of
+					 * using the max for calculation
+					 */
+					if (hubp->curs_attr.width > 0) {
+						cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
+						break;
+					}
+			}
+
 			switch (stream->cursor_attributes.color_format) {
 			case CURSOR_MODE_MONO:
 				cursor_size /= 2;
@@ -312,7 +324,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 				cursor_size *= 8;
 				break;
 			}
-			cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
+
+			if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
+				cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
 					plane->address.grph.cursor_cache_addr.quad_part);
 			}
 		}
@@ -325,6 +339,26 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
 	if (cache_lines_used % lines_per_way > 0)
 		num_ways++;
 
+	for (i = 0; i < ctx->stream_count; i++) {
+		stream = ctx->streams[i];
+		for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+			plane = ctx->stream_status[i].plane_states[j];
+
+			if (stream->cursor_position.enable && plane &&
+				!plane->address.grph.cursor_cache_addr.quad_part &&
+				cursor_size > 16384) {
+				/* Cursor caching is not supported since it won't be on the same line.
+				 * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
+				 * this case triggers corruption. If we're at the edge, then dont trigger display refresh
+				 * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
+				 */
+				num_ways++;
+				/* We only expect one cursor plane */
+				break;
+			}
+		}
+	}
+
 	return num_ways;
 }

View File

@@ -144,7 +144,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
 		if (!pipe->stream)
-			continue;
+			return false;
 
 		if (!pipe->plane_state)
 			return false;

View File

@@ -1014,6 +1014,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 	    dc->debug.force_subvp_mclk_switch)) {
 
 		dcn32_merge_pipes_for_subvp(dc, context);
+		// to re-initialize viewport after the pipe merge
+		for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+				continue;
+
+			resource_build_scaling_params(pipe_ctx);
+		}
 
 		while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
 			dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {

View File

@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	dto_params.timing = &pipe_ctx->stream->timing;
 	dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
 
-	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+	dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
 	dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
 	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
 	stream_enc->funcs->disable(stream_enc);
 	dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 	dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
-	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+	dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
 }
 
 static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)

View File

@@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
 			uint32_t is_tmz_queue		: 1;
 			uint32_t map_kiq_utility_queue	: 1;
 			uint32_t is_kfd_process		: 1;
-			uint32_t reserved		: 22;
+			uint32_t trap_en		: 1;
+			uint32_t reserved		: 21;
 		};
 		struct MES_API_STATUS		api_status;
 		uint64_t			tma_addr;

View File

@@ -25,7 +25,7 @@
 #define SMU13_DRIVER_IF_V13_0_0_H
 
 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x22
+#define PPTABLE_VERSION 0x24
 
 #define NUM_GFXCLK_DPM_LEVELS 16
 #define NUM_SOCCLK_DPM_LEVELS 8

View File

@@ -30,7 +30,7 @@
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
 
 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
@@ -291,5 +291,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
 void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
 
 int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+					void **table,
+					uint32_t *size,
+					uint32_t pptable_id);
+
 #endif
 #endif

View File

@@ -84,9 +84,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
 static const int link_speed[] = {25, 50, 80, 160};
 
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
-					       uint32_t pptable_id);
-
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -224,23 +221,19 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
 
 	/*
 	 * Temporary solution for SMU V13.0.0 with SCPM enabled:
-	 * - use 36831 signed pptable when pp_table_id is 3683
-	 * - use 37151 signed pptable when pp_table_id is 3715
-	 * - use 36641 signed pptable when pp_table_id is 3664 or 0
-	 * TODO: drop these when the pptable carried in vbios is ready.
+	 * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+	 * - use 36831 soft pptable when pptable_id is 3683
 	 */
 	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
 		switch (pptable_id) {
-		case 0:
 		case 3664:
-			pptable_id = 36641;
+		case 3715:
+		case 3795:
+			pptable_id = 0;
 			break;
 		case 3683:
 			pptable_id = 36831;
 			break;
-		case 3715:
-			pptable_id = 37151;
-			break;
 		default:
 			dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
 			return -EINVAL;
@@ -425,7 +418,9 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
 	return 0;
 }
 
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+					void **table,
+					uint32_t *size,
 					uint32_t pptable_id)
 {
 	const struct smc_firmware_header_v1_0 *hdr;

View File

@@ -388,11 +388,29 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+					     void **table,
+					     uint32_t *size)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	void *combo_pptable = smu_table->combo_pptable;
+	int ret = 0;
+
+	ret = smu_cmn_get_combo_pptable(smu);
+	if (ret)
+		return ret;
+
+	*table = combo_pptable;
+	*size = sizeof(struct smu_13_0_0_powerplay_table);
+
+	return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
 	struct amdgpu_device *adev = smu->adev;
+	uint32_t pptable_id;
 	int ret = 0;
 
 	/*
@@ -401,17 +419,51 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
 	 * rely on the combo pptable(and its revelant SMU message).
 	 */
 	if (adev->scpm_enabled) {
-		ret = smu_cmn_get_combo_pptable(smu);
-		if (ret)
-			return ret;
-
-		smu->smu_table.power_play_table = combo_pptable;
-		smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
+		ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+							&smu_table->power_play_table,
+							&smu_table->power_play_table_size);
 	} else {
-		ret = smu_v13_0_setup_pptable(smu);
+		/* override pptable_id from driver parameter */
+		if (amdgpu_smu_pptable_id >= 0) {
+			pptable_id = amdgpu_smu_pptable_id;
+			dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+		} else {
+			pptable_id = smu_table->boot_values.pp_table_id;
+		}
+
+		/*
+		 * Temporary solution for SMU V13.0.0 with SCPM disabled:
+		 * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+		 * - use soft pptable when pptable_id is 3683
+		 */
+		if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+			switch (pptable_id) {
+			case 3664:
+			case 3715:
+			case 3795:
+				pptable_id = 0;
+				break;
+			case 3683:
+				break;
+			default:
+				dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+				return -EINVAL;
+			}
+		}
+
+		/* force using vbios pptable in sriov mode */
+		if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+			ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+								&smu_table->power_play_table,
+								&smu_table->power_play_table_size);
+		else
+			ret = smu_v13_0_get_pptable_from_firmware(smu,
+								  &smu_table->power_play_table,
+								  &smu_table->power_play_table_size,
+								  pptable_id);
+	}
+
 	if (ret)
 		return ret;
-	}
 
 	ret = smu_v13_0_0_store_powerplay_table(smu);
 	if (ret)

View File

@@ -400,11 +400,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+					     void **table,
+					     uint32_t *size)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	void *combo_pptable = smu_table->combo_pptable;
+	int ret = 0;
+
+	ret = smu_cmn_get_combo_pptable(smu);
+	if (ret)
+		return ret;
+
+	*table = combo_pptable;
+	*size = sizeof(struct smu_13_0_7_powerplay_table);
+
+	return 0;
+}
+
 static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	void *combo_pptable = smu_table->combo_pptable;
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
@@ -413,19 +429,12 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
 	 * be used directly by driver. To get the raw pptable, we need to
 	 * rely on the combo pptable(and its revelant SMU message).
 	 */
-	if (adev->scpm_enabled) {
-		ret = smu_cmn_get_combo_pptable(smu);
-		if (ret)
-			return ret;
-
-		smu->smu_table.power_play_table = combo_pptable;
-		smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
-	} else {
-		ret = smu_v13_0_setup_pptable(smu);
-		if (ret)
-			return ret;
-	}
+	ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+						&smu_table->power_play_table,
+						&smu_table->power_play_table_size);
+	if (ret)
+		return ret;
 
 	ret = smu_v13_0_7_store_powerplay_table(smu);
 	if (ret)
 		return ret;

View File

@@ -2070,7 +2070,14 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	else
 		intel_dsi->ports = BIT(port);
 
+	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+		intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
 	intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+		intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
 	intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
 
 	for_each_dsi_port(port, intel_dsi->ports) {

View File

@@ -16,6 +16,7 @@
 #include "intel_dsi_dcs_backlight.h"
 #include "intel_panel.h"
 #include "intel_pci_config.h"
+#include "intel_pps.h"
 
 /**
  * scale - scale values from one range to another
@@ -971,9 +972,9 @@ int intel_backlight_device_register(struct intel_connector *connector)
 	if (!name)
 		return -ENOMEM;
 
-	bd = backlight_device_register(name, connector->base.kdev, connector,
-				       &intel_backlight_device_ops, &props);
+	bd = backlight_device_get_by_name(name);
+	if (bd) {
+		put_device(&bd->dev);
 	/*
 	 * Using the same name independent of the drm device or connector
 	 * prevents registration of multiple backlight devices in the
@@ -981,16 +982,14 @@ int intel_backlight_device_register(struct intel_connector *connector)
 	 * compatibility. Use unique names for subsequent backlight devices as a
 	 * fallback when the default name already exists.
 	 */
-	if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
 		kfree(name);
 		name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
 				 i915->drm.primary->index, connector->base.name);
 		if (!name)
 			return -ENOMEM;
+	}
 
-		bd = backlight_device_register(name, connector->base.kdev, connector,
-					       &intel_backlight_device_ops, &props);
-	}
+	bd = backlight_device_register(name, connector->base.kdev, connector,
+				       &intel_backlight_device_ops, &props);
 
 	if (IS_ERR(bd)) {
 		drm_err(&i915->drm,
@@ -1773,10 +1772,14 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
 			panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
 	}
 
-	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
-	    intel_dp_aux_init_backlight_funcs(connector) == 0)
-		return;
+	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+		if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+			return;
+
+		if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+			connector->panel.backlight.power = intel_pps_backlight_power;
+	}
 
 	/* We're using a standard PWM backlight interface */
 	panel->backlight.funcs = &pwm_bl_funcs;
 }

View File

@@ -1596,6 +1596,8 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
 				      struct intel_panel *panel,
 				      enum port port)
 {
+	enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
 	if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
 		panel->vbt.dsi.bl_ports = BIT(port);
 		if (panel->vbt.dsi.config->cabc_supported)
@@ -1609,11 +1611,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
 		panel->vbt.dsi.bl_ports = BIT(PORT_A);
 		break;
 	case DL_DCS_PORT_C:
-		panel->vbt.dsi.bl_ports = BIT(PORT_C);
+		panel->vbt.dsi.bl_ports = BIT(port_bc);
 		break;
 	default:
 	case DL_DCS_PORT_A_AND_C:
-		panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+		panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
 		break;
 	}
 
@@ -1625,12 +1627,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
 		panel->vbt.dsi.cabc_ports = BIT(PORT_A);
 		break;
 	case DL_DCS_PORT_C:
-		panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+		panel->vbt.dsi.cabc_ports = BIT(port_bc);
 		break;
 	default:
 	case DL_DCS_PORT_A_AND_C:
 		panel->vbt.dsi.cabc_ports =
-			BIT(PORT_A) | BIT(PORT_C);
+			BIT(PORT_A) | BIT(port_bc);
 		break;
 	}
 }

View File

@@ -404,15 +404,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 		int clpchgroup;
 		int j;
 
-		if (i < num_groups - 1)
-			bi_next = &dev_priv->max_bw[i + 1];
-
 		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
 
-		if (i < num_groups - 1 && clpchgroup < clperchgroup)
-			bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+		if (i < num_groups - 1) {
+			bi_next = &dev_priv->max_bw[i + 1];
+
+			if (clpchgroup < clperchgroup)
+				bi_next->num_planes = (ipqdepth - clpchgroup) /
+						      clpchgroup + 1;
 			else
 				bi_next->num_planes = 0;
+		}
 
 		bi->num_qgv_points = qi.num_points;
 		bi->num_psf_gv_points = qi.num_psf_points;

View File

@@ -5293,8 +5293,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 
 	intel_panel_init(intel_connector);
 
-	if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
-		intel_connector->panel.backlight.power = intel_pps_backlight_power;
 	intel_backlight_setup(intel_connector, pipe);
 
 	intel_edp_add_properties(intel_dp);

View File

@@ -191,6 +191,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* ASRock ITX*/
 	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	/* ECS Liva Q2 */
+	{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
 };
 
 void intel_init_quirks(struct drm_i915_private *i915)

View File

@@ -1933,7 +1933,14 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	else
 		intel_dsi->ports = BIT(port);
 
+	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+		intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
 	intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+	if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+		intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
 	intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
 
 	/* Create a DSI host (and a device) for each port. */

View File

@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq,
 	return 0;
 }
 
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
 {
-	int len = 0;
+	u64 len = 0;
 
 	while (sg && sg_dma_len(sg)) {
 		len += sg_dma_len(sg);
@@ -650,12 +650,11 @@ static int scatter_list_length(struct scatterlist *sg)
 	return len;
 }
 
-static void
+static int
 calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
-		   int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
 {
-	if (ccs_bytes_to_cpy) {
-		if (!src_is_lmem)
+	if (ccs_bytes_to_cpy && !src_is_lmem)
 		/*
 		 * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
 		 * will be taken for the blt. in Flat-ccs supported
@@ -663,15 +662,14 @@ calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
 		 * for main meory hence limit it to the required size
 		 * for main memory
 		 */
-		*src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
-	} else { /* ccs handling is not required */
-		*src_sz = CHUNK_SZ;
-	}
+		return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+	else
+		return CHUNK_SZ;
 }
 
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
 {
-	u32 len;
+	u64 len;
 
 	do {
 		GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,12 +700,12 @@ intel_context_migrate_copy(struct intel_context *ce,
 {
 	struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
 	struct drm_i915_private *i915 = ce->engine->i915;
-	u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+	u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
 	enum i915_cache_level ccs_cache_level;
 	u32 src_offset, dst_offset;
 	u8 src_access, dst_access;
 	struct i915_request *rq;
-	int src_sz, dst_sz;
+	u64 src_sz, dst_sz;
 	bool ccs_is_src, overwrite_ccs;
 	int err;
@@ -790,7 +788,7 @@ intel_context_migrate_copy(struct intel_context *ce,
 		if (err)
 			goto out_rq;
 
-		calculate_chunk_sz(i915, src_is_lmem, &src_sz,
+		src_sz = calculate_chunk_sz(i915, src_is_lmem,
 				   bytes_to_cpy, ccs_bytes_to_cpy);
 
 		len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,

View File

@@ -4026,6 +4026,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
 	/* make sure all descriptors are clean... */
 	xa_destroy(&guc->context_lookup);
 
+	/*
+	 * A reset might have occurred while we had a pending stalled request,
+	 * so make sure we clean that up.
+	 */
+	guc->stalled_request = NULL;
+	guc->submission_stall_reason = STALL_NONE;
+
 	/*
 	 * Some contexts might have been pinned before we enabled GuC
 	 * submission, so we need to add them to the GuC bookeeping.

View File

@@ -298,7 +298,7 @@ no_enough_resource:
 }
 
 /**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
  * @vgpu: a vGPU
  *
  * This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 }
 
 /**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
  * @vgpu: vGPU
 * @param: vGPU creation params
 *

View File

@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partial
 			 * update the entry in this situation p2m will fail
-			 * settting the shadow entry to point to a scratch page
+			 * setting the shadow entry to point to a scratch page
 			 */
 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		} else

View File

@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
 	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
 		index = FDI_RX_IMR_TO_PIPE(offset);
 	else {
-		gvt_vgpu_err("Unsupport registers %x\n", offset);
+		gvt_vgpu_err("Unsupported registers %x\n", offset);
 		return -EINVAL;
 	}
 
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 }
 
 /**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
  * @vgpu: a vGPU
  * @offset: access offset
  * @p_data: write data buffer

View File

@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 }
 
 /**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
  * @pre: the last vGPU that own the engine
 * @next: the vGPU to switch to
 * @engine: the engine

View File

@@ -1076,7 +1076,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
 	MMIO_D(GEN8_HDC_CHICKEN1);
 	MMIO_D(GEN9_WM_CHICKEN3);
 
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+	if (IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
 		MMIO_D(GAMT_CHKN_BIT_REG);
 	if (!IS_BROXTON(dev_priv))
 		MMIO_D(GEN9_CTX_PREEMPT_REG);

View File

@@ -6561,6 +6561,9 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		enum plane_id plane_id;
 		u8 slices;
 
+		memset(&crtc_state->wm.skl.optimal, 0,
+		       sizeof(crtc_state->wm.skl.optimal));
+		if (crtc_state->hw.active)
 			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6572,6 +6575,9 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 			struct skl_ddb_entry *ddb_y =
 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
 
+			if (!crtc_state->hw.active)
+				continue;
+
 			skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
 						   plane_id, ddb, ddb_y);

View File

@@ -2061,6 +2061,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
 	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
 
+	if (phys_enc->hw_intf)
+		intf_cfg.intf = phys_enc->hw_intf->idx;
+	if (phys_enc->hw_wb)
+		intf_cfg.wb = phys_enc->hw_wb->idx;
+
 	if (phys_enc->hw_pp->merge_3d)
 		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

View File

@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
 	if (ret)
 		return ret;
 
-	dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+	dp_ctrl_train_pattern_set(ctrl, pattern);
 
 	for (tries = 0; tries <= maximum_retries; tries++) {
 		drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);

View File

@@ -109,7 +109,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
 static const struct msm_dsi_config msm8996_dsi_cfg = {
 	.io_offset = DSI_6G_REG_SHIFT,
 	.reg_cfg = {
-		.num = 2,
+		.num = 3,
 		.regs = {
 			{"vdda", 18160, 1 },	/* 1.25 V */
 			{"vcca", 17000, 32 },	/* 0.925 V */
@@ -148,7 +148,7 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
 static const struct msm_dsi_config sdm660_dsi_cfg = {
 	.io_offset = DSI_6G_REG_SHIFT,
 	.reg_cfg = {
-		.num = 2,
+		.num = 1,
 		.regs = {
 			{"vdda", 12560, 4 },	/* 1.2 V */
 		},

View File

@@ -469,6 +469,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 		}
 	}
 
+	drm_helper_move_panel_connectors_to_head(ddev);
+
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;

View File

@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 	if (IS_ERR(df->devfreq)) {
 		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+		dev_pm_qos_remove_request(&df->idle_freq);
+		dev_pm_qos_remove_request(&df->boost_freq);
 		df->devfreq = NULL;
 		return;
 	}

View File

@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
 	file->private_data = rd;
 	rd->open = true;
 
+	/* Reset fifo to clear any previously unread data: */
+	rd->fifo.head = rd->fifo.tail = 0;
+
 	/* the parsing tools need to know gpu-id to know which
 	 * register database to load.
 	 *