Merge tag 'drm-intel-next-2024-04-17-1' of https://anongit.freedesktop.org/git/drm/drm-intel into drm-next

Core Changes (DRM):
- Fix documentation of DP tunnel functions (Imre)
- DP MST read sideband messaging cap (Jani)
- Preparation patches for Adaptive Sync SDP Support for DP (Mitul)

Driver Changes:

i915 core (non-display):
- Documentation improvements (Nirmoy)
- Add includes for BUG_ON/BUILD_BUG_ON in i915_memcpy.c (Joonas)
- Do not print 'pxp init failed with 0' when it succeeds (Jose)
- Clean-up, including removal of dead code for unsupported platforms (Lucas)
- Add new DG2 PCI ID (Ravi)

{i915,xe} display:
- Spelling fix (Colin Ian)
- Document CDCLK components (Gustavo)
- Lunar Lake display enabling, including cdclk and other refactors (Gustavo, Bala)
- BIOS/VBT/opregion related refactor (Jani, Ville, RK)
- Save a few bytes of memory using the {kstrdup,kfree}_const variants (Christophe)
- Digital port related refactor/clean-up (Ville)
- Fix 2s boot time regression on DP panel replay init (Animesh)
- Remove redundant drm_rect_visible() overlay use (Arthur)
- DSC HW state readout fixes (Imre)
- Remove duplication on audio enable/disable on SDVO and g4x+ DP (Ville)
- Disable AuxCCS framebuffers if built for Xe (Juha-Pekka)
- Fix DSI init order (Ville)
- DRRS related refactors and fixes (Bhanuprakash)
- Fix DSB vblank waits with VRR (Ville)
- General improvements to register names and use of REG_BIT (Ville)
- Some display power well related improvements (Ville)
- FBC changes for better w/a handling (Ville)
- Make crtc disable more atomic (Ville)
- Fix hwmon locking inversion in sysfs getter (Janusz)
- Increase DP idle pattern wait timeout to 2ms (Shekhar)
- PSR related fixes and improvements (Jouni)
- Start using container_of_const() for some extra const safety (Ville)
- Use drm_printer more in display code (Ville)
- Fix Jasper Lake boot freeze (Jonathon)
- Update pipe src size check in skl_update_scaler (Ankit)
- Enable MST mode for 128b/132b single-stream sideband (Jani)
- Pass encoder around more for port/phy checks (Jani)
- Some initial work to make display code more independent from i915 (Jani)
- Pre-populate the cursor physical dma address (Ville)
- Do not bump min backlight brightness to max on enable (Gareth)
- Fix MTL supported DP rates - removal of UHBR13.5 (Arun)
- Fix the computation of compressed_bpp for DISPLAY < 13 (Ankit)
- Bigjoiner modeset sequence redesign and MST support (Ville)
- Enable Adaptive Sync SDP Support for DP (Mitul)
- Implement vblank synchronized mbus joining changes (Ville, Stanislav)
- HDCP related fixes (Suraj)
- Fix i915_display_info debugfs when connectors are not active (Ville)
- Clean-up of the Xe compat layer (Jani)
- Add jitter WAs for MST/FEC/DSC links (Imre)
- DMC wakelock implementation (Luca)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Zh_Q72gYKMMbge9A@intel.com
@ -204,6 +204,15 @@ DMC Firmware Support

.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc.c
   :internal:

DMC wakelock support
--------------------

.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
   :doc: DMC wakelock support

.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
   :internal:

Video BIOS Table (VBT)
----------------------
@ -93,12 +93,11 @@ struct drm_i915_gem_timeline_fence {
 * Multiple VA mappings can be created to the same section of the object
 * (aliasing).
 *
 * The @start, @offset and @length must be 4K page aligned. However the DG2
 * and XEHPSDV has 64K page size for device local memory and has compact page
 * table. On those platforms, for binding device local-memory objects, the
 * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
 * the local memory 64K page and the system memory 4K page bindings in the same
 * 2M range.
 * The @start, @offset and @length must be 4K page aligned. However the DG2 has
 * 64K page size for device local memory and has compact page table. On that
 * platform, for binding device local-memory objects, the @start, @offset and
 * @length must be 64K aligned. Also, UMDs should not mix the local memory 64K
 * page and the system memory 4K page bindings in the same 2M range.
 *
 * Error code -EINVAL will be returned if @start, @offset and @length are not
 * properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
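To make the alignment rule in that comment block concrete, here is a standalone sketch (not driver or UAPI code); the helper name and the is_dg2/is_lmem flags are hypothetical stand-ins for the real platform and placement checks.

/*
 * Standalone sketch of the VM_BIND alignment rule described above: on DG2,
 * device local-memory bindings must be 64K aligned, everything else 4K.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K  0x1000u
#define SZ_64K 0x10000u

static bool vm_bind_range_ok(uint64_t start, uint64_t offset, uint64_t length,
			     bool is_dg2, bool is_lmem)
{
	uint64_t align = (is_dg2 && is_lmem) ? SZ_64K : SZ_4K;

	return !(start % align) && !(offset % align) && !(length % align);
}

int main(void)
{
	printf("%d %d\n",
	       vm_bind_range_ok(0x10000, 0, 0x20000, true, true),	/* ok */
	       vm_bind_range_ok(0x1000, 0, 0x2000, true, true));	/* would be -EINVAL */
	return 0;
}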
@ -2948,6 +2948,43 @@ void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
|
||||
|
||||
void drm_dp_as_sdp_log(struct drm_printer *p, const struct drm_dp_as_sdp *as_sdp)
|
||||
{
|
||||
drm_printf(p, "DP SDP: AS_SDP, revision %u, length %u\n",
|
||||
as_sdp->revision, as_sdp->length);
|
||||
drm_printf(p, " vtotal: %d\n", as_sdp->vtotal);
|
||||
drm_printf(p, " target_rr: %d\n", as_sdp->target_rr);
|
||||
drm_printf(p, " duration_incr_ms: %d\n", as_sdp->duration_incr_ms);
|
||||
drm_printf(p, " duration_decr_ms: %d\n", as_sdp->duration_decr_ms);
|
||||
drm_printf(p, " operation_mode: %d\n", as_sdp->mode);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_as_sdp_log);
|
||||
|
||||
/**
|
||||
* drm_dp_as_sdp_supported() - check if adaptive sync sdp is supported
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @dpcd: DisplayPort configuration data
|
||||
*
|
||||
* Returns true if adaptive sync sdp is supported, else returns false
|
||||
*/
|
||||
bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
{
|
||||
u8 rx_feature;
|
||||
|
||||
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
|
||||
return false;
|
||||
|
||||
if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
|
||||
&rx_feature) != 1) {
|
||||
drm_dbg_dp(aux->drm_dev,
|
||||
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return (rx_feature & DP_ADAPTIVE_SYNC_SDP_SUPPORTED);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_as_sdp_supported);
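A standalone sketch (not kernel code) of the same capability check: sinks below DPCD r1.3 are rejected, otherwise a feature bit read from DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 decides. The bit mask is passed in as a parameter here because the authoritative DP_ADAPTIVE_SYNC_SDP_SUPPORTED definition lives in drm_dp.h and is not repeated in this example.

/*
 * Sketch of drm_dp_as_sdp_supported()'s decision, with the feature mask
 * supplied by the caller rather than hard-coded.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_DPCD_REV_13 0x13	/* DPCD revision 1.3 */

static bool as_sdp_supported(uint8_t dpcd_rev, uint8_t rx_feature,
			     uint8_t as_sdp_mask)
{
	if (dpcd_rev < DP_DPCD_REV_13)
		return false;

	return rx_feature & as_sdp_mask;
}

int main(void)
{
	/* hypothetical r1.4 sink advertising the feature via an example mask */
	printf("%d\n", as_sdp_supported(0x14, 0x01, 0x01));
	return 0;
}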
|
||||
|
||||
/**
|
||||
* drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
|
||||
* @aux: DisplayPort AUX channel
|
||||
|
@ -3608,24 +3608,30 @@ fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);

/**
 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
 * drm_dp_read_mst_cap() - Read the sink's MST mode capability
 * @aux: The DP AUX channel to use
 * @dpcd: A cached copy of the DPCD capabilities for this sink
 *
 * Returns: %True if the sink supports MST, %false otherwise
 * Returns: enum drm_dp_mst_mode to indicate MST mode capability
 */
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
					 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 mstm_cap;

	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
		return false;
		return DRM_DP_SST;

	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;
		return DRM_DP_SST;

	return mstm_cap & DP_MST_CAP;
	if (mstm_cap & DP_MST_CAP)
		return DRM_DP_MST;

	if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
		return DRM_DP_SST_SIDEBAND_MSG;

	return DRM_DP_SST;
}
EXPORT_SYMBOL(drm_dp_read_mst_cap);
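To illustrate the new tri-state return value, here is a small standalone sketch (not kernel code) mirroring the classification above. DP_MST_CAP is bit 0 of MSTM_CAP as in drm_dp.h; the bit position used for DP_SINGLE_STREAM_SIDEBAND_MSG is an assumption made only for this example.

/*
 * Standalone sketch of drm_dp_read_mst_cap()'s decision on the MSTM_CAP byte.
 */
#include <stdio.h>

#define DP_MST_CAP			(1 << 0)
#define DP_SINGLE_STREAM_SIDEBAND_MSG	(1 << 1)	/* assumed bit position */

enum drm_dp_mst_mode {
	DRM_DP_SST,			/* plain single-stream sink */
	DRM_DP_SST_SIDEBAND_MSG,	/* single stream, sideband messaging available */
	DRM_DP_MST,			/* full multi-stream transport */
};

static enum drm_dp_mst_mode classify_mstm_cap(unsigned char mstm_cap)
{
	if (mstm_cap & DP_MST_CAP)
		return DRM_DP_MST;

	if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
		return DRM_DP_SST_SIDEBAND_MSG;

	return DRM_DP_SST;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_mstm_cap(0x01),		/* DRM_DP_MST */
	       classify_mstm_cap(0x02),		/* DRM_DP_SST_SIDEBAND_MSG */
	       classify_mstm_cap(0x00));	/* DRM_DP_SST */
	return 0;
}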
|
||||
|
||||
|
@ -436,8 +436,8 @@ EXPORT_SYMBOL(drm_dp_tunnel_get);

/**
 * drm_dp_tunnel_put - Put a reference for a DP tunnel
 * @tunnel - Tunnel object
 * @tracker - Debug tracker for the reference
 * @tunnel: Tunnel object
 * @tracker: Debug tracker for the reference
 *
 * Put a reference for @tunnel along with its debug *@tracker, which
 * was obtained with drm_dp_tunnel_get().
@ -1170,7 +1170,7 @@ int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);

/**
 * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel
 * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Get the current BW allocated for @tunnel. After the tunnel is created /
@ -1892,6 +1892,7 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
/**
 * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
 * @dev: DRM device object
 * @max_group_count: Maximum number of tunnel groups
 *
 * Creates a DP tunnel manager for @dev.
 *
@ -32,11 +32,6 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

# Fine grained warnings disable
CFLAGS_i915_pci.o = -Wno-override-init
CFLAGS_display/intel_display_device.o = -Wno-override-init
CFLAGS_display/intel_fbdev.o = -Wno-override-init

# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
@ -270,6 +265,7 @@ i915-y += \
	display/intel_display_rps.o \
	display/intel_display_wa.o \
	display/intel_dmc.o \
	display/intel_dmc_wl.o \
	display/intel_dpio_phy.o \
	display/intel_dpll.o \
	display/intel_dpll_mgr.o \
@ -1616,8 +1616,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
|
||||
base);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
struct intel_connector *intel_connector = intel_dsi->attached_connector;
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
|
@ -62,7 +62,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_digital_connector_state *intel_conn_state =
|
||||
const struct intel_digital_connector_state *intel_conn_state =
|
||||
to_intel_digital_connector_state(state);
|
||||
|
||||
if (property == dev_priv->display.properties.force_audio)
|
||||
|
@ -761,8 +761,8 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
|
||||
|
||||
WARN_ON(panel->backlight.max == 0);
|
||||
|
||||
if (panel->backlight.level <= panel->backlight.min) {
|
||||
panel->backlight.level = panel->backlight.max;
|
||||
if (panel->backlight.level < panel->backlight.min) {
|
||||
panel->backlight.level = panel->backlight.min;
|
||||
if (panel->backlight.device)
|
||||
panel->backlight.device->props.brightness =
|
||||
scale_hw_to_user(connector,
|
||||
@ -949,7 +949,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
|
||||
else
|
||||
props.power = FB_BLANK_POWERDOWN;
|
||||
|
||||
name = kstrdup("intel_backlight", GFP_KERNEL);
|
||||
name = kstrdup_const("intel_backlight", GFP_KERNEL);
|
||||
if (!name)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -963,7 +963,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
|
||||
* compatibility. Use unique names for subsequent backlight devices as a
|
||||
* fallback when the default name already exists.
|
||||
*/
|
||||
kfree(name);
|
||||
kfree_const(name);
|
||||
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
|
||||
i915->drm.primary->index, connector->base.name);
|
||||
if (!name)
|
||||
@ -987,7 +987,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
|
||||
connector->base.base.id, connector->base.name, name);
|
||||
|
||||
out:
|
||||
kfree(name);
|
||||
kfree_const(name);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -25,6 +25,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include <drm/display/drm_dp_helper.h>
|
||||
#include <drm/display/drm_dsc_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
@ -2730,6 +2732,57 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
|
||||
print_ddi_port(devdata);
|
||||
}
|
||||
|
||||
static int child_device_expected_size(u16 version)
|
||||
{
|
||||
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
|
||||
|
||||
if (version > 256)
|
||||
return -ENOENT;
|
||||
else if (version >= 256)
|
||||
return 40;
|
||||
else if (version >= 216)
|
||||
return 39;
|
||||
else if (version >= 196)
|
||||
return 38;
|
||||
else if (version >= 195)
|
||||
return 37;
|
||||
else if (version >= 111)
|
||||
return LEGACY_CHILD_DEVICE_CONFIG_SIZE;
|
||||
else if (version >= 106)
|
||||
return 27;
|
||||
else
|
||||
return 22;
|
||||
}
|
||||
|
||||
static bool child_device_size_valid(struct drm_i915_private *i915, int size)
|
||||
{
|
||||
int expected_size;
|
||||
|
||||
expected_size = child_device_expected_size(i915->display.vbt.version);
|
||||
if (expected_size < 0) {
|
||||
expected_size = sizeof(struct child_device_config);
|
||||
drm_dbg(&i915->drm,
|
||||
"Expected child device config size for VBT version %u not known; assuming %d\n",
|
||||
i915->display.vbt.version, expected_size);
|
||||
}
|
||||
|
||||
/* Flag an error for unexpected size, but continue anyway. */
|
||||
if (size != expected_size)
|
||||
drm_err(&i915->drm,
|
||||
"Unexpected child device config size %d (expected %d for VBT version %u)\n",
|
||||
size, expected_size, i915->display.vbt.version);
|
||||
|
||||
/* The legacy sized child device config is the minimum we need. */
|
||||
if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Child device config size %d is too small.\n",
|
||||
size);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
parse_general_definitions(struct drm_i915_private *i915)
|
||||
{
|
||||
@ -2737,7 +2790,6 @@ parse_general_definitions(struct drm_i915_private *i915)
|
||||
struct intel_bios_encoder_data *devdata;
|
||||
const struct child_device_config *child;
|
||||
int i, child_device_num;
|
||||
u8 expected_size;
|
||||
u16 block_size;
|
||||
int bus_pin;
|
||||
|
||||
@ -2761,39 +2813,8 @@ parse_general_definitions(struct drm_i915_private *i915)
|
||||
if (intel_gmbus_is_valid_pin(i915, bus_pin))
|
||||
i915->display.vbt.crt_ddc_pin = bus_pin;
|
||||
|
||||
if (i915->display.vbt.version < 106) {
|
||||
expected_size = 22;
|
||||
} else if (i915->display.vbt.version < 111) {
|
||||
expected_size = 27;
|
||||
} else if (i915->display.vbt.version < 195) {
|
||||
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
|
||||
} else if (i915->display.vbt.version == 195) {
|
||||
expected_size = 37;
|
||||
} else if (i915->display.vbt.version <= 215) {
|
||||
expected_size = 38;
|
||||
} else if (i915->display.vbt.version <= 250) {
|
||||
expected_size = 39;
|
||||
} else {
|
||||
expected_size = sizeof(*child);
|
||||
BUILD_BUG_ON(sizeof(*child) < 39);
|
||||
drm_dbg(&i915->drm,
|
||||
"Expected child device config size for VBT version %u not known; assuming %u\n",
|
||||
i915->display.vbt.version, expected_size);
|
||||
}
|
||||
|
||||
/* Flag an error for unexpected size, but continue anyway. */
|
||||
if (defs->child_dev_size != expected_size)
|
||||
drm_err(&i915->drm,
|
||||
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
|
||||
defs->child_dev_size, expected_size, i915->display.vbt.version);
|
||||
|
||||
/* The legacy sized child device config is the minimum we need. */
|
||||
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Child device config size %u is too small.\n",
|
||||
defs->child_dev_size);
|
||||
if (!child_device_size_valid(i915, defs->child_dev_size))
|
||||
return;
|
||||
}
|
||||
|
||||
/* get the number of child device */
|
||||
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
|
||||
@ -2869,9 +2890,8 @@ init_vbt_panel_defaults(struct intel_panel *panel)
|
||||
static void
|
||||
init_vbt_missing_defaults(struct drm_i915_private *i915)
|
||||
{
|
||||
unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
|
||||
enum port port;
|
||||
int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
|
||||
BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
|
||||
|
||||
if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
|
||||
return;
|
||||
@ -2981,6 +3001,43 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
|
||||
return vbt;
|
||||
}
|
||||
|
||||
static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
|
||||
size_t *size)
|
||||
{
|
||||
struct vbt_header *vbt = NULL;
|
||||
const struct firmware *fw = NULL;
|
||||
const char *name = i915->display.params.vbt_firmware;
|
||||
int ret;
|
||||
|
||||
if (!name || !*name)
|
||||
return NULL;
|
||||
|
||||
ret = request_firmware(&fw, name, i915->drm.dev);
|
||||
if (ret) {
|
||||
drm_err(&i915->drm,
|
||||
"Requesting VBT firmware \"%s\" failed (%d)\n",
|
||||
name, ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
|
||||
vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
|
||||
if (vbt) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Found valid VBT firmware \"%s\"\n", name);
|
||||
if (size)
|
||||
*size = fw->size;
|
||||
}
|
||||
} else {
|
||||
drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
|
||||
name);
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
|
||||
return vbt;
|
||||
}
|
||||
|
||||
static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
|
||||
{
|
||||
intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset);
|
||||
@ -2988,7 +3045,8 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
|
||||
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
|
||||
}
|
||||
|
||||
static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
|
||||
static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
|
||||
size_t *size)
|
||||
{
|
||||
u32 count, data, found, store = 0;
|
||||
u32 static_region, oprom_offset;
|
||||
@ -3031,6 +3089,9 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
|
||||
|
||||
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
|
||||
|
||||
if (size)
|
||||
*size = vbt_size;
|
||||
|
||||
return (struct vbt_header *)vbt;
|
||||
|
||||
err_free_vbt:
|
||||
@ -3039,7 +3100,8 @@ err_not_found:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
|
||||
static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
|
||||
size_t *sizep)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
void __iomem *p = NULL, *oprom;
|
||||
@ -3088,6 +3150,9 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
|
||||
|
||||
pci_unmap_rom(pdev, oprom);
|
||||
|
||||
if (sizep)
|
||||
*sizep = vbt_size;
|
||||
|
||||
drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
|
||||
|
||||
return vbt;
|
||||
@ -3100,6 +3165,32 @@ err_unmap_oprom:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
|
||||
size_t *sizep)
|
||||
{
|
||||
const struct vbt_header *vbt = NULL;
|
||||
intel_wakeref_t wakeref;
|
||||
|
||||
vbt = firmware_get_vbt(i915, sizep);
|
||||
|
||||
if (!vbt)
|
||||
vbt = intel_opregion_get_vbt(i915, sizep);
|
||||
|
||||
/*
|
||||
* If the OpRegion does not have VBT, look in SPI flash
|
||||
* through MMIO or PCI mapping
|
||||
*/
|
||||
if (!vbt && IS_DGFX(i915))
|
||||
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
|
||||
vbt = spi_oprom_get_vbt(i915, sizep);
|
||||
|
||||
if (!vbt)
|
||||
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
|
||||
vbt = oprom_get_vbt(i915, sizep);
|
||||
|
||||
return vbt;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_bios_init - find VBT and initialize settings from the BIOS
|
||||
* @i915: i915 device instance
|
||||
@ -3111,7 +3202,6 @@ err_unmap_oprom:
|
||||
void intel_bios_init(struct drm_i915_private *i915)
|
||||
{
|
||||
const struct vbt_header *vbt;
|
||||
struct vbt_header *oprom_vbt = NULL;
|
||||
const struct bdb_header *bdb;
|
||||
|
||||
INIT_LIST_HEAD(&i915->display.vbt.display_devices);
|
||||
@ -3125,21 +3215,7 @@ void intel_bios_init(struct drm_i915_private *i915)
|
||||
|
||||
init_vbt_defaults(i915);
|
||||
|
||||
vbt = intel_opregion_get_vbt(i915, NULL);
|
||||
|
||||
/*
|
||||
* If the OpRegion does not have VBT, look in SPI flash through MMIO or
|
||||
* PCI mapping
|
||||
*/
|
||||
if (!vbt && IS_DGFX(i915)) {
|
||||
oprom_vbt = spi_oprom_get_vbt(i915);
|
||||
vbt = oprom_vbt;
|
||||
}
|
||||
|
||||
if (!vbt) {
|
||||
oprom_vbt = oprom_get_vbt(i915);
|
||||
vbt = oprom_vbt;
|
||||
}
|
||||
vbt = intel_bios_get_vbt(i915, NULL);
|
||||
|
||||
if (!vbt)
|
||||
goto out;
|
||||
@ -3172,7 +3248,7 @@ out:
|
||||
parse_sdvo_device_mapping(i915);
|
||||
parse_ddi_ports(i915);
|
||||
|
||||
kfree(oprom_vbt);
|
||||
kfree(vbt);
|
||||
}
|
||||
|
||||
static void intel_bios_init_panel(struct drm_i915_private *i915,
|
||||
@ -3344,8 +3420,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
|
||||
* additional data. Trust that if the VBT was written into
|
||||
* the OpRegion then they have validated the LVDS's existence.
|
||||
*/
|
||||
if (intel_opregion_get_vbt(i915, NULL))
|
||||
return true;
|
||||
return intel_opregion_vbt_present(i915);
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -3706,13 +3781,12 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
|
||||
const void *vbt;
|
||||
size_t vbt_size;
|
||||
|
||||
/*
|
||||
* FIXME: VBT might originate from other places than opregion, and then
|
||||
* this would be incorrect.
|
||||
*/
|
||||
vbt = intel_opregion_get_vbt(i915, &vbt_size);
|
||||
if (vbt)
|
||||
vbt = intel_bios_get_vbt(i915, &vbt_size);
|
||||
|
||||
if (vbt) {
|
||||
seq_write(m, vbt, vbt_size);
|
||||
kfree(vbt);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -52,7 +52,8 @@ struct intel_bw_state {
|
||||
u8 num_active_planes[I915_MAX_PIPES];
|
||||
};
|
||||
|
||||
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
|
||||
#define to_intel_bw_state(global_state) \
|
||||
container_of_const((global_state), struct intel_bw_state, base)
|
||||
|
||||
struct intel_bw_state *
|
||||
intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
|
||||
|
@ -39,6 +39,8 @@
|
||||
#include "intel_pcode.h"
|
||||
#include "intel_psr.h"
|
||||
#include "intel_vdsc.h"
|
||||
#include "skl_watermark.h"
|
||||
#include "skl_watermark_regs.h"
|
||||
#include "vlv_sideband.h"
|
||||
|
||||
/**
|
||||
@ -63,6 +65,32 @@
 * DMC will not change the active CDCLK frequency however, so that part
 * will still be performed by the driver directly.
 *
 * There are multiple components involved in the generation of the CDCLK
 * frequency:
 *
 * - We have the CDCLK PLL, which generates an output clock based on a
 *   reference clock and a ratio parameter.
 * - The CD2X Divider, which divides the output of the PLL based on a
 *   divisor selected from a set of pre-defined choices.
 * - The CD2X Squasher, which further divides the output based on a
 *   waveform represented as a sequence of bits where each zero
 *   "squashes out" a clock cycle.
 * - And, finally, a fixed divider that divides the output frequency by 2.
 *
 * As such, the resulting CDCLK frequency can be calculated with the
 * following formula:
 *
 *	cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
 *
 * , where vco is the frequency generated by the PLL; cd2x_div
 * represents the CD2X Divider; sq_len and sq_div are the bit length
 * and the number of high bits for the CD2X Squasher waveform, respectively;
 * and 2 represents the fixed divider.
 *
 * Note that some older platforms do not contain the CD2X Divider
 * and/or CD2X Squasher, in which case we can ignore their respective
 * factors in the formula above.
 *
 * Several methods exist to change the CDCLK frequency, which ones are
 * supported depends on the platform:
 *
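As a quick check of that formula, here is a standalone sketch (not driver code) plugging in the first xe2lpd_cdclk_table entry from later in this series: refclk 38400 kHz with ratio 16 (so vco = 614400 kHz) and the 0xaaaa squash waveform (8 of 16 bits high); the CD2X divider of 1 is an assumption for this example.

/*
 * Standalone sketch: evaluate cdclk = vco / cd2x_div / (sq_len / sq_div) / 2
 * for refclk 38400 kHz, ratio 16, waveform 0xaaaa, assumed cd2x_div of 1.
 */
#include <stdio.h>

static int popcount16(unsigned int w)
{
	int n = 0;

	for (; w; w >>= 1)
		n += w & 1;
	return n;
}

int main(void)
{
	int refclk = 38400;			/* kHz */
	int ratio = 16;
	int vco = refclk * ratio;		/* 614400 kHz out of the CDCLK PLL */
	int cd2x_div = 1;			/* assumed CD2X divider */
	int sq_len = 16;			/* squash waveform bit length */
	int sq_div = popcount16(0xaaaa);	/* 8 high bits in the waveform */
	int cdclk = vco / cd2x_div * sq_div / sq_len / 2;

	printf("cdclk = %d kHz\n", cdclk);	/* 153600 kHz, matching the table entry */
	return 0;
}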
@ -993,15 +1021,14 @@ static int skl_cdclk_decimal(int cdclk)
|
||||
return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
|
||||
}
|
||||
|
||||
static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
|
||||
int vco)
|
||||
static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco)
|
||||
{
|
||||
bool changed = dev_priv->skl_preferred_vco_freq != vco;
|
||||
bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco;
|
||||
|
||||
dev_priv->skl_preferred_vco_freq = vco;
|
||||
i915->display.cdclk.skl_preferred_vco_freq = vco;
|
||||
|
||||
if (changed)
|
||||
intel_update_max_cdclk(dev_priv);
|
||||
intel_update_max_cdclk(i915);
|
||||
}
|
||||
|
||||
static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
|
||||
@ -1205,7 +1232,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
|
||||
* Use the current vco as our initial
|
||||
* guess as to what the preferred vco is.
|
||||
*/
|
||||
if (dev_priv->skl_preferred_vco_freq == 0)
|
||||
if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0)
|
||||
skl_set_preferred_cdclk_vco(dev_priv,
|
||||
dev_priv->display.cdclk.hw.vco);
|
||||
return;
|
||||
@ -1213,7 +1240,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
|
||||
|
||||
cdclk_config = dev_priv->display.cdclk.hw;
|
||||
|
||||
cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
|
||||
cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
|
||||
if (cdclk_config.vco == 0)
|
||||
cdclk_config.vco = 8100000;
|
||||
cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
|
||||
@ -1391,7 +1418,7 @@ static const struct intel_cdclk_vals mtl_cdclk_table[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_cdclk_vals lnl_cdclk_table[] = {
|
||||
static const struct intel_cdclk_vals xe2lpd_cdclk_table[] = {
|
||||
{ .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
|
||||
{ .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
|
||||
{ .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
|
||||
@ -1656,6 +1683,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
out:
|
||||
if (DISPLAY_VER(dev_priv) >= 20)
|
||||
cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
|
||||
/*
|
||||
* Can't read this out :( Let's assume it's
|
||||
* at least what the CDCLK frequency requires.
|
||||
@ -1850,6 +1879,37 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
|
||||
return vco == ~0;
|
||||
}
|
||||
|
||||
static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915)
|
||||
{
|
||||
return DISPLAY_VER(i915) >= 20;
|
||||
}
|
||||
|
||||
static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915)
|
||||
{
|
||||
if (mdclk_source_is_cdclk_pll(i915))
|
||||
return MDCLK_SOURCE_SEL_CDCLK_PLL;
|
||||
|
||||
return MDCLK_SOURCE_SEL_CD2XCLK;
|
||||
}
|
||||
|
||||
int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
|
||||
const struct intel_cdclk_config *cdclk_config)
|
||||
{
|
||||
if (mdclk_source_is_cdclk_pll(i915))
|
||||
return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk);
|
||||
|
||||
/* Otherwise, source for MDCLK is CD2XCLK. */
|
||||
return 2;
|
||||
}
|
||||
|
||||
static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915,
|
||||
const struct intel_cdclk_config *cdclk_config)
|
||||
{
|
||||
intel_dbuf_mdclk_cdclk_ratio_update(i915,
|
||||
intel_mdclk_cdclk_ratio(i915, cdclk_config),
|
||||
cdclk_config->joined_mbus);
|
||||
}
|
||||
|
||||
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
|
||||
const struct intel_cdclk_config *old_cdclk_config,
|
||||
const struct intel_cdclk_config *new_cdclk_config,
|
||||
@ -1954,7 +2014,7 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
|
||||
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 20)
|
||||
val |= MDCLK_SOURCE_SEL_CDCLK_PLL;
|
||||
val |= xe2lpd_mdclk_source_sel(i915);
|
||||
else
|
||||
val |= skl_cdclk_decimal(cdclk);
|
||||
|
||||
@ -1967,7 +2027,6 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
int cdclk = cdclk_config->cdclk;
|
||||
int vco = cdclk_config->vco;
|
||||
u16 waveform;
|
||||
|
||||
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
|
||||
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
|
||||
@ -1982,10 +2041,11 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
} else
|
||||
bxt_cdclk_pll_update(dev_priv, vco);
|
||||
|
||||
waveform = cdclk_squash_waveform(dev_priv, cdclk);
|
||||
if (HAS_CDCLK_SQUASH(dev_priv)) {
|
||||
u16 waveform = cdclk_squash_waveform(dev_priv, cdclk);
|
||||
|
||||
if (HAS_CDCLK_SQUASH(dev_priv))
|
||||
dg2_cdclk_squash_program(dev_priv, waveform);
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
|
||||
|
||||
@ -2030,6 +2090,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
return;
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk)
|
||||
xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
|
||||
|
||||
if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
|
||||
cdclk_config, &mid_cdclk_config)) {
|
||||
_bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
|
||||
@ -2038,6 +2101,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
_bxt_set_cdclk(dev_priv, cdclk_config, pipe);
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk)
|
||||
xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14)
|
||||
/*
|
||||
* NOOP - No Pcode communication needed for
|
||||
@ -2260,16 +2326,15 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_cdclk_needs_modeset - Determine if changong between the CDCLK
|
||||
* configurations requires a modeset on all pipes
|
||||
* intel_cdclk_clock_changed - Check whether the clock changed
|
||||
* @a: first CDCLK configuration
|
||||
* @b: second CDCLK configuration
|
||||
*
|
||||
* Returns:
|
||||
* True if changing between the two CDCLK configurations
|
||||
* requires all pipes to be off, false if not.
|
||||
* True if CDCLK changed in a way that requires re-programming and
|
||||
* False otherwise.
|
||||
*/
|
||||
bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
|
||||
bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
|
||||
const struct intel_cdclk_config *b)
|
||||
{
|
||||
return a->cdclk != b->cdclk ||
|
||||
@ -2322,7 +2387,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
|
||||
static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
|
||||
const struct intel_cdclk_config *b)
|
||||
{
|
||||
return intel_cdclk_needs_modeset(a, b) ||
|
||||
return intel_cdclk_clock_changed(a, b) ||
|
||||
a->voltage_level != b->voltage_level;
|
||||
}
|
||||
|
||||
@ -2368,18 +2433,9 @@ static void intel_pcode_notify(struct drm_i915_private *i915,
|
||||
ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_set_cdclk - Push the CDCLK configuration to the hardware
|
||||
* @dev_priv: i915 device
|
||||
* @cdclk_config: new CDCLK configuration
|
||||
* @pipe: pipe with which to synchronize the update
|
||||
*
|
||||
* Program the hardware based on the passed in CDCLK state,
|
||||
* if necessary.
|
||||
*/
|
||||
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
const struct intel_cdclk_config *cdclk_config,
|
||||
enum pipe pipe)
|
||||
enum pipe pipe, const char *context)
|
||||
{
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
@ -2389,7 +2445,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
|
||||
return;
|
||||
|
||||
intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
|
||||
intel_cdclk_dump_config(dev_priv, cdclk_config, context);
|
||||
|
||||
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
@ -2519,6 +2575,17 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
|
||||
update_cdclk, update_pipe_count);
|
||||
}
|
||||
|
||||
bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state)
|
||||
{
|
||||
const struct intel_cdclk_state *old_cdclk_state =
|
||||
intel_atomic_get_old_cdclk_state(state);
|
||||
const struct intel_cdclk_state *new_cdclk_state =
|
||||
intel_atomic_get_new_cdclk_state(state);
|
||||
|
||||
return new_cdclk_state && !new_cdclk_state->disable_pipes &&
|
||||
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
|
||||
* @state: intel atomic state
|
||||
@ -2534,7 +2601,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
|
||||
intel_atomic_get_old_cdclk_state(state);
|
||||
const struct intel_cdclk_state *new_cdclk_state =
|
||||
intel_atomic_get_new_cdclk_state(state);
|
||||
enum pipe pipe = new_cdclk_state->pipe;
|
||||
struct intel_cdclk_config cdclk_config;
|
||||
enum pipe pipe;
|
||||
|
||||
if (!intel_cdclk_changed(&old_cdclk_state->actual,
|
||||
&new_cdclk_state->actual))
|
||||
@ -2543,12 +2611,32 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
|
||||
if (IS_DG2(i915))
|
||||
intel_cdclk_pcode_pre_notify(state);
|
||||
|
||||
if (pipe == INVALID_PIPE ||
|
||||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
|
||||
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
|
||||
if (new_cdclk_state->disable_pipes) {
|
||||
cdclk_config = new_cdclk_state->actual;
|
||||
pipe = INVALID_PIPE;
|
||||
} else {
|
||||
if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
|
||||
cdclk_config = new_cdclk_state->actual;
|
||||
pipe = new_cdclk_state->pipe;
|
||||
} else {
|
||||
cdclk_config = old_cdclk_state->actual;
|
||||
pipe = INVALID_PIPE;
|
||||
}
|
||||
|
||||
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
|
||||
cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
|
||||
old_cdclk_state->actual.voltage_level);
|
||||
}
|
||||
|
||||
/*
|
||||
* mbus joining will be changed later by
|
||||
* intel_dbuf_mbus_{pre,post}_ddb_update()
|
||||
*/
|
||||
cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus;
|
||||
|
||||
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
|
||||
|
||||
intel_set_cdclk(i915, &cdclk_config, pipe,
|
||||
"Pre changing CDCLK to");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2566,7 +2654,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
|
||||
intel_atomic_get_old_cdclk_state(state);
|
||||
const struct intel_cdclk_state *new_cdclk_state =
|
||||
intel_atomic_get_new_cdclk_state(state);
|
||||
enum pipe pipe = new_cdclk_state->pipe;
|
||||
enum pipe pipe;
|
||||
|
||||
if (!intel_cdclk_changed(&old_cdclk_state->actual,
|
||||
&new_cdclk_state->actual))
|
||||
@ -2575,12 +2663,16 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
|
||||
if (IS_DG2(i915))
|
||||
intel_cdclk_pcode_post_notify(state);
|
||||
|
||||
if (pipe != INVALID_PIPE &&
|
||||
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
|
||||
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
|
||||
if (!new_cdclk_state->disable_pipes &&
|
||||
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
|
||||
pipe = new_cdclk_state->pipe;
|
||||
else
|
||||
pipe = INVALID_PIPE;
|
||||
|
||||
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
|
||||
}
|
||||
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
|
||||
|
||||
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe,
|
||||
"Post changing CDCLK to");
|
||||
}
|
||||
|
||||
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
|
||||
@ -2731,25 +2823,6 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
|
||||
if (crtc_state->dsc.compression_enable)
|
||||
min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
|
||||
|
||||
/*
|
||||
* HACK. Currently for TGL/DG2 platforms we calculate
|
||||
* min_cdclk initially based on pixel_rate divided
|
||||
* by 2, accounting for also plane requirements,
|
||||
* however in some cases the lowest possible CDCLK
|
||||
* doesn't work and causing the underruns.
|
||||
* Explicitly stating here that this seems to be currently
|
||||
* rather a Hack, than final solution.
|
||||
*/
|
||||
if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
|
||||
/*
|
||||
* Clamp to max_cdclk_freq in case pixel rate is higher,
|
||||
* in order not to break an 8K, but still leave W/A at place.
|
||||
*/
|
||||
min_cdclk = max_t(int, min_cdclk,
|
||||
min_t(int, crtc_state->pixel_rate,
|
||||
dev_priv->display.cdclk.max_cdclk_freq));
|
||||
}
|
||||
|
||||
return min_cdclk;
|
||||
}
|
||||
|
||||
@ -2937,7 +3010,7 @@ static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
|
||||
|
||||
vco = cdclk_state->logical.vco;
|
||||
if (!vco)
|
||||
vco = dev_priv->skl_preferred_vco_freq;
|
||||
vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
if (!crtc_state->hw.enable)
|
||||
@ -3058,6 +3131,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
|
||||
return NULL;
|
||||
|
||||
cdclk_state->pipe = INVALID_PIPE;
|
||||
cdclk_state->disable_pipes = false;
|
||||
|
||||
return &cdclk_state->base;
|
||||
}
|
||||
@ -3121,6 +3195,20 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus)
|
||||
{
|
||||
struct intel_cdclk_state *cdclk_state;
|
||||
|
||||
cdclk_state = intel_atomic_get_cdclk_state(state);
|
||||
if (IS_ERR(cdclk_state))
|
||||
return PTR_ERR(cdclk_state);
|
||||
|
||||
cdclk_state->actual.joined_mbus = joined_mbus;
|
||||
cdclk_state->logical.joined_mbus = joined_mbus;
|
||||
|
||||
return intel_atomic_lock_global_state(&cdclk_state->base);
|
||||
}
|
||||
|
||||
int intel_cdclk_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_cdclk_state *cdclk_state;
|
||||
@ -3229,17 +3317,28 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Can change cdclk cd2x divider with pipe %c active\n",
|
||||
pipe_name(pipe));
|
||||
} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
|
||||
} else if (intel_cdclk_clock_changed(&old_cdclk_state->actual,
|
||||
&new_cdclk_state->actual)) {
|
||||
/* All pipes must be switched off while we change the cdclk. */
|
||||
ret = intel_modeset_all_pipes_late(state, "CDCLK change");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
new_cdclk_state->disable_pipes = true;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Modeset required for cdclk change\n");
|
||||
}
|
||||
|
||||
if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) !=
|
||||
intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) {
|
||||
int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual);
|
||||
|
||||
ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"New cdclk calculated to be logical %u kHz, actual %u kHz\n",
|
||||
new_cdclk_state->logical.cdclk,
|
||||
@ -3297,7 +3396,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
|
||||
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
|
||||
int max_cdclk, vco;
|
||||
|
||||
vco = dev_priv->skl_preferred_vco_freq;
|
||||
vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
|
||||
drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
|
||||
|
||||
/*
|
||||
@ -3339,13 +3438,13 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
|
||||
dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
|
||||
}
|
||||
|
||||
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
|
||||
dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
|
||||
|
||||
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
|
||||
dev_priv->display.cdclk.max_cdclk_freq);
|
||||
|
||||
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
|
||||
dev_priv->max_dotclk_freq);
|
||||
dev_priv->display.cdclk.max_dotclk_freq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -3519,7 +3618,7 @@ static int i915_cdclk_info_show(struct seq_file *m, void *unused)
|
||||
|
||||
seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
|
||||
seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
|
||||
seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
|
||||
seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3534,13 +3633,6 @@ void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
|
||||
i915, &i915_cdclk_info_fops);
|
||||
}
|
||||
|
||||
static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
|
||||
.get_cdclk = bxt_get_cdclk,
|
||||
.set_cdclk = bxt_set_cdclk,
|
||||
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
|
||||
.calc_voltage_level = rplu_calc_voltage_level,
|
||||
};
|
||||
|
||||
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
|
||||
.get_cdclk = bxt_get_cdclk,
|
||||
.set_cdclk = bxt_set_cdclk,
|
||||
@ -3684,10 +3776,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
|
||||
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (DISPLAY_VER(dev_priv) >= 20) {
|
||||
dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
|
||||
dev_priv->display.cdclk.table = lnl_cdclk_table;
|
||||
dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
|
||||
dev_priv->display.cdclk.table = xe2lpd_cdclk_table;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 14) {
|
||||
dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
|
||||
dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
|
||||
dev_priv->display.cdclk.table = mtl_cdclk_table;
|
||||
} else if (IS_DG2(dev_priv)) {
|
||||
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
|
||||
|
@ -18,6 +18,8 @@ struct intel_crtc_state;
|
||||
struct intel_cdclk_config {
|
||||
unsigned int cdclk, vco, ref, bypass;
|
||||
u8 voltage_level;
|
||||
/* This field is only valid for Xe2LPD and above. */
|
||||
bool joined_mbus;
|
||||
};
|
||||
|
||||
struct intel_cdclk_state {
|
||||
@ -51,6 +53,9 @@ struct intel_cdclk_state {
|
||||
|
||||
/* bitmask of active pipes */
|
||||
u8 active_pipes;
|
||||
|
||||
/* update cdclk with pipes disabled */
|
||||
bool disable_pipes;
|
||||
};
|
||||
|
||||
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
|
||||
@ -60,8 +65,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
|
||||
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
|
||||
void intel_update_cdclk(struct drm_i915_private *dev_priv);
|
||||
u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
|
||||
bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
|
||||
bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
|
||||
const struct intel_cdclk_config *b);
|
||||
int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
|
||||
const struct intel_cdclk_config *cdclk_config);
|
||||
bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state);
|
||||
void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
|
||||
void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
|
||||
void intel_cdclk_dump_config(struct drm_i915_private *i915,
|
||||
@ -72,10 +80,13 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
|
||||
struct intel_cdclk_config *cdclk_config);
|
||||
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
|
||||
bool *need_cdclk_calc);
|
||||
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
|
||||
struct intel_cdclk_state *
|
||||
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
|
||||
|
||||
#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
|
||||
#define to_intel_cdclk_state(global_state) \
|
||||
container_of_const((global_state), struct intel_cdclk_state, base)
|
||||
|
||||
#define intel_atomic_get_old_cdclk_state(state) \
|
||||
to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
|
||||
#define intel_atomic_get_new_cdclk_state(state) \
|
||||
|
@ -25,28 +25,26 @@
|
||||
4 * (dw))
|
||||
|
||||
#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
|
||||
#define CL_POWER_DOWN_ENABLE (1 << 4)
|
||||
#define SUS_CLOCK_CONFIG (3 << 0)
|
||||
#define CL_POWER_DOWN_ENABLE REG_BIT(4)
|
||||
#define SUS_CLOCK_CONFIG REG_GENMASK(1, 0)
|
||||
|
||||
#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
|
||||
#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
|
||||
#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
|
||||
#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
|
||||
#define PWR_UP_ALL_LANES (0x0 << 4)
|
||||
#define PWR_DOWN_LN_3_2_1 (0xe << 4)
|
||||
#define PWR_DOWN_LN_3_2 (0xc << 4)
|
||||
#define PWR_DOWN_LN_3 (0x8 << 4)
|
||||
#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
|
||||
#define PWR_DOWN_LN_1_0 (0x3 << 4)
|
||||
#define PWR_DOWN_LN_3_1 (0xa << 4)
|
||||
#define PWR_DOWN_LN_3_1_0 (0xb << 4)
|
||||
#define PWR_DOWN_LN_MASK (0xf << 4)
|
||||
#define PWR_DOWN_LN_SHIFT 4
|
||||
#define EDP4K2K_MODE_OVRD_EN (1 << 3)
|
||||
#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
|
||||
#define PG_SEQ_DELAY_OVERRIDE_MASK REG_GENMASK(26, 25)
|
||||
#define PG_SEQ_DELAY_OVERRIDE_ENABLE REG_BIT(24)
|
||||
#define PWR_DOWN_LN_MASK REG_GENMASK(7, 4)
|
||||
#define PWR_UP_ALL_LANES REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x0)
|
||||
#define PWR_DOWN_LN_3_2_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xe)
|
||||
#define PWR_DOWN_LN_3_2 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xc)
|
||||
#define PWR_DOWN_LN_3 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x8)
|
||||
#define PWR_DOWN_LN_2_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x7)
|
||||
#define PWR_DOWN_LN_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0x3)
|
||||
#define PWR_DOWN_LN_3_1 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xa)
|
||||
#define PWR_DOWN_LN_3_1_0 REG_FIELD_PREP(PWR_DOWN_LN_MASK, 0xb)
|
||||
#define EDP4K2K_MODE_OVRD_EN REG_BIT(3)
|
||||
#define EDP4K2K_MODE_OVRD_OPTIMIZED REG_BIT(2)
|
||||
|
||||
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
|
||||
#define ICL_LANE_ENABLE_AUX (1 << 0)
|
||||
#define ICL_LANE_ENABLE_AUX REG_BIT(0)
|
||||
|
||||
/* ICL Port COMP_DW registers */
|
||||
#define _ICL_PORT_COMP 0x100
|
||||
@ -54,24 +52,22 @@
|
||||
_ICL_PORT_COMP + 4 * (dw))
|
||||
|
||||
#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
|
||||
#define COMP_INIT (1 << 31)
|
||||
#define COMP_INIT REG_BIT(31)
|
||||
|
||||
#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
|
||||
|
||||
#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
|
||||
#define PROCESS_INFO_DOT_0 (0 << 26)
|
||||
#define PROCESS_INFO_DOT_1 (1 << 26)
|
||||
#define PROCESS_INFO_DOT_4 (2 << 26)
|
||||
#define PROCESS_INFO_MASK (7 << 26)
|
||||
#define PROCESS_INFO_SHIFT 26
|
||||
#define VOLTAGE_INFO_0_85V (0 << 24)
|
||||
#define VOLTAGE_INFO_0_95V (1 << 24)
|
||||
#define VOLTAGE_INFO_1_05V (2 << 24)
|
||||
#define VOLTAGE_INFO_MASK (3 << 24)
|
||||
#define VOLTAGE_INFO_SHIFT 24
|
||||
#define PROCESS_INFO_MASK REG_GENMASK(28, 26)
|
||||
#define PROCESS_INFO_DOT_0 REG_FIELD_PREP(PROCESS_INFO_MASK, 0)
|
||||
#define PROCESS_INFO_DOT_1 REG_FIELD_PREP(PROCESS_INFO_MASK, 1)
|
||||
#define PROCESS_INFO_DOT_4 REG_FIELD_PREP(PROCESS_INFO_MASK, 2)
|
||||
#define VOLTAGE_INFO_MASK REG_GENMASK(25, 24)
|
||||
#define VOLTAGE_INFO_0_85V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 0)
|
||||
#define VOLTAGE_INFO_0_95V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 1)
#define VOLTAGE_INFO_1_05V REG_FIELD_PREP(VOLTAGE_INFO_MASK, 2)

#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
#define IREFGEN (1 << 24)
#define IREFGEN REG_BIT(24)

#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))

@@ -92,9 +88,9 @@
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
#define COMMON_KEEPER_EN REG_BIT(26)
#define LATENCY_OPTIM_MASK REG_GENMASK(3, 2)
#define LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(LATENCY_OPTIM_MASK, (x))

/* ICL Port TX registers */
#define _ICL_PORT_TX_AUX 0x380
@@ -111,42 +107,49 @@
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK (0x7 << 11)
#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
#define RCOMP_SCALAR(x) ((x) << 0)
#define RCOMP_SCALAR_MASK (0xFF << 0)
#define SWING_SEL_UPPER_MASK REG_BIT(15)
#define SWING_SEL_UPPER(x) REG_FIELD_PREP(SWING_SEL_UPPER_MASK, (x) >> 3)
#define SWING_SEL_LOWER_MASK REG_GENMASK(13, 11)
#define SWING_SEL_LOWER(x) REG_FIELD_PREP(SWING_SEL_LOWER_MASK, (x) & 0x7)
#define FRC_LATENCY_OPTIM_MASK REG_GENMASK(10, 8)
#define FRC_LATENCY_OPTIM_VAL(x) REG_FIELD_PREP(FRC_LATENCY_OPTIM_MASK, (x))
#define RCOMP_SCALAR_MASK REG_GENMASK(7, 0)
#define RCOMP_SCALAR(x) REG_FIELD_PREP(RCOMP_SCALAR_MASK, (x))

#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
#define POST_CURSOR_2(x) ((x) << 6)
#define POST_CURSOR_2_MASK (0x3F << 6)
#define CURSOR_COEFF(x) ((x) << 0)
#define CURSOR_COEFF_MASK (0x3F << 0)
#define LOADGEN_SELECT REG_BIT(31)
#define POST_CURSOR_1_MASK REG_GENMASK(17, 12)
#define POST_CURSOR_1(x) REG_FIELD_PREP(POST_CURSOR_1_MASK, (x))
#define POST_CURSOR_2_MASK REG_GENMASK(11, 6)
#define POST_CURSOR_2(x) REG_FIELD_PREP(POST_CURSOR_2_MASK, (x))
#define CURSOR_COEFF_MASK REG_GENMASK(5, 0)
#define CURSOR_COEFF(x) REG_FIELD_PREP(CURSOR_COEFF_MASK, (x))

#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
#define SCALING_MODE_SEL(x) ((x) << 18)
#define SCALING_MODE_SEL_MASK (0x7 << 18)
#define RTERM_SELECT(x) ((x) << 3)
#define RTERM_SELECT_MASK (0x7 << 3)
#define TX_TRAINING_EN REG_BIT(31)
#define TAP2_DISABLE REG_BIT(30)
#define TAP3_DISABLE REG_BIT(29)
#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
#define RTERM_SELECT(x) REG_FIELD_PREP(RTERM_SELECT_MASK, (x))

#define ICL_PORT_TX_DW6_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(6, phy))
#define ICL_PORT_TX_DW6_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(6, phy))
#define ICL_PORT_TX_DW6_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(6, ln, phy))
#define O_FUNC_OVRD_EN REG_BIT(7)
#define O_LDO_REF_SEL_CRI REG_GENMASK(6, 1)
#define O_LDO_BYPASS_CRI REG_BIT(0)

#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
#define N_SCALAR_MASK REG_GENMASK(30, 24)
#define N_SCALAR(x) REG_FIELD_PREP(N_SCALAR_MASK, (x))

#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
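The register definition changes above follow the usual i915 conversion from open-coded shift/mask macros to REG_BIT(), REG_GENMASK() and REG_FIELD_PREP() from i915_reg_defs.h, which document the exact bitfield bounds and add build-time checks that a constant value fits its mask. As a rough illustration of the pattern (the EXAMPLE_* names below are made up for this sketch, not part of the patch):

#define EXAMPLE_ENABLE		REG_BIT(31)				/* single bit 31 */
#define EXAMPLE_CTL_MASK	REG_GENMASK(10, 8)			/* bits 10:8 */
#define EXAMPLE_CTL(x)		REG_FIELD_PREP(EXAMPLE_CTL_MASK, (x))	/* was ((x) << 8) */

/* EXAMPLE_CTL(3) yields the same value as the old (3 << 8) form, but an
 * out-of-range constant now fails the REG_FIELD_PREP() build-time check
 * instead of silently spilling into neighbouring bits. */
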
@@ -348,7 +348,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
int max_dotclk = dev_priv->max_dotclk_freq;
int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;

@@ -356,9 +356,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (status != MODE_OK)
return status;

if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;

if (mode->clock < 25000)
return MODE_CLOCK_LOW;

@@ -12,33 +12,31 @@
#include "intel_hdmi.h"
#include "intel_vrr.h"

static void intel_dump_crtc_timings(struct drm_i915_private *i915,
static void intel_dump_crtc_timings(struct drm_printer *p,
const struct drm_display_mode *mode)
{
drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
"hd=%d hb=%d-%d hs=%d-%d ht=%d, "
"vd=%d vb=%d-%d vs=%d-%d vt=%d, "
"flags=0x%x\n",
mode->crtc_clock,
mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
mode->flags);
drm_printf(p, "crtc timings: clock=%d, "
"hd=%d hb=%d-%d hs=%d-%d ht=%d, "
"vd=%d vb=%d-%d vs=%d-%d vt=%d, "
"flags=0x%x\n",
mode->crtc_clock,
mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
mode->flags);
}

static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
intel_dump_m_n_config(struct drm_printer *p,
const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

drm_dbg_kms(&i915->drm,
"%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
m_n->data_m, m_n->data_n,
m_n->link_m, m_n->link_n, m_n->tu);
drm_printf(p, "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
m_n->data_m, m_n->data_n,
m_n->link_m, m_n->link_n, m_n->tu);
}

static void
@@ -52,17 +50,7 @@ intel_dump_infoframe(struct drm_i915_private *i915,
}

static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
const struct drm_dp_vsc_sdp *vsc)
{
struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

drm_dp_vsc_sdp_log(&p, vsc);
}

static void
intel_dump_buffer(struct drm_i915_private *i915,
const char *prefix, const u8 *buf, size_t len)
intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@@ -130,71 +118,66 @@ const char *intel_output_format_name(enum intel_output_format format)
return output_format_str[format];
}

static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
static void intel_dump_plane_state(struct drm_printer *p,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;

if (!fb) {
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
str_yes_no(plane_state->uapi.visible));
drm_printf(p, "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
str_yes_no(plane_state->uapi.visible));
return;
}

drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
drm_printf(p, "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
drm_printf(p, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
drm_dbg_kms(&i915->drm,
"\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
DRM_RECT_FP_ARG(&plane_state->uapi.src),
DRM_RECT_ARG(&plane_state->uapi.dst));
drm_printf(p, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
DRM_RECT_FP_ARG(&plane_state->uapi.src),
DRM_RECT_ARG(&plane_state->uapi.dst));
}

static void
ilk_dump_csc(struct drm_i915_private *i915, const char *name,
ilk_dump_csc(struct drm_i915_private *i915,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
{
int i;

drm_dbg_kms(&i915->drm,
"%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
csc->preoff[0], csc->preoff[1], csc->preoff[2]);
drm_printf(p, "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
csc->preoff[0], csc->preoff[1], csc->preoff[2]);

for (i = 0; i < 3; i++)
drm_dbg_kms(&i915->drm,
"%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
csc->coeff[3 * i + 0],
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
csc->coeff[3 * i + 0],
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);

if (DISPLAY_VER(i915) < 7)
return;

drm_dbg_kms(&i915->drm,
"%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
csc->postoff[0], csc->postoff[1], csc->postoff[2]);
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
csc->postoff[0], csc->postoff[1], csc->postoff[2]);
}

static void
vlv_dump_csc(struct drm_i915_private *i915, const char *name,
vlv_dump_csc(struct drm_printer *p, const char *name,
const struct intel_csc_matrix *csc)
{
int i;

for (i = 0; i < 3; i++)
drm_dbg_kms(&i915->drm,
"%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
csc->coeff[3 * i + 0],
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
drm_printf(p, "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
csc->coeff[3 * i + 0],
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
}

void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
@@ -205,85 +188,86 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
char buf[64];
int i;

drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
str_yes_no(pipe_config->hw.enable), context);
if (!drm_debug_enabled(DRM_UT_KMS))
return;

p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
str_yes_no(pipe_config->hw.enable), context);

if (!pipe_config->hw.enable)
goto dump_planes;

snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
drm_dbg_kms(&i915->drm,
"active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
intel_output_format_name(pipe_config->output_format),
intel_output_format_name(pipe_config->sink_format));
drm_printf(&p, "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
intel_output_format_name(pipe_config->output_format),
intel_output_format_name(pipe_config->sink_format));

drm_dbg_kms(&i915->drm,
"cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
drm_printf(&p, "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);

drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
transcoder_name(pipe_config->mst_master_transcoder));
drm_printf(&p, "MST master transcoder: %s\n",
transcoder_name(pipe_config->mst_master_transcoder));

drm_dbg_kms(&i915->drm,
"port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
transcoder_name(pipe_config->master_transcoder),
pipe_config->sync_mode_slaves_mask);
drm_printf(&p, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
transcoder_name(pipe_config->master_transcoder),
pipe_config->sync_mode_slaves_mask);

drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
pipe_config->bigjoiner_pipes);
drm_printf(&p, "bigjoiner: %s, pipes: 0x%x\n",
intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
pipe_config->bigjoiner_pipes);

drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
str_enabled_disabled(pipe_config->splitter.enable),
pipe_config->splitter.link_count,
pipe_config->splitter.pixel_overlap);
drm_printf(&p, "splitter: %s, link count %d, overlap %d\n",
str_enabled_disabled(pipe_config->splitter.enable),
pipe_config->splitter.link_count,
pipe_config->splitter.pixel_overlap);

if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
intel_dump_m_n_config(&p, pipe_config, "fdi",
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);

if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
intel_dump_m_n_config(&p, pipe_config, "dp m_n",
pipe_config->lane_count,
&pipe_config->dp_m_n);
intel_dump_m_n_config(pipe_config, "dp m2_n2",
intel_dump_m_n_config(&p, pipe_config, "dp m2_n2",
pipe_config->lane_count,
&pipe_config->dp_m2_n2);
drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
str_enabled_disabled(pipe_config->fec_enable),
str_enabled_disabled(pipe_config->enhanced_framing));
drm_printf(&p, "fec: %s, enhanced framing: %s\n",
str_enabled_disabled(pipe_config->fec_enable),
str_enabled_disabled(pipe_config->enhanced_framing));

drm_dbg_kms(&i915->drm, "sdp split: %s\n",
str_enabled_disabled(pipe_config->sdp_split_enable));
drm_printf(&p, "sdp split: %s\n",
str_enabled_disabled(pipe_config->sdp_split_enable));

drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
str_enabled_disabled(pipe_config->has_psr),
str_enabled_disabled(pipe_config->has_psr2),
str_enabled_disabled(pipe_config->has_panel_replay),
str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
drm_printf(&p, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
str_enabled_disabled(pipe_config->has_psr),
str_enabled_disabled(pipe_config->has_psr2),
str_enabled_disabled(pipe_config->has_panel_replay),
str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}

drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
drm_printf(&p, "framestart delay: %d, MSA timing delay: %d\n",
pipe_config->framestart_delay, pipe_config->msa_timing_delay);

drm_dbg_kms(&i915->drm,
"audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
drm_printf(&p, "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);

if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
pipe_config->infoframes.gcp);
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
@@ -301,91 +285,88 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);

if (pipe_config->has_audio)
intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
intel_dump_buffer("ELD: ", pipe_config->eld,
drm_eld_size(pipe_config->eld));

drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));
drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.flipline,
intel_vrr_vmin_vblank_start(pipe_config),
intel_vrr_vmax_vblank_start(pipe_config));

drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
drm_dbg_kms(&i915->drm,
"port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
pipe_config->pixel_rate);
drm_printf(&p, "requested mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.mode));
drm_printf(&p, "adjusted mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.adjusted_mode);
drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
pipe_config->pixel_rate);

drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);

if (DISPLAY_VER(i915) >= 9)
drm_dbg_kms(&i915->drm,
"num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);

if (HAS_GMCH(i915))
drm_dbg_kms(&i915->drm,
"gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
else
drm_dbg_kms(&i915->drm,
"pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
str_enabled_disabled(pipe_config->pch_pfit.enabled),
str_yes_no(pipe_config->pch_pfit.force_thru));
drm_printf(&p, "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
str_enabled_disabled(pipe_config->pch_pfit.enabled),
str_yes_no(pipe_config->pch_pfit.force_thru));

drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide,
pipe_config->has_drrs);
drm_printf(&p, "ips: %i, double wide: %i, drrs: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide,
pipe_config->has_drrs);

intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
intel_dpll_dump_hw_state(i915, &p, &pipe_config->dpll_hw_state);

if (IS_CHERRYVIEW(i915))
drm_dbg_kms(&i915->drm,
"cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
else
drm_dbg_kms(&i915->drm,
"csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
drm_printf(&p, "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->csc_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);

drm_dbg_kms(&i915->drm, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);

if (DISPLAY_VER(i915) >= 11)
ilk_dump_csc(i915, "output csc", &pipe_config->output_csc);
ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);

if (!HAS_GMCH(i915))
ilk_dump_csc(i915, "pipe csc", &pipe_config->csc);
ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
else if (IS_CHERRYVIEW(i915))
vlv_dump_csc(i915, "cgm csc", &pipe_config->csc);
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
else if (IS_VALLEYVIEW(i915))
vlv_dump_csc(i915, "wgc csc", &pipe_config->csc);
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);

dump_planes:
if (!state)
@@ -393,6 +374,6 @@ dump_planes:

for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->pipe == crtc->pipe)
intel_dump_plane_state(plane_state);
intel_dump_plane_state(&p, plane_state);
}
}

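The crtc state dump changes above all follow one pattern: intel_crtc_state_dump() now builds a single drm_printer with drm_dbg_printer() and passes it down, so the helpers call drm_printf() on that printer instead of issuing their own drm_dbg_kms() calls. A minimal sketch of the pattern (the dump_mode() helper below is hypothetical, made up for this illustration):

static void dump_mode(struct drm_printer *p, const struct drm_display_mode *mode)
{
	/* The helper only knows about the printer, not about the debug category. */
	drm_printf(p, "mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

	/* Caller side: bail out early if KMS debugging is off, then build the
	 * printer once and reuse it for every line of the dump. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);

	dump_mode(&p, &crtc_state->hw.adjusted_mode);
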
@@ -509,6 +509,24 @@ static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}

static void wa_16021440873(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 ctl = plane_state->ctl;
int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
enum pipe pipe = plane->pipe;

ctl &= ~MCURSOR_MODE_MASK;
ctl |= MCURSOR_MODE_64_2B;

intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), ctl);

intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
PIPESRC_HEIGHT(et_y_position));
}

static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -529,7 +547,11 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
plane_state->ctl);
} else {
i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
/* Wa_16021440873 */
if (crtc_state->enable_psr2_su_region_et)
wa_16021440873(plane, crtc_state, plane_state);
else
i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
}
}

@@ -29,8 +29,11 @@
#define INTEL_CX0_LANE1 BIT(1)
#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0)

bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);

if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;

@@ -46,8 +49,7 @@ static int lane_mask_to_lane(u8 lane_mask)
return ilog2(lane_mask);
}

static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
struct intel_encoder *encoder)
static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

@@ -114,16 +116,20 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
}

static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
enum port port, int lane)
static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
int lane)
{
intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
struct drm_i915_private *i915 = to_i915(encoder->base.dev);

intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}

static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);

intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
@@ -135,20 +141,22 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
return;
}

intel_clear_response_ready_flag(i915, port, lane);
intel_clear_response_ready_flag(encoder, lane);
}

static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);

if (__intel_de_wait_for_register(i915,
XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
if (intel_de_wait_custom(i915,
XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);

@@ -158,31 +166,33 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
"PHY %c Hardware did not detect a timeout\n",
phy_name(phy));

intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}

if (*val & XELPDP_PORT_P2M_ERROR_SET) {
drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}

if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}

return 0;
}

static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
static int __intel_cx0_read_once(struct intel_encoder *encoder,
int lane, u16 addr)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;

@@ -191,7 +201,7 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}

@@ -200,33 +210,34 @@
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));

ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
if (ack < 0)
return ack;

intel_clear_response_ready_flag(i915, port, lane);
intel_clear_response_ready_flag(encoder, lane);

/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);

return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}

static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
static u8 __intel_cx0_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
int i, status;

assert_dc_off(i915);

/* 3 tries is assumed to be enough to read successfully */
for (i = 0; i < 3; i++) {
status = __intel_cx0_read_once(i915, port, lane, addr);
status = __intel_cx0_read_once(encoder, lane, addr);

if (status >= 0)
return status;
@@ -238,18 +249,20 @@ static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
return 0;
}

static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
static u8 intel_cx0_read(struct intel_encoder *encoder,
u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);

return __intel_cx0_read(i915, port, lane, addr);
return __intel_cx0_read(encoder, lane, addr);
}

static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
static int __intel_cx0_write_once(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;

@@ -258,7 +271,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}

@@ -274,45 +287,46 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}

if (committed) {
ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}

intel_clear_response_ready_flag(i915, port, lane);
intel_clear_response_ready_flag(encoder, lane);

/*
* FIXME: Workaround to let HW to settle
* down and let the message bus to end up
* in a known state
*/
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);

return 0;
}

static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
static void __intel_cx0_write(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
int i, status;

assert_dc_off(i915);

/* 3 tries is assumed to be enough to write successfully */
for (i = 0; i < 3; i++) {
status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
status = __intel_cx0_write_once(encoder, lane, addr, data, committed);

if (status == 0)
return;
@@ -322,63 +336,66 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}

static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
static void intel_cx0_write(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;

for_each_cx0_lane_in_mask(lane_mask, lane)
__intel_cx0_write(i915, port, lane, addr, data, committed);
__intel_cx0_write(encoder, lane, addr, data, committed);
}

static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
static void intel_c20_sram_write(struct intel_encoder *encoder,
int lane, u16 addr, u16 data)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);

assert_dc_off(i915);

intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);

intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
}

static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
static u16 intel_c20_sram_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u16 val;

assert_dc_off(i915);

intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);

val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H);
val <<= 8;
val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L);

return val;
}

static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
static void __intel_cx0_rmw(struct intel_encoder *encoder,
int lane, u16 addr, u8 clear, u8 set, bool committed)
{
u8 old, val;

old = __intel_cx0_read(i915, port, lane, addr);
old = __intel_cx0_read(encoder, lane, addr);
val = (old & ~clear) | set;

if (val != old)
__intel_cx0_write(i915, port, lane, addr, val, committed);
__intel_cx0_write(encoder, lane, addr, val, committed);
}

static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
static void intel_cx0_rmw(struct intel_encoder *encoder,
u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;

for_each_cx0_lane_in_mask(lane_mask, lane)
__intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
__intel_cx0_rmw(encoder, lane, addr, clear, set, committed);
}

static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
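All of the CX0 message-bus helpers above are converted the same way: instead of threading (i915, port) through every call, they take the encoder and derive what they need locally. A sketch of the pattern (example_cx0_helper() is a made-up name for illustration, not a function from the patch):

static void example_cx0_helper(struct intel_encoder *encoder, int lane)
{
	/* Everything the old (i915, port) parameters carried can be derived
	 * from the encoder at the point of use. */
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);

	drm_dbg_kms(&i915->drm, "PHY %c port %c lane %d\n",
		    phy_name(phy), port_name(port), lane);
}
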
@@ -414,7 +431,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(i915, encoder->port);
u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
@@ -423,7 +439,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
return;

owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);

wakeref = intel_cx0_phy_transaction_begin(encoder);

@@ -433,14 +449,14 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
return;
}

if (intel_is_c10phy(i915, phy)) {
intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
if (intel_encoder_is_c10phy(encoder)) {
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3),
C10_CMN3_TXVBOOST_MASK,
C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
MB_WRITE_UNCOMMITTED);
intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1),
C10_TX1_TERMCTL_MASK,
C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
MB_WRITE_COMMITTED);
@@ -455,27 +471,27 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
if (!(lane_mask & owned_lane_mask))
continue;

intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
MB_WRITE_COMMITTED);
intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
MB_WRITE_COMMITTED);
intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
C10_PHY_OVRD_LEVEL_MASK,
C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
MB_WRITE_COMMITTED);
}

/* Write Override enables in 0xD71 */
intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD,
0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
MB_WRITE_COMMITTED);

if (intel_is_c10phy(i915, phy))
intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
if (intel_encoder_is_c10phy(encoder))
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);

intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -1856,7 +1872,6 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
intel_wakeref_t wakeref;
int i;
@@ -1867,16 +1882,15 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
* According to C10 VDR Register programming Sequence we need
* to do this to read PHY internal registers from MsgBus.
*/
intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
intel_cx0_rmw(encoder, lane, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);

for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
PHY_C10_VDR_PLL(i));
pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i));

pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0));
pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));

intel_cx0_phy_transaction_end(encoder, wakeref);
}
@@ -1888,28 +1902,28 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
int i;

intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);

/* Custom width needs to be programmed to 0 for both the phy lanes */
intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);

/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
pll_state->pll[i],
(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);

intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);

intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
}
@@ -2037,10 +2051,8 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
{
struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

if (intel_is_c10phy(i915, phy))
if (intel_encoder_is_c10phy(&dig_port->base))
return intel_c10_phy_check_hdmi_link_rate(clock);
return intel_c20_phy_check_hdmi_link_rate(clock);
}
@@ -2088,10 +2100,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);

if (intel_is_c10phy(i915, phy))
if (intel_encoder_is_c10phy(encoder))
return intel_c10pll_calc_state(crtc_state, encoder);
return intel_c20pll_calc_state(crtc_state, encoder);
}
@@ -2149,7 +2158,6 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c20pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
intel_wakeref_t wakeref;
int i;
@@ -2157,25 +2165,25 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);

/* 1. Read current context selection */
cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;

/* Read Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_TX_CNTX_CFG(i));
else
pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->tx[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_TX_CNTX_CFG(i));
}

/* Read common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_CMN_CNTX_CFG(i));
else
pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->cmn[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_CMN_CNTX_CFG(i));
}

@@ -2183,20 +2191,20 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
/* MPLLB configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i));
else
pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->mpllb[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i));
}
} else {
/* MPLLA configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i));
else
pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
pll_state->mplla[i] = intel_c20_sram_read(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i));
}
}
@@ -2338,7 +2346,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
dp = true;

/* 1. Read current context selection */
cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);

/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2347,7 +2355,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
*/
if (intel_c20_protocol_switch_valid(encoder)) {
for (i = 0; i < 4; i++)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
usleep_range(4000, 4100);
}

@@ -2355,63 +2363,63 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 3.1 Tx configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
}

/* 3.2 common configuration */
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
intel_c20_sram_write(encoder, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
}

/* 3.3 mpllb or mplla configuration */
if (intel_c20phy_use_mpllb(pll_state)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLB_CNTX_CFG(i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_A_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
else
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
PHY_C20_B_MPLLA_CNTX_CFG(i),
pll_state->mplla[i]);
}
}

/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);

/* 5. For DP or 6. For HDMI */
if (dp) {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);

intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2420,7 +2428,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
intel_cx0_rmw(encoder, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}

@@ -2508,11 +2516,12 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}

static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
enum port port,
static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
u8 lane_mask, u8 state)
{
enum phy phy = intel_port_to_phy(i915, port);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
int lane;

@@ -2528,7 +2537,7 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
intel_cx0_bus_reset(i915, port, lane);
intel_cx0_bus_reset(encoder, lane);
}

intel_de_rmw(i915, buf_ctl2_reg,
@@ -2536,15 +2545,18 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
intel_cx0_get_powerdown_update(lane_mask));

/* Update Timeout Value */
if (__intel_de_wait_for_register(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
if (intel_de_wait_custom(i915, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}

static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;

intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
@@ -2577,13 +2589,13 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
return val;
}

static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
struct intel_encoder *encoder,
static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
bool lane_reversal)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(i915, port);
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
@@ -2593,19 +2605,19 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);

if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(i915, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);

intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
lane_pipe_reset);

if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);

@@ -2613,16 +2625,16 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));

if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);

intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
|
||||
CX0_P2_STATE_RESET);
|
||||
intel_cx0_setup_powerdown(i915, port);
|
||||
intel_cx0_setup_powerdown(encoder);
|
||||
|
||||
intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
|
||||
|
||||
@ -2640,11 +2652,10 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
|
||||
int i;
|
||||
u8 disables;
|
||||
bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
|
||||
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
|
||||
enum port port = encoder->port;
|
||||
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
|
||||
|
||||
if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
|
||||
intel_cx0_rmw(i915, port, owned_lane_mask,
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
intel_cx0_rmw(encoder, owned_lane_mask,
|
||||
PHY_C10_VDR_CONTROL(1), 0,
|
||||
C10_VDR_CTRL_MSGBUS_ACCESS,
|
||||
MB_WRITE_COMMITTED);
|
||||
@ -2666,14 +2677,14 @@ static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
|
||||
if (!(owned_lane_mask & lane_mask))
|
||||
continue;
|
||||
|
||||
intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
|
||||
intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
|
||||
CONTROL2_DISABLE_SINGLE_TX,
|
||||
disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
|
||||
MB_WRITE_COMMITTED);
|
||||
}
|
||||
|
||||
if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
|
||||
intel_cx0_rmw(i915, port, owned_lane_mask,
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
intel_cx0_rmw(encoder, owned_lane_mask,
|
||||
PHY_C10_VDR_CONTROL(1), 0,
|
||||
C10_VDR_CTRL_UPDATE_CFG,
|
||||
MB_WRITE_COMMITTED);
|
||||
@ -2705,7 +2716,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
|
||||
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
|
||||
@ -2719,13 +2730,13 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
|
||||
intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
|
||||
|
||||
/* 2. Bring PHY out of reset. */
|
||||
intel_cx0_phy_lane_reset(i915, encoder, lane_reversal);
|
||||
intel_cx0_phy_lane_reset(encoder, lane_reversal);
|
||||
|
||||
/*
|
||||
* 3. Change Phy power state to Ready.
|
||||
* TODO: For DP alt mode use only one lane.
|
||||
*/
|
||||
intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
|
||||
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
|
||||
CX0_P2_STATE_READY);
|
||||
|
||||
/*
|
||||
@ -2735,7 +2746,7 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
|
||||
*/
|
||||
|
||||
/* 5. Program PHY internal PLL internal registers. */
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
intel_c10_pll_program(i915, crtc_state, encoder);
|
||||
else
|
||||
intel_c20_pll_program(i915, crtc_state, encoder);
|
||||
@ -2767,10 +2778,10 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
|
||||
intel_cx0_get_pclk_pll_request(maxpclk_lane));
|
||||
|
||||
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
|
||||
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
|
||||
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
|
||||
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
|
||||
if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
|
||||
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
|
||||
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
|
||||
drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
|
||||
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
|
||||
|
||||
@ -2831,7 +2842,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
u32 val = 0;
|
||||
|
||||
/*
|
||||
@ -2858,10 +2869,10 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
|
||||
intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
|
||||
|
||||
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
|
||||
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
XELPDP_TBT_CLOCK_ACK,
|
||||
XELPDP_TBT_CLOCK_ACK,
|
||||
100, 0, NULL))
|
||||
if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
XELPDP_TBT_CLOCK_ACK,
|
||||
XELPDP_TBT_CLOCK_ACK,
|
||||
100, 0, NULL))
|
||||
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
|
||||
encoder->base.base.id, encoder->base.name, phy_name(phy));
|
||||
|
||||
@ -2892,12 +2903,12 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
|
||||
static void intel_cx0pll_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
bool is_c10 = intel_is_c10phy(i915, phy);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
bool is_c10 = intel_encoder_is_c10phy(encoder);
|
||||
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
|
||||
|
||||
/* 1. Change owned PHY lane power to Disable state. */
|
||||
intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
|
||||
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
|
||||
is_c10 ? CX0_P2PG_STATE_DISABLE :
|
||||
CX0_P4PG_STATE_DISABLE);
|
||||
|
||||
@ -2920,10 +2931,10 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
|
||||
/*
|
||||
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
|
||||
*/
|
||||
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
|
||||
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
|
||||
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
|
||||
if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
|
||||
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
|
||||
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
|
||||
drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
|
||||
phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
|
||||
|
||||
@ -2944,7 +2955,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
|
||||
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
/*
|
||||
* 1. Follow the Display Voltage Frequency Switching Sequence Before
|
||||
@ -2958,8 +2969,8 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
|
||||
XELPDP_TBT_CLOCK_REQUEST, 0);
|
||||
|
||||
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
|
||||
if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
|
||||
if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
|
||||
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
|
||||
drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
|
||||
encoder->base.base.id, encoder->base.name, phy_name(phy));
|
||||
|
||||
@ -3043,10 +3054,7 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
|
||||
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_cx0pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
|
||||
else
|
||||
intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
|
||||
@ -3055,10 +3063,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_cx0pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
|
||||
|
||||
return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
|
||||
@ -3124,7 +3129,6 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_encoder *encoder;
|
||||
struct intel_cx0pll_state mpll_hw_state = {};
|
||||
enum phy phy;
|
||||
|
||||
if (DISPLAY_VER(i915) < 14)
|
||||
return;
|
||||
@ -3138,14 +3142,13 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
|
||||
return;
|
||||
|
||||
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
|
||||
phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
|
||||
return;
|
||||
|
||||
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
|
||||
else
|
||||
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/bits.h>
|
||||
|
||||
enum icl_port_dpll_id;
|
||||
enum phy;
|
||||
struct drm_i915_private;
|
||||
struct intel_atomic_state;
|
||||
struct intel_c10pll_state;
|
||||
@ -22,7 +21,7 @@ struct intel_crtc_state;
|
||||
struct intel_encoder;
|
||||
struct intel_hdmi;
|
||||
|
||||
bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
|
||||
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
|
||||
void intel_mtl_pll_enable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_mtl_pll_disable(struct intel_encoder *encoder);
|
||||
|
@ -200,10 +200,10 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
|
||||
port_name(port));
|
||||
}
|
||||
|
||||
static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum port port = encoder->port;
|
||||
int timeout_us;
|
||||
int ret;
|
||||
|
||||
@ -218,7 +218,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
|
||||
} else if (IS_DG2(dev_priv)) {
|
||||
timeout_us = 1200;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
if (intel_phy_is_tc(dev_priv, phy))
|
||||
if (intel_encoder_is_tc(encoder))
|
||||
timeout_us = 3000;
|
||||
else
|
||||
timeout_us = 1000;
|
||||
@ -331,7 +331,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
|
||||
intel_dp->DP = dig_port->saved_port_bits |
|
||||
@ -345,7 +344,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
|
||||
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
|
||||
}
|
||||
|
||||
if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
|
||||
if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
|
||||
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
|
||||
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
|
||||
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
|
||||
@ -895,7 +894,6 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
|
||||
/*
|
||||
* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
|
||||
@ -914,7 +912,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
|
||||
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
|
||||
else if (DISPLAY_VER(i915) < 14 &&
|
||||
(intel_crtc_has_dp_encoder(crtc_state) ||
|
||||
intel_phy_is_tc(i915, phy)))
|
||||
intel_encoder_is_tc(&dig_port->base)))
|
||||
return intel_aux_power_domain(dig_port);
|
||||
else
|
||||
return POWER_DOMAIN_INVALID;
|
||||
@ -984,7 +982,7 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
u32 val;
|
||||
|
||||
if (cpu_transcoder == TRANSCODER_EDP)
|
||||
@ -1113,7 +1111,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
const struct intel_ddi_buf_trans *trans;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
int n_entries, ln;
|
||||
u32 val;
|
||||
|
||||
@ -1176,7 +1174,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
u32 val;
|
||||
int ln;
|
||||
|
||||
@ -1227,7 +1225,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
const struct intel_ddi_buf_trans *trans;
|
||||
int n_entries, ln;
|
||||
|
||||
@ -1328,7 +1326,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
const struct intel_ddi_buf_trans *trans;
|
||||
int n_entries, ln;
|
||||
|
||||
@ -1526,7 +1524,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, !pll))
|
||||
return;
|
||||
@ -1540,7 +1538,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
_icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1549,7 +1547,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1558,7 +1556,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
|
||||
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
|
||||
@ -1570,7 +1568,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, !pll))
|
||||
return;
|
||||
@ -1584,7 +1582,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
|
||||
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1593,7 +1591,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
|
||||
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1602,7 +1600,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
|
||||
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
|
||||
@ -1614,7 +1612,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, !pll))
|
||||
return;
|
||||
@ -1637,7 +1635,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
|
||||
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
_icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
|
||||
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1646,7 +1644,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
|
||||
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
|
||||
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1655,7 +1653,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
|
||||
static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
enum intel_dpll_id id;
|
||||
u32 val;
|
||||
|
||||
@ -1680,7 +1678,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, !pll))
|
||||
return;
|
||||
@ -1694,7 +1692,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
|
||||
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
_icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1703,7 +1701,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
|
||||
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
|
||||
@ -1712,7 +1710,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
|
||||
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
|
||||
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
|
||||
@ -1767,7 +1765,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
enum port port = encoder->port;
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, !pll))
|
||||
@ -1787,7 +1785,7 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
|
||||
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
enum port port = encoder->port;
|
||||
|
||||
mutex_lock(&i915->display.dpll.lock);
|
||||
@ -1803,7 +1801,7 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
|
||||
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
enum port port = encoder->port;
|
||||
u32 tmp;
|
||||
|
||||
@ -1820,7 +1818,7 @@ static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
|
||||
static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
enum port port = encoder->port;
|
||||
enum intel_dpll_id id;
|
||||
u32 tmp;
|
||||
@ -2086,12 +2084,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
|
||||
u32 ln0, ln1, pin_assignment;
|
||||
u8 width;
|
||||
|
||||
if (!intel_phy_is_tc(dev_priv, phy) ||
|
||||
if (!intel_encoder_is_tc(&dig_port->base) ||
|
||||
intel_tc_port_in_tbt_alt_mode(dig_port))
|
||||
return;
|
||||
|
||||
@ -2327,9 +2324,9 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_phy_is_combo(i915, phy)) {
|
||||
if (intel_encoder_is_combo(encoder)) {
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
bool lane_reversal =
|
||||
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
|
||||
|
||||
@ -2812,15 +2809,14 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (HAS_DP20(dev_priv)) {
|
||||
if (HAS_DP20(dev_priv))
|
||||
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
|
||||
crtc_state);
|
||||
if (crtc_state->has_panel_replay)
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
|
||||
DP_PANEL_REPLAY_ENABLE);
|
||||
}
|
||||
|
||||
/* Panel replay has to be enabled in sink dpcd before link training. */
|
||||
if (crtc_state->has_panel_replay)
|
||||
intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14)
|
||||
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
|
||||
@ -3095,39 +3091,48 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
|
||||
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
|
||||
}
|
||||
|
||||
static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *pipe_crtc;
|
||||
|
||||
for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(old_crtc_state)) {
|
||||
const struct intel_crtc_state *old_pipe_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, pipe_crtc);
|
||||
|
||||
intel_crtc_vblank_off(old_pipe_crtc_state);
|
||||
}
|
||||
|
||||
intel_disable_transcoder(old_crtc_state);
|
||||
|
||||
intel_ddi_disable_transcoder_func(old_crtc_state);
|
||||
|
||||
for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(old_crtc_state)) {
|
||||
const struct intel_crtc_state *old_pipe_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, pipe_crtc);
|
||||
|
||||
intel_dsc_disable(old_pipe_crtc_state);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
skl_scaler_disable(old_pipe_crtc_state);
|
||||
else
|
||||
ilk_pfit_disable(old_pipe_crtc_state);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_ddi_post_disable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *slave_crtc;
|
||||
|
||||
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
|
||||
intel_crtc_vblank_off(old_crtc_state);
|
||||
|
||||
intel_disable_transcoder(old_crtc_state);
|
||||
|
||||
intel_ddi_disable_transcoder_func(old_crtc_state);
|
||||
|
||||
intel_dsc_disable(old_crtc_state);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
skl_scaler_disable(old_crtc_state);
|
||||
else
|
||||
ilk_pfit_disable(old_crtc_state);
|
||||
}
|
||||
|
||||
for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
|
||||
intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
|
||||
const struct intel_crtc_state *old_slave_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, slave_crtc);
|
||||
|
||||
intel_crtc_vblank_off(old_slave_crtc_state);
|
||||
|
||||
intel_dsc_disable(old_slave_crtc_state);
|
||||
skl_scaler_disable(old_slave_crtc_state);
|
||||
}
|
||||
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
|
||||
intel_ddi_post_disable_hdmi_or_sst(state, encoder, old_crtc_state,
|
||||
old_conn_state);
|
||||
|
||||
/*
|
||||
* When called from DP MST code:
|
||||
@ -3155,14 +3160,11 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
bool is_tc_port = intel_phy_is_tc(i915, phy);
|
||||
|
||||
main_link_aux_power_domain_put(dig_port, old_crtc_state);
|
||||
|
||||
if (is_tc_port)
|
||||
if (intel_encoder_is_tc(encoder))
|
||||
intel_tc_port_put_link(dig_port);
|
||||
}
|
||||
|
||||
@ -3263,7 +3265,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct drm_connector *connector = conn_state->connector;
|
||||
enum port port = encoder->port;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
u32 buf_ctl;
|
||||
|
||||
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
|
||||
@ -3347,14 +3348,14 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 20)
|
||||
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
|
||||
} else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
|
||||
} else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
|
||||
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
|
||||
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
|
||||
|
||||
intel_wait_ddi_buf_active(dev_priv, port);
|
||||
intel_wait_ddi_buf_active(encoder);
|
||||
}
|
||||
|
||||
static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
@ -3362,10 +3363,10 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *pipe_crtc;
|
||||
|
||||
if (!intel_crtc_is_bigjoiner_slave(crtc_state))
|
||||
intel_ddi_enable_transcoder_func(encoder, crtc_state);
|
||||
intel_ddi_enable_transcoder_func(encoder, crtc_state);
|
||||
|
||||
/* Enable/Disable DP2.0 SDP split config before transcoder */
|
||||
intel_audio_sdp_split_update(crtc_state);
|
||||
@ -3374,7 +3375,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
|
||||
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
|
||||
|
||||
intel_crtc_vblank_on(crtc_state);
|
||||
for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(crtc_state)) {
|
||||
const struct intel_crtc_state *pipe_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, pipe_crtc);
|
||||
|
||||
intel_crtc_vblank_on(pipe_crtc_state);
|
||||
}
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
|
||||
@ -3470,19 +3477,17 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
const struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_crtc *slave_crtc;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
struct intel_crtc *pipe_crtc;
|
||||
|
||||
/* FIXME: Add MTL pll_mgr */
|
||||
if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy))
|
||||
if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
|
||||
return;
|
||||
|
||||
intel_update_active_dpll(state, crtc, encoder);
|
||||
for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
|
||||
intel_crtc_bigjoiner_slave_pipes(crtc_state))
|
||||
intel_update_active_dpll(state, slave_crtc, encoder);
|
||||
for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(crtc_state))
|
||||
intel_update_active_dpll(state, pipe_crtc, encoder);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3493,8 +3498,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
|
||||
bool is_tc_port = intel_encoder_is_tc(encoder);
|
||||
|
||||
if (is_tc_port) {
|
||||
struct intel_crtc *master_crtc =
|
||||
@ -3520,7 +3524,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
|
||||
static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(encoder);
|
||||
int ln;
|
||||
|
||||
for (ln = 0; ln < 2; ln++)
|
||||
@ -3574,7 +3578,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
|
||||
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
|
||||
|
||||
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
|
||||
intel_wait_ddi_buf_active(dev_priv, port);
|
||||
intel_wait_ddi_buf_active(encoder);
|
||||
}
|
||||
|
||||
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
|
||||
@ -3624,7 +3628,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
|
||||
|
||||
intel_wait_ddi_buf_active(dev_priv, port);
|
||||
intel_wait_ddi_buf_active(encoder);
|
||||
}
|
||||
|
||||
static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
|
||||
@ -3681,7 +3685,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
|
||||
|
||||
if (intel_de_wait_for_set(dev_priv,
|
||||
dp_tp_status_reg(encoder, crtc_state),
|
||||
DP_TP_STATUS_IDLE_DONE, 1))
|
||||
DP_TP_STATUS_IDLE_DONE, 2))
|
||||
drm_err(&dev_priv->drm,
|
||||
"Timed out waiting for DP idle patterns\n");
|
||||
}
|
||||
@ -3972,6 +3976,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
|
||||
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
|
||||
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
|
||||
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_ADAPTIVE_SYNC);
|
||||
|
||||
intel_audio_codec_get_config(encoder, pipe_config);
|
||||
}
|
||||
@ -4144,10 +4149,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
|
||||
static void intel_ddi_sync_state(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_phy_is_tc(i915, phy))
|
||||
if (intel_encoder_is_tc(encoder))
|
||||
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
|
||||
crtc_state);
|
||||
|
||||
@ -4159,10 +4161,9 @@ static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
bool fastset = true;
|
||||
|
||||
if (intel_phy_is_tc(i915, phy)) {
|
||||
if (intel_encoder_is_tc(encoder)) {
|
||||
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
crtc_state->uapi.mode_changed = true;
|
||||
@ -4256,7 +4257,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
|
||||
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
|
||||
const struct intel_crtc_state *crtc_state2)
|
||||
{
|
||||
/*
|
||||
* FIXME the modeset sequence is currently wrong and
|
||||
* can't deal with bigjoiner + port sync at the same time.
|
||||
*/
|
||||
return crtc_state1->hw.active && crtc_state2->hw.active &&
|
||||
!crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
|
||||
crtc_state1->output_types == crtc_state2->output_types &&
|
||||
crtc_state1->output_format == crtc_state2->output_format &&
|
||||
crtc_state1->lane_count == crtc_state2->lane_count &&
|
||||
@ -4348,10 +4354,9 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->dev);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
|
||||
intel_dp_encoder_flush_work(encoder);
|
||||
if (intel_phy_is_tc(i915, phy))
|
||||
if (intel_encoder_is_tc(&dig_port->base))
|
||||
intel_tc_port_cleanup(dig_port);
|
||||
intel_display_power_flush_work(i915);
|
||||
|
||||
@ -4362,16 +4367,14 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
|
||||
|
||||
static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
|
||||
intel_dp->reset_link_params = true;
|
||||
|
||||
intel_pps_encoder_reset(intel_dp);
|
||||
|
||||
if (intel_phy_is_tc(i915, phy))
|
||||
if (intel_encoder_is_tc(&dig_port->base))
|
||||
intel_tc_port_init_mode(dig_port);
|
||||
}
|
||||
|
||||
@ -4538,11 +4541,9 @@ static enum intel_hotplug_state
|
||||
intel_ddi_hotplug(struct intel_encoder *encoder,
|
||||
struct intel_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
bool is_tc = intel_phy_is_tc(i915, phy);
|
||||
bool is_tc = intel_encoder_is_tc(encoder);
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
enum intel_hotplug_state state;
|
||||
int ret;
|
||||
@ -4824,10 +4825,7 @@ static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
|
||||
|
||||
static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
return init_dp || intel_phy_is_tc(i915, phy);
|
||||
return init_dp || intel_encoder_is_tc(encoder);
|
||||
}
|
||||
|
||||
static bool assert_has_icl_dsi(struct drm_i915_private *i915)
|
||||
@ -5071,12 +5069,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
|
||||
} else if (IS_DG2(dev_priv)) {
|
||||
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
if (intel_phy_is_combo(dev_priv, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
|
||||
else
|
||||
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 11) {
|
||||
if (intel_phy_is_combo(dev_priv, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
|
||||
else
|
||||
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
|
||||
@ -5126,7 +5124,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (intel_phy_is_tc(dev_priv, phy)) {
|
||||
if (intel_encoder_is_tc(encoder)) {
|
||||
bool is_legacy =
|
||||
!intel_bios_encoder_supports_typec_usb(devdata) &&
|
||||
!intel_bios_encoder_supports_tbt(devdata);
|
||||
@ -5155,7 +5153,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
|
||||
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 11) {
|
||||
if (intel_phy_is_tc(dev_priv, phy))
|
||||
if (intel_encoder_is_tc(encoder))
|
||||
dig_port->connected = intel_tc_port_connected;
|
||||
else
|
||||
dig_port->connected = lpt_digital_port_connected;
|
||||
|
@ -1691,14 +1691,11 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
int *n_entries)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
|
||||
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
|
||||
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
|
||||
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
|
||||
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
|
||||
else if (!intel_is_c10phy(i915, phy))
|
||||
else if (!intel_encoder_is_c10phy(encoder))
|
||||
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
|
||||
else
|
||||
return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
|
||||
@ -1707,14 +1704,13 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
|
||||
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (DISPLAY_VER(i915) >= 14) {
|
||||
encoder->get_buf_trans = mtl_get_cx0_buf_trans;
|
||||
} else if (IS_DG2(i915)) {
|
||||
encoder->get_buf_trans = dg2_get_snps_buf_trans;
|
||||
} else if (IS_ALDERLAKE_P(i915)) {
|
||||
if (intel_phy_is_combo(i915, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
encoder->get_buf_trans = adlp_get_combo_buf_trans;
|
||||
else
|
||||
encoder->get_buf_trans = adlp_get_dkl_buf_trans;
|
||||
@ -1725,16 +1721,16 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
|
||||
} else if (IS_DG1(i915)) {
|
||||
encoder->get_buf_trans = dg1_get_combo_buf_trans;
|
||||
} else if (DISPLAY_VER(i915) >= 12) {
|
||||
if (intel_phy_is_combo(i915, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
encoder->get_buf_trans = tgl_get_combo_buf_trans;
|
||||
else
|
||||
encoder->get_buf_trans = tgl_get_dkl_buf_trans;
|
||||
} else if (DISPLAY_VER(i915) == 11) {
|
||||
if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
|
||||
if (IS_JASPERLAKE(i915))
|
||||
encoder->get_buf_trans = jsl_get_combo_buf_trans;
|
||||
else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
|
||||
else if (IS_ELKHARTLAKE(i915))
|
||||
encoder->get_buf_trans = ehl_get_combo_buf_trans;
|
||||
else if (intel_phy_is_combo(i915, phy))
|
||||
else if (intel_encoder_is_combo(encoder))
|
||||
encoder->get_buf_trans = icl_get_combo_buf_trans;
|
||||
else
|
||||
encoder->get_buf_trans = icl_get_mg_buf_trans;
|
||||
|
@@ -13,76 +13,157 @@
 static inline u32
 intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
 {
-	return intel_uncore_read(&i915->uncore, reg);
+	u32 val;
+
+	intel_dmc_wl_get(i915, reg);
+
+	val = intel_uncore_read(&i915->uncore, reg);
+
+	intel_dmc_wl_put(i915, reg);
+
+	return val;
 }
 
 static inline u8
 intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
 {
-	return intel_uncore_read8(&i915->uncore, reg);
+	u8 val;
+
+	intel_dmc_wl_get(i915, reg);
+
+	val = intel_uncore_read8(&i915->uncore, reg);
+
+	intel_dmc_wl_put(i915, reg);
+
+	return val;
 }
 
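With this change every register access funneled through these helpers takes and releases the DMC wakelock around the MMIO, so existing call sites keep their shape. A minimal illustration, reusing a register that already appears in the hunks above (a sketch, not additional upstream code):

	/* Unchanged call site; the wakelock get/put now happens inside. */
	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
		     XELPDP_POWER_STATE_READY_MASK,
		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));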
||||
static inline u64
|
||||
intel_de_read64_2x32(struct drm_i915_private *i915,
|
||||
i915_reg_t lower_reg, i915_reg_t upper_reg)
|
||||
{
|
||||
return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
|
||||
u64 val;
|
||||
|
||||
intel_dmc_wl_get(i915, lower_reg);
|
||||
intel_dmc_wl_get(i915, upper_reg);
|
||||
|
||||
val = intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
|
||||
|
||||
intel_dmc_wl_put(i915, upper_reg);
|
||||
intel_dmc_wl_put(i915, lower_reg);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void
|
||||
intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
|
||||
{
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
intel_uncore_posting_read(&i915->uncore, reg);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
}
|
||||
|
||||
static inline void
|
||||
intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
|
||||
{
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
intel_uncore_write(&i915->uncore, reg, val);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
}
|
||||
|
||||
static inline u32
|
||||
__intel_de_rmw_nowl(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 clear, u32 set)
|
||||
{
|
||||
return intel_uncore_rmw(&i915->uncore, reg, clear, set);
|
||||
}
|
||||
|
||||
static inline u32
|
||||
intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
|
||||
{
|
||||
return intel_uncore_rmw(&i915->uncore, reg, clear, set);
|
||||
u32 val;
|
||||
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
val = __intel_de_rmw_nowl(i915, reg, clear, set);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline int
|
||||
intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value, unsigned int timeout)
|
||||
__intel_wait_for_register_nowl(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value, unsigned int timeout)
|
||||
{
|
||||
return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
|
||||
return intel_wait_for_register(&i915->uncore, reg, mask,
|
||||
value, timeout);
|
||||
}
|
||||
|
||||
static inline int
|
||||
intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value, unsigned int timeout)
|
||||
intel_de_wait(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value, unsigned int timeout)
|
||||
{
|
||||
return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
|
||||
int ret;
|
||||
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
ret = __intel_wait_for_register_nowl(i915, reg, mask, value, timeout);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value,
|
||||
unsigned int fast_timeout_us,
|
||||
unsigned int slow_timeout_ms, u32 *out_value)
|
||||
intel_de_wait_fw(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value, unsigned int timeout)
|
||||
{
|
||||
return __intel_wait_for_register(&i915->uncore, reg, mask, value,
|
||||
fast_timeout_us, slow_timeout_ms, out_value);
|
||||
int ret;
|
||||
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
ret = intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
intel_de_wait_custom(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, u32 value,
|
||||
unsigned int fast_timeout_us,
|
||||
unsigned int slow_timeout_ms, u32 *out_value)
|
||||
{
|
||||
int ret;
|
||||
|
||||
intel_dmc_wl_get(i915, reg);
|
||||
|
||||
ret = __intel_wait_for_register(&i915->uncore, reg, mask, value,
|
||||
fast_timeout_us, slow_timeout_ms, out_value);
|
||||
|
||||
intel_dmc_wl_put(i915, reg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, unsigned int timeout)
|
||||
{
|
||||
return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
|
||||
return intel_de_wait(i915, reg, mask, mask, timeout);
|
||||
}
|
||||
|
||||
static inline int
|
||||
intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
|
||||
u32 mask, unsigned int timeout)
|
||||
{
|
||||
return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
|
||||
return intel_de_wait(i915, reg, mask, 0, timeout);
|
||||
}
|
||||
|
||||
/*
|
||||
|
[one file's diff not shown: suppressed by the diff viewer because it is too large]
@@ -280,6 +280,12 @@ enum phy_fia {
 			    base.head) \
 		for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
 
+#define for_each_intel_crtc_in_pipe_mask_reverse(dev, intel_crtc, pipe_mask) \
+	list_for_each_entry_reverse((intel_crtc), \
+				    &(dev)->mode_config.crtc_list, \
+				    base.head) \
+		for_each_if((pipe_mask) & BIT((intel_crtc)->pipe))
+
 #define for_each_intel_encoder(dev, intel_encoder) \
 	list_for_each_entry(intel_encoder, \
 			    &(dev)->mode_config.encoder_list, \
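For reference, the intel_ddi.c hunks earlier in this diff use the new reverse iterator so the enable path walks joined pipes in the opposite order to the disable path; a call site has this shape (taken from the intel_enable_ddi() hunk above, lightly trimmed):

	struct intel_crtc *pipe_crtc;

	for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
						 intel_crtc_joined_pipe_mask(crtc_state)) {
		const struct intel_crtc_state *pipe_crtc_state =
			intel_atomic_get_new_crtc_state(state, pipe_crtc);

		intel_crtc_vblank_on(pipe_crtc_state);
	}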
|
||||
@ -344,6 +350,14 @@ enum phy_fia {
|
||||
(__i)++) \
|
||||
for_each_if(crtc)
|
||||
|
||||
#define for_each_new_intel_crtc_in_state_reverse(__state, crtc, new_crtc_state, __i) \
|
||||
for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
|
||||
(__i) >= 0 && \
|
||||
((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
|
||||
(new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
|
||||
(__i)--) \
|
||||
for_each_if(crtc)
|
||||
|
||||
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
|
||||
for ((__i) = 0; \
|
||||
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
|
||||
@ -408,6 +422,7 @@ intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
|
||||
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
|
||||
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
|
||||
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
|
||||
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
|
||||
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
|
||||
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
|
||||
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
|
||||
@@ -448,6 +463,13 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
 			      enum port port);
+
+enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
+bool intel_encoder_is_combo(struct intel_encoder *encoder);
+bool intel_encoder_is_snps(struct intel_encoder *encoder);
+bool intel_encoder_is_tc(struct intel_encoder *encoder);
+enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
+
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
 
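The bodies of these encoder-based wrappers live in a file whose diff is suppressed from this view, so they are not visible here. As an orientation sketch only (an assumption about the implementation, not quoted code), they would derive the device from the encoder and defer to the existing phy/port helpers declared above:

/* Sketch under the assumption that the wrappers delegate to the old helpers. */
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_port_to_phy(i915, encoder->port);
}

bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
}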
||||
|
@ -26,6 +26,7 @@
|
||||
#include "intel_global_state.h"
|
||||
#include "intel_gmbus.h"
|
||||
#include "intel_opregion.h"
|
||||
#include "intel_dmc_wl.h"
|
||||
#include "intel_wm_types.h"
|
||||
|
||||
struct task_struct;
|
||||
@ -345,6 +346,8 @@ struct intel_display {
|
||||
struct intel_global_obj obj;
|
||||
|
||||
unsigned int max_cdclk_freq;
|
||||
unsigned int max_dotclk_freq;
|
||||
unsigned int skl_preferred_vco_freq;
|
||||
} cdclk;
|
||||
|
||||
struct {
|
||||
@ -445,6 +448,16 @@ struct intel_display {
|
||||
bool false_color;
|
||||
} ips;
|
||||
|
||||
struct {
|
||||
bool display_irqs_enabled;
|
||||
|
||||
/* For i915gm/i945gm vblank irq workaround */
|
||||
u8 vblank_enabled;
|
||||
|
||||
u32 de_irq_mask[I915_MAX_PIPES];
|
||||
u32 pipestat_irq_mask[I915_MAX_PIPES];
|
||||
} irq;
|
||||
|
||||
struct {
|
||||
wait_queue_head_t waitqueue;
|
||||
|
||||
@ -534,6 +547,7 @@ struct intel_display {
|
||||
struct intel_overlay *overlay;
|
||||
struct intel_display_params params;
|
||||
struct intel_vbt_data vbt;
|
||||
struct intel_dmc_wl wl;
|
||||
struct intel_wm wm;
|
||||
};
|
||||
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "intel_hdmi.h"
|
||||
#include "intel_hotplug.h"
|
||||
#include "intel_panel.h"
|
||||
#include "intel_pps.h"
|
||||
#include "intel_psr.h"
|
||||
#include "intel_psr_regs.h"
|
||||
#include "intel_wm.h"
|
||||
@ -191,7 +192,7 @@ static void intel_hdcp_info(struct seq_file *m,
|
||||
struct intel_connector *intel_connector,
|
||||
bool remote_req)
|
||||
{
|
||||
bool hdcp_cap, hdcp2_cap;
|
||||
bool hdcp_cap = false, hdcp2_cap = false;
|
||||
|
||||
if (!intel_connector->hdcp.shim) {
|
||||
seq_puts(m, "No Connector Support");
|
||||
@ -252,9 +253,6 @@ static void intel_connector_info(struct seq_file *m,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
const struct drm_connector_state *conn_state = connector->state;
|
||||
struct intel_encoder *encoder =
|
||||
to_intel_encoder(conn_state->best_encoder);
|
||||
const struct drm_display_mode *mode;
|
||||
|
||||
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
|
||||
@ -271,28 +269,23 @@ static void intel_connector_info(struct seq_file *m,
|
||||
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
|
||||
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
|
||||
|
||||
if (!encoder)
|
||||
return;
|
||||
|
||||
switch (connector->connector_type) {
|
||||
case DRM_MODE_CONNECTOR_DisplayPort:
|
||||
case DRM_MODE_CONNECTOR_eDP:
|
||||
if (encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
if (intel_connector->mst_port)
|
||||
intel_dp_mst_info(m, intel_connector);
|
||||
else
|
||||
intel_dp_info(m, intel_connector);
|
||||
break;
|
||||
case DRM_MODE_CONNECTOR_HDMIA:
|
||||
if (encoder->type == INTEL_OUTPUT_HDMI ||
|
||||
encoder->type == INTEL_OUTPUT_DDI)
|
||||
intel_hdmi_info(m, intel_connector);
|
||||
intel_hdmi_info(m, intel_connector);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
seq_puts(m, "\tHDCP version: ");
|
||||
if (intel_encoder_is_mst(encoder)) {
|
||||
if (intel_connector->mst_port) {
|
||||
intel_hdcp_info(m, intel_connector, true);
|
||||
seq_puts(m, "\tMST Hub HDCP version: ");
|
||||
}
|
||||
@ -1103,27 +1096,6 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
|
||||
intel_display_debugfs_params(i915);
|
||||
}
|
||||
|
||||
static int i915_panel_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct intel_connector *connector = m->private;
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
|
||||
if (connector->base.status != connector_status_connected)
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "Panel power up delay: %d\n",
|
||||
intel_dp->pps.panel_power_up_delay);
|
||||
seq_printf(m, "Panel power down delay: %d\n",
|
||||
intel_dp->pps.panel_power_down_delay);
|
||||
seq_printf(m, "Backlight on delay: %d\n",
|
||||
intel_dp->pps.backlight_on_delay);
|
||||
seq_printf(m, "Backlight off delay: %d\n",
|
||||
intel_dp->pps.backlight_off_delay);
|
||||
|
||||
return 0;
|
||||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(i915_panel);
|
||||
|
||||
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct intel_connector *connector = m->private;
|
||||
@ -1402,20 +1374,6 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct intel_connector *connector = m->private;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
crtc = connector->base.state->crtc;
|
||||
if (connector->base.status != connector_status_connected || !crtc)
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t i915_dsc_output_format_write(struct file *file,
|
||||
const char __user *ubuf,
|
||||
size_t len, loff_t *offp)
|
||||
@ -1437,30 +1395,6 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t i915_bigjoiner_enable_write(struct file *file,
|
||||
const char __user *ubuf,
|
||||
size_t len, loff_t *offp)
|
||||
{
|
||||
struct seq_file *m = file->private_data;
|
||||
struct intel_connector *connector = m->private;
|
||||
struct drm_crtc *crtc;
|
||||
bool bigjoiner_en = 0;
|
||||
int ret;
|
||||
|
||||
crtc = connector->base.state->crtc;
|
||||
if (connector->base.status != connector_status_connected || !crtc)
|
||||
return -ENODEV;
|
||||
|
||||
ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
connector->force_bigjoiner_enable = bigjoiner_en;
|
||||
*offp += len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int i915_dsc_output_format_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
@ -1554,8 +1488,6 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
|
||||
.write = i915_dsc_fractional_bpp_write
|
||||
};
|
||||
|
||||
DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
|
||||
|
||||
/*
|
||||
* Returns the Current CRTC's bpc.
|
||||
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
|
||||
@ -1608,12 +1540,9 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
|
||||
return;
|
||||
|
||||
intel_drrs_connector_debugfs_add(connector);
|
||||
intel_pps_connector_debugfs_add(connector);
|
||||
intel_psr_connector_debugfs_add(connector);
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_eDP)
|
||||
debugfs_create_file("i915_panel_timings", 0444, root,
|
||||
connector, &i915_panel_fops);
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
|
||||
connector_type == DRM_MODE_CONNECTOR_HDMIA ||
|
||||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
|
||||
@ -1640,8 +1569,8 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
|
||||
if (DISPLAY_VER(i915) >= 11 &&
|
||||
(connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
|
||||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
|
||||
debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
|
||||
connector, &i915_bigjoiner_enable_fops);
|
||||
debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root,
|
||||
&connector->force_bigjoiner_enable);
|
||||
}
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
|
||||
|
@ -17,6 +17,9 @@
|
||||
#include "intel_display_reg_defs.h"
|
||||
#include "intel_fbc.h"
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
|
||||
|
||||
static const struct intel_display_device_info no_display = {};
|
||||
|
||||
#define PIPE_A_OFFSET 0x70000
|
||||
@ -768,6 +771,8 @@ static const struct intel_display_device_info xe2_lpd_display = {
|
||||
BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D),
|
||||
};
|
||||
|
||||
__diag_pop();
|
||||
|
||||
/*
|
||||
* Separate detection for no display cases to keep the display id array simple.
|
||||
*
|
||||
|
@ -47,6 +47,7 @@ struct drm_printer;
|
||||
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
|
||||
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
|
||||
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
|
||||
#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
|
||||
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
|
||||
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
|
||||
@ -68,6 +69,7 @@ struct drm_printer;
|
||||
#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
|
||||
BIT(trans)) != 0)
|
||||
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
|
||||
#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
|
||||
#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
|
||||
#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
|
||||
#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
|
||||
|
@@ -198,6 +198,7 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
	intel_dpll_init_clock_hook(i915);
	intel_init_display_hooks(i915);
	intel_fdi_init_hook(i915);
	intel_dmc_wl_init(i915);
}

/* part #1: call before irq install */
@ -117,13 +117,14 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
|
||||
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
|
||||
return;
|
||||
|
||||
new_val = dev_priv->de_irq_mask[pipe];
|
||||
new_val = dev_priv->display.irq.de_irq_mask[pipe];
|
||||
new_val &= ~interrupt_mask;
|
||||
new_val |= (~enabled_irq_mask & interrupt_mask);
|
||||
|
||||
if (new_val != dev_priv->de_irq_mask[pipe]) {
|
||||
dev_priv->de_irq_mask[pipe] = new_val;
|
||||
intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||
if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
|
||||
dev_priv->display.irq.de_irq_mask[pipe] = new_val;
|
||||
intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
|
||||
dev_priv->display.irq.de_irq_mask[pipe]);
|
||||
intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
|
||||
}
|
||||
}
|
||||
@ -179,7 +180,7 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
|
||||
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
|
||||
u32 status_mask = dev_priv->display.irq.pipestat_irq_mask[pipe];
|
||||
u32 enable_mask = status_mask << 16;
|
||||
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
@ -233,10 +234,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
|
||||
|
||||
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
|
||||
if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
|
||||
return;
|
||||
|
||||
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
|
||||
dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
|
||||
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
|
||||
|
||||
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
|
||||
@ -256,10 +257,10 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
|
||||
|
||||
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
|
||||
if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
|
||||
return;
|
||||
|
||||
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
|
||||
dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
|
||||
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
|
||||
|
||||
intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
|
||||
@ -401,7 +402,7 @@ void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
|
||||
PIPESTAT_INT_STATUS_MASK |
|
||||
PIPE_FIFO_UNDERRUN_STATUS);
|
||||
|
||||
dev_priv->pipestat_irq_mask[pipe] = 0;
|
||||
dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -412,7 +413,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
|
||||
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
|
||||
if (!dev_priv->display_irqs_enabled) {
|
||||
if (!dev_priv->display.irq.display_irqs_enabled) {
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
return;
|
||||
}
|
||||
@ -445,7 +446,7 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
|
||||
break;
|
||||
}
|
||||
if (iir & iir_bit)
|
||||
status_mask |= dev_priv->pipestat_irq_mask[pipe];
|
||||
status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
|
||||
|
||||
if (!status_mask)
|
||||
continue;
|
||||
@ -1203,7 +1204,7 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
|
||||
|
||||
int i915gm_enable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->dev);
|
||||
|
||||
/*
|
||||
* Vblank interrupts fail to wake the device up from C2+.
|
||||
@ -1211,8 +1212,8 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
|
||||
* the problem. There is a small power cost so we do this
|
||||
* only when vblank interrupts are actually enabled.
|
||||
*/
|
||||
if (dev_priv->vblank_enabled++ == 0)
|
||||
intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
|
||||
if (i915->display.irq.vblank_enabled++ == 0)
|
||||
intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
|
||||
|
||||
return i8xx_enable_vblank(crtc);
|
||||
}
|
||||
@ -1315,12 +1316,12 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
|
||||
|
||||
void i915gm_disable_vblank(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->dev);
|
||||
|
||||
i8xx_disable_vblank(crtc);
|
||||
|
||||
if (--dev_priv->vblank_enabled == 0)
|
||||
intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
|
||||
if (--i915->display.irq.vblank_enabled == 0)
|
||||
intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
|
||||
}
|
||||
|
||||
void i965_disable_vblank(struct drm_crtc *crtc)
|
||||
@ -1497,8 +1498,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
|
||||
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
|
||||
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
|
||||
dev_priv->de_irq_mask[pipe],
|
||||
~dev_priv->de_irq_mask[pipe] | extra_ier);
|
||||
dev_priv->display.irq.de_irq_mask[pipe],
|
||||
~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
@ -1558,10 +1559,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
|
||||
if (dev_priv->display_irqs_enabled)
|
||||
if (dev_priv->display.irq.display_irqs_enabled)
|
||||
return;
|
||||
|
||||
dev_priv->display_irqs_enabled = true;
|
||||
dev_priv->display.irq.display_irqs_enabled = true;
|
||||
|
||||
if (intel_irqs_enabled(dev_priv)) {
|
||||
vlv_display_irq_reset(dev_priv);
|
||||
@ -1573,10 +1574,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
|
||||
if (!dev_priv->display_irqs_enabled)
|
||||
if (!dev_priv->display.irq.display_irqs_enabled)
|
||||
return;
|
||||
|
||||
dev_priv->display_irqs_enabled = false;
|
||||
dev_priv->display.irq.display_irqs_enabled = false;
|
||||
|
||||
if (intel_irqs_enabled(dev_priv))
|
||||
vlv_display_irq_reset(dev_priv);
|
||||
@ -1694,12 +1695,12 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
|
||||
dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
|
||||
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
|
||||
dev_priv->de_irq_mask[pipe],
|
||||
dev_priv->display.irq.de_irq_mask[pipe],
|
||||
de_pipe_enables);
|
||||
}
|
||||
|
||||
@ -1770,9 +1771,9 @@ void intel_display_irq_init(struct drm_i915_private *i915)
|
||||
* domain. We defer setting up the display irqs in this case to the
|
||||
* runtime pm.
|
||||
*/
|
||||
i915->display_irqs_enabled = true;
|
||||
i915->display.irq.display_irqs_enabled = true;
|
||||
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
|
||||
i915->display_irqs_enabled = false;
|
||||
i915->display.irq.display_irqs_enabled = false;
|
||||
|
||||
intel_hotplug_irq_init(i915);
|
||||
}
|
||||
|
@@ -116,6 +116,11 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
	"(0=disabled, 1=enabled) "
	"Default: 1");

intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
	"Enable DMC wakelock "
	"(0=disabled, 1=enabled) "
	"Default: 0");

__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
			      const char *name, bool val)

@@ -46,6 +46,7 @@ struct drm_i915_private;
	param(int, enable_psr, -1, 0600) \
	param(bool, psr_safest_params, false, 0400) \
	param(bool, enable_psr2_sel_fetch, true, 0400) \
	param(bool, enable_dmc_wl, false, 0400) \

#define MEMBER(T, member, ...) T member;
struct intel_display_params {
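Assuming the new knob goes through the same module-parameter plumbing as the surrounding enable_psr* entries (an assumption; the wiring itself is not part of this hunk), it would be toggled like any other i915 display parameter, for example:

	i915.enable_dmc_wl=1              # kernel command line
	modprobe i915 enable_dmc_wl=1     # at module load

With 0400 permissions the value is read-only once the driver has loaded, so it has to be set up front; per the "Default: 0" description above, the wakelock mechanism stays off unless explicitly enabled.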
@ -17,6 +17,7 @@
|
||||
#include "intel_dkl_phy.h"
|
||||
#include "intel_dkl_phy_regs.h"
|
||||
#include "intel_dmc.h"
|
||||
#include "intel_dmc_wl.h"
|
||||
#include "intel_dp_aux_regs.h"
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_dpll.h"
|
||||
@ -199,6 +200,9 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
|
||||
}
|
||||
|
||||
#define ICL_AUX_PW_TO_PHY(pw_idx) \
|
||||
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
|
||||
|
||||
#define ICL_AUX_PW_TO_CH(pw_idx) \
|
||||
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
|
||||
|
||||
@ -217,27 +221,22 @@ static struct intel_digital_port *
|
||||
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
|
||||
enum aux_ch aux_ch)
|
||||
{
|
||||
struct intel_digital_port *dig_port = NULL;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
for_each_intel_encoder(&dev_priv->drm, encoder) {
|
||||
struct intel_digital_port *dig_port;
|
||||
|
||||
/* We'll check the MST primary port */
|
||||
if (encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
continue;
|
||||
|
||||
dig_port = enc_to_dig_port(encoder);
|
||||
if (!dig_port)
|
||||
continue;
|
||||
|
||||
if (dig_port->aux_ch != aux_ch) {
|
||||
dig_port = NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
break;
|
||||
if (dig_port && dig_port->aux_ch == aux_ch)
|
||||
return dig_port;
|
||||
}
|
||||
|
||||
return dig_port;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
|
||||
@ -253,7 +252,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
|
||||
* as HDMI-only and routed to a combo PHY, the encoder either won't be
|
||||
* present at all or it will not have an aux_ch assigned.
|
||||
*/
|
||||
return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
|
||||
return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
|
||||
}
|
||||
|
||||
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
@ -396,17 +395,11 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
hsw_wait_for_power_well_disable(dev_priv, power_well);
|
||||
}
|
||||
|
||||
static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
|
||||
static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
|
||||
{
|
||||
struct intel_encoder *encoder;
|
||||
struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
|
||||
|
||||
for_each_intel_encoder(&i915->drm, encoder) {
|
||||
if (encoder->type == INTEL_OUTPUT_EDP &&
|
||||
encoder->port == port)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -415,24 +408,25 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
|
||||
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
|
||||
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
|
||||
|
||||
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
|
||||
|
||||
/* FIXME this is a mess */
|
||||
if (phy != PHY_NONE)
|
||||
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
|
||||
0, ICL_LANE_ENABLE_AUX);
|
||||
/*
|
||||
* FIXME not sure if we should derive the PHY from the pw_idx, or
|
||||
* from the VBT defined AUX_CH->DDI->PHY mapping.
|
||||
*/
|
||||
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
|
||||
0, ICL_LANE_ENABLE_AUX);
|
||||
|
||||
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
|
||||
|
||||
/* Display WA #1178: icl */
|
||||
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
|
||||
!intel_port_is_edp(dev_priv, (enum port)phy))
|
||||
intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
|
||||
0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
|
||||
!intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
|
||||
intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
|
||||
0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -441,14 +435,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
|
||||
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
|
||||
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
|
||||
|
||||
/* FIXME this is a mess */
|
||||
if (phy != PHY_NONE)
|
||||
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
|
||||
ICL_LANE_ENABLE_AUX, 0);
|
||||
/*
|
||||
* FIXME not sure if we should derive the PHY from the pw_idx, or
|
||||
* from the VBT defined AUX_CH->DDI->PHY mapping.
|
||||
*/
|
||||
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
|
||||
ICL_LANE_ENABLE_AUX, 0);
|
||||
|
||||
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
|
||||
|
||||
@ -827,6 +822,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
|
||||
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
|
||||
0, SKL_SELECT_ALTERNATE_DC_EXIT);
|
||||
|
||||
intel_dmc_wl_enable(dev_priv);
|
||||
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
|
||||
}
|
||||
|
||||
@ -856,6 +853,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
|
||||
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
|
||||
0, SKL_SELECT_ALTERNATE_DC_EXIT);
|
||||
|
||||
intel_dmc_wl_enable(dev_priv);
|
||||
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
|
||||
}
|
||||
|
||||
@ -976,10 +975,12 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
|
||||
if (!HAS_DISPLAY(dev_priv))
|
||||
return;
|
||||
|
||||
intel_dmc_wl_disable(dev_priv);
|
||||
|
||||
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
|
||||
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
|
||||
drm_WARN_ON(&dev_priv->drm,
|
||||
intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
|
||||
intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
|
||||
&cdclk_config));
|
||||
|
||||
gen9_assert_dbuf_enabled(dev_priv);
|
||||
@ -1396,8 +1397,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
|
||||
* The PHY may be busy with some initial calibration and whatnot,
|
||||
* so the power state can take a while to actually change.
|
||||
*/
|
||||
if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
|
||||
phy_status_mask, phy_status, 10))
|
||||
if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
|
||||
phy_status_mask, phy_status, 10))
|
||||
drm_err(&dev_priv->drm,
|
||||
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
|
||||
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
|
||||
|
@ -661,7 +661,8 @@ struct intel_digital_connector_state {
|
||||
int broadcast_rgb;
|
||||
};
|
||||
|
||||
#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
|
||||
#define to_intel_digital_connector_state(conn_state) \
|
||||
container_of_const((conn_state), struct intel_digital_connector_state, base)
|
||||
|
||||
struct dpll {
|
||||
/* given values */
|
||||
@ -1346,6 +1347,7 @@ struct intel_crtc_state {
|
||||
union hdmi_infoframe hdmi;
|
||||
union hdmi_infoframe drm;
|
||||
struct drm_dp_vsc_sdp vsc;
|
||||
struct drm_dp_as_sdp as_sdp;
|
||||
} infoframes;
|
||||
|
||||
u8 eld[MAX_ELD_BYTES];
|
||||
@ -1423,6 +1425,8 @@ struct intel_crtc_state {
|
||||
|
||||
u32 psr2_man_track_ctl;
|
||||
|
||||
u32 pipe_srcsz_early_tpt;
|
||||
|
||||
struct drm_rect psr2_su_area;
|
||||
|
||||
/* Variable Refresh Rate state */
|
||||
@ -1430,6 +1434,7 @@ struct intel_crtc_state {
|
||||
bool enable, in_range;
|
||||
u8 pipeline_full;
|
||||
u16 flipline, vmin, vmax, guardband;
|
||||
u32 vsync_end, vsync_start;
|
||||
} vrr;
|
||||
|
||||
/* Stream Splitter for eDP MSO */
|
||||
@ -1618,12 +1623,17 @@ struct intel_watermark_params {
|
||||
|
||||
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
|
||||
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
|
||||
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
|
||||
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
|
||||
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
|
||||
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
|
||||
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
|
||||
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
|
||||
|
||||
#define to_intel_crtc_state(crtc_state) \
|
||||
container_of_const((crtc_state), struct intel_crtc_state, uapi)
|
||||
#define to_intel_plane_state(plane_state) \
|
||||
container_of_const((plane_state), struct intel_plane_state, uapi)
|
||||
#define to_intel_framebuffer(fb) \
|
||||
container_of_const((fb), struct intel_framebuffer, base)
|
||||
|
||||
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
|
||||
|
||||
struct intel_hdmi {
|
||||
@ -1738,6 +1748,8 @@ struct intel_psr {
|
||||
|
||||
/* LNL and beyond */
|
||||
u8 check_entry_lines;
|
||||
u8 silence_period_sym_clocks;
|
||||
u8 lfps_half_cycle_num_of_syms;
|
||||
} alpm_parameters;
|
||||
|
||||
ktime_t last_entry_attempt;
|
||||
@ -1799,6 +1811,7 @@ struct intel_dp {
|
||||
|
||||
bool is_mst;
|
||||
int active_mst_links;
|
||||
enum drm_dp_mst_mode mst_detect;
|
||||
|
||||
/* connector directly attached - won't be use for modeset in mst world */
|
||||
struct intel_connector *attached_connector;
|
||||
|
@ -10,20 +10,12 @@
|
||||
|
||||
static void gen11_display_wa_apply(struct drm_i915_private *i915)
|
||||
{
|
||||
/* Wa_1409120013 */
|
||||
intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
|
||||
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
|
||||
|
||||
/* Wa_14010594013 */
|
||||
intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
|
||||
}
|
||||
|
||||
static void xe_d_display_wa_apply(struct drm_i915_private *i915)
|
||||
{
|
||||
/* Wa_1409120013 */
|
||||
intel_de_write(i915, ILK_DPFC_CHICKEN(INTEL_FBC_A),
|
||||
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
|
||||
|
||||
/* Wa_14013723622 */
|
||||
intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
|
||||
}
|
||||
|
@ -38,6 +38,8 @@
|
||||
* low-power state and comes back to normal.
|
||||
*/
|
||||
|
||||
#define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
|
||||
|
||||
enum intel_dmc_id {
|
||||
DMC_FW_MAIN = 0,
|
||||
DMC_FW_PIPEA,
|
||||
@ -89,10 +91,14 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
|
||||
__stringify(major) "_" \
|
||||
__stringify(minor) ".bin"
|
||||
|
||||
#define XE2LPD_DMC_MAX_FW_SIZE 0x8000
|
||||
#define XELPDP_DMC_MAX_FW_SIZE 0x7000
|
||||
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
|
||||
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
|
||||
|
||||
#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd)
|
||||
MODULE_FIRMWARE(XE2LPD_DMC_PATH);
|
||||
|
||||
#define MTL_DMC_PATH DMC_PATH(mtl)
|
||||
MODULE_FIRMWARE(MTL_DMC_PATH);
|
||||
|
||||
@ -546,6 +552,8 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
|
||||
pipedmc_clock_gating_wa(i915, true);
|
||||
disable_all_event_handlers(i915);
|
||||
pipedmc_clock_gating_wa(i915, false);
|
||||
|
||||
intel_dmc_wl_disable(i915);
|
||||
}
|
||||
|
||||
void assert_dmc_loaded(struct drm_i915_private *i915)
|
||||
@ -949,7 +957,7 @@ static void dmc_load_work_fn(struct work_struct *work)
|
||||
" Disabling runtime power management.\n",
|
||||
dmc->fw_path);
|
||||
drm_notice(&i915->drm, "DMC firmware homepage: %s",
|
||||
INTEL_UC_FIRMWARE_URL);
|
||||
INTEL_DMC_FIRMWARE_URL);
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
@ -987,7 +995,10 @@ void intel_dmc_init(struct drm_i915_private *i915)
|
||||
|
||||
INIT_WORK(&dmc->work, dmc_load_work_fn);
|
||||
|
||||
if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
|
||||
if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) {
|
||||
dmc->fw_path = XE2LPD_DMC_PATH;
|
||||
dmc->max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
|
||||
} else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
|
||||
dmc->fw_path = MTL_DMC_PATH;
|
||||
dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
|
||||
} else if (IS_DG2(i915)) {
|
||||
@ -1072,6 +1083,8 @@ void intel_dmc_suspend(struct drm_i915_private *i915)
|
||||
if (dmc)
|
||||
flush_work(&dmc->work);
|
||||
|
||||
intel_dmc_wl_disable(i915);
|
||||
|
||||
/* Drop the reference held in case DMC isn't loaded. */
|
||||
if (!intel_dmc_has_payload(i915))
|
||||
intel_dmc_runtime_pm_put(i915);
|
||||
|
@@ -97,4 +97,10 @@
#define TGL_DMC_DEBUG3		_MMIO(0x101090)
#define DG1_DMC_DEBUG3		_MMIO(0x13415c)

#define DMC_WAKELOCK_CFG		_MMIO(0x8F1B0)
#define DMC_WAKELOCK_CFG_ENABLE		REG_BIT(31)
#define DMC_WAKELOCK1_CTL		_MMIO(0x8F140)
#define DMC_WAKELOCK_CTL_REQ		REG_BIT(31)
#define DMC_WAKELOCK_CTL_ACK		REG_BIT(15)

#endif /* __INTEL_DMC_REGS_H__ */
drivers/gpu/drm/i915/display/intel_dmc_wl.c (new file, 262 lines)
@@ -0,0 +1,262 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism to cause display engine to exit DC
 * states to allow programming to registers that are powered down in
 * those states. Previous projects exited DC states automatically when
 * detecting programming. Now software controls the exit by
 * programming the wake lock. This improves system performance and
 * system interactions and better fits the flip queue style of
 * programming. Wake lock is only required when DC5, DC6, or DC6v have
 * been enabled in DC_STATE_EN and the wake lock mode of operation has
 * been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down. In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register. With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register. There are also 13 control registers
 * that can be used to hold and release different wakelocks. In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used. The other definitions are here for
 * potential future use.
 */

#define DMC_WAKELOCK_CTL_TIMEOUT 5
#define DMC_WAKELOCK_HOLD_TIME 50

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

static struct intel_dmc_wl_range lnl_wl_range[] = {
	{ .start = 0x60000, .end = 0x7ffff },
};

static void __intel_dmc_wl_release(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct drm_i915_private *i915 =
		container_of(wl, struct drm_i915_private, display.wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/* Bail out if refcount reached zero while waiting for the spinlock */
	if (!refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(i915, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_wait_for_register_nowl(i915, DMC_WAKELOCK1_CTL,
					   DMC_WAKELOCK_CTL_ACK, 0,
					   DMC_WAKELOCK_CTL_TIMEOUT)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static bool intel_dmc_wl_check_range(u32 address)
{
	int i;
	bool wl_needed = false;

	for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
		if (address >= lnl_wl_range[i].start &&
		    address <= lnl_wl_range[i].end) {
			wl_needed = true;
			break;
		}
	}

	return wl_needed;
}

static bool __intel_dmc_wl_supported(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) < 20 ||
	    !intel_dmc_has_payload(i915) ||
	    !i915->display.params.enable_dmc_wl)
		return false;

	return true;
}

void intel_dmc_wl_init(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;

	/* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
	if (DISPLAY_VER(i915) < 20 ||
	    !i915->display.params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount, 0);
}

void intel_dmc_wl_enable(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (wl->enabled)
		goto out_unlock;

	/*
	 * Enable wakelock in DMC. We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(i915, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_disable(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	flush_delayed_work(&wl->work);

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(i915, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	refcount_set(&wl->refcount, 0);
	wl->enabled = false;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet. It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (!wl->taken) {
		__intel_de_rmw_nowl(i915, DMC_WAKELOCK1_CTL, 0,
				    DMC_WAKELOCK_CTL_REQ);

		if (__intel_wait_for_register_nowl(i915, DMC_WAKELOCK1_CTL,
						   DMC_WAKELOCK_CTL_ACK,
						   DMC_WAKELOCK_CTL_ACK,
						   DMC_WAKELOCK_CTL_TIMEOUT)) {
			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
			goto out_unlock;
		}

		wl->taken = true;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		__intel_dmc_wl_release(i915);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
drivers/gpu/drm/i915/display/intel_dmc_wl.h (new file, 31 lines)
@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2024 Intel Corporation
 */

#ifndef __INTEL_WAKELOCK_H__
#define __INTEL_WAKELOCK_H__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include "i915_reg_defs.h"

struct drm_i915_private;

struct intel_dmc_wl {
	spinlock_t lock; /* protects enabled, taken and refcount */
	bool enabled;
	bool taken;
	refcount_t refcount;
	struct delayed_work work;
};

void intel_dmc_wl_init(struct drm_i915_private *i915);
void intel_dmc_wl_enable(struct drm_i915_private *i915);
void intel_dmc_wl_disable(struct drm_i915_private *i915);
void intel_dmc_wl_get(struct drm_i915_private *i915, i915_reg_t reg);
void intel_dmc_wl_put(struct drm_i915_private *i915, i915_reg_t reg);

#endif /* __INTEL_WAKELOCK_H__ */
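The two new files above form the wakelock core; the intel_de.h hunk at the top of this diff shows the intended consumer side. As a rough sketch only (the accessor name and the register are placeholders, not part of the patch), a register accessor is expected to bracket its MMIO traffic with the get/put helpers declared above:

/* Illustration: hold the DMC wakelock across an MMIO read so DMC cannot
 * drop the hardware into DC5/DC6 while the register is being accessed.
 */
static u32 example_de_read(struct drm_i915_private *i915, i915_reg_t reg)
{
	u32 val;

	intel_dmc_wl_get(i915, reg);	/* no-op unless reg falls in a protected range */
	val = intel_uncore_read(&i915->uncore, reg);
	intel_dmc_wl_put(i915, reg);	/* drop the reference; the hardware release is deferred */

	return val;
}

Note that intel_dmc_wl_put() does not clear DMC_WAKELOCK_CTL_REQ itself: once the refcount drops to zero it queues __intel_dmc_wl_release(), which releases the lock after DMC_WAKELOCK_HOLD_TIME ms, so closely spaced accesses keep reusing the wakelock that is already held.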
@@ -123,6 +123,14 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return HAS_AS_SDP(i915) &&
		drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* Is link rate UHBR and thus 128b/132b? */
|
||||
@ -425,7 +433,7 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
|
||||
return max_rate;
|
||||
}
|
||||
|
||||
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
|
||||
bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct intel_encoder *encoder = &intel_dig_port->base;
|
||||
@ -443,11 +451,9 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
|
||||
|
||||
static int icl_max_source_rate(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
|
||||
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
|
||||
if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
|
||||
if (intel_encoder_is_combo(encoder) && !intel_dp_is_edp(intel_dp))
|
||||
return 540000;
|
||||
|
||||
return 810000;
|
||||
@ -463,11 +469,9 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
|
||||
|
||||
static int mtl_max_source_rate(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
if (intel_encoder_is_c10phy(encoder))
|
||||
return 810000;
|
||||
|
||||
return 2000000;
|
||||
@ -499,7 +503,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
|
||||
/* The values must be in increasing order */
|
||||
static const int mtl_rates[] = {
|
||||
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
|
||||
810000, 1000000, 1350000, 2000000,
|
||||
810000, 1000000, 2000000,
|
||||
};
|
||||
static const int icl_rates[] = {
|
||||
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
|
||||
@ -1198,15 +1202,15 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
|
||||
}
|
||||
|
||||
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
|
||||
struct intel_connector *connector,
|
||||
int hdisplay, int clock)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_connector *connector = intel_dp->attached_connector;
|
||||
|
||||
if (!intel_dp_can_bigjoiner(intel_dp))
|
||||
if (!intel_dp_has_bigjoiner(intel_dp))
|
||||
return false;
|
||||
|
||||
return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
|
||||
return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
|
||||
connector->force_bigjoiner_enable;
|
||||
}
|
||||
|
||||
@ -1220,7 +1224,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
|
||||
const struct drm_display_mode *fixed_mode;
|
||||
int target_clock = mode->clock;
|
||||
int max_rate, mode_rate, max_lanes, max_link_clock;
|
||||
int max_dotclk = dev_priv->max_dotclk_freq;
|
||||
int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
|
||||
u16 dsc_max_compressed_bpp = 0;
|
||||
u8 dsc_slice_count = 0;
|
||||
enum drm_mode_status status;
|
||||
@ -1233,6 +1237,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
return MODE_H_ILLEGAL;
|
||||
|
||||
if (mode->clock < 10000)
|
||||
return MODE_CLOCK_LOW;
|
||||
|
||||
fixed_mode = intel_panel_fixed_mode(connector, mode);
|
||||
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
|
||||
status = intel_panel_mode_valid(connector, mode);
|
||||
@ -1242,10 +1249,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
|
||||
target_clock = fixed_mode->clock;
|
||||
}
|
||||
|
||||
if (mode->clock < 10000)
|
||||
return MODE_CLOCK_LOW;
|
||||
|
||||
if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
|
||||
if (intel_dp_need_bigjoiner(intel_dp, connector,
|
||||
mode->hdisplay, target_clock)) {
|
||||
bigjoiner = true;
|
||||
max_dotclk *= 2;
|
||||
}
|
||||
@ -1306,11 +1311,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
|
||||
dsc = dsc_max_compressed_bpp && dsc_slice_count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Big joiner configuration needs DSC for TGL which is not true for
|
||||
* XE_LPD where uncompressed joiner is supported.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
|
||||
if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
if (mode_rate > max_rate && !dsc)
|
||||
@ -1422,7 +1423,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
|
||||
if (DISPLAY_VER(dev_priv) >= 12)
|
||||
return true;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
|
||||
if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
|
||||
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
@ -1917,8 +1919,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
|
||||
if (valid_dsc_bpp[i] < dsc_min_bpp ||
|
||||
valid_dsc_bpp[i] > dsc_max_bpp)
|
||||
if (valid_dsc_bpp[i] < dsc_min_bpp)
|
||||
continue;
|
||||
if (valid_dsc_bpp[i] > dsc_max_bpp)
|
||||
break;
|
||||
|
||||
ret = dsc_compute_link_config(intel_dp,
|
||||
@ -2399,6 +2402,16 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
|
||||
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
|
||||
}
|
||||
|
||||
bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
|
||||
{
|
||||
/*
|
||||
* Pipe joiner needs compression up to display 12 due to bandwidth
|
||||
* limitation. DG2 onwards pipe joiner can be enabled without
|
||||
* compression.
|
||||
*/
|
||||
return DISPLAY_VER(i915) < 13 && use_joiner;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
@ -2407,30 +2420,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
const struct intel_connector *connector =
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(conn_state->connector);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct link_config_limits limits;
|
||||
bool joiner_needs_dsc = false;
|
||||
bool dsc_needed;
|
||||
bool dsc_needed, joiner_needs_dsc;
|
||||
int ret = 0;
|
||||
|
||||
if (pipe_config->fec_enable &&
|
||||
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
|
||||
return -EINVAL;
|
||||
|
||||
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
|
||||
if (intel_dp_need_bigjoiner(intel_dp, connector,
|
||||
adjusted_mode->crtc_hdisplay,
|
||||
adjusted_mode->crtc_clock))
|
||||
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
|
||||
|
||||
/*
|
||||
* Pipe joiner needs compression up to display 12 due to bandwidth
|
||||
* limitation. DG2 onwards pipe joiner can be enabled without
|
||||
* compression.
|
||||
*/
|
||||
joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
|
||||
joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->bigjoiner_pipes);
|
||||
|
||||
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
|
||||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
|
||||
@ -2613,6 +2621,29 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
|
||||
vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
|
||||
}
|
||||
|
||||
static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_dp_as_sdp *as_sdp = &crtc_state->infoframes.as_sdp;
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
|
||||
if (!crtc_state->vrr.enable ||
|
||||
!intel_dp_as_sdp_supported(intel_dp))
|
||||
return;
|
||||
|
||||
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
|
||||
|
||||
/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
|
||||
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
|
||||
as_sdp->length = 0x9;
|
||||
as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
|
||||
as_sdp->vtotal = adjusted_mode->vtotal;
|
||||
as_sdp->target_rr = 0;
|
||||
as_sdp->duration_incr_ms = 0;
|
||||
as_sdp->duration_incr_ms = 0;
|
||||
}
|
||||
|
||||
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
@ -2723,7 +2754,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
|
||||
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
|
||||
int pixel_clock;
|
||||
|
||||
if (has_seamless_m_n(connector))
|
||||
/*
|
||||
* FIXME all joined pipes share the same transcoder.
|
||||
* Need to account for that when updating M/N live.
|
||||
*/
|
||||
if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
|
||||
pipe_config->update_m_n = true;
|
||||
|
||||
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
|
||||
@ -2964,6 +2999,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
g4x_dp_set_clock(encoder, pipe_config);
|
||||
|
||||
intel_vrr_compute_config(pipe_config, conn_state);
|
||||
intel_dp_compute_as_sdp(intel_dp, pipe_config);
|
||||
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
|
||||
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
|
||||
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
|
||||
@ -3356,6 +3392,14 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
|
||||
fastset = false;
|
||||
}
|
||||
|
||||
if (CAN_PANEL_REPLAY(intel_dp)) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
crtc_state->uapi.mode_changed = true;
|
||||
fastset = false;
|
||||
}
|
||||
|
||||
return fastset;
|
||||
}
|
||||
|
||||
@ -4039,39 +4083,84 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
||||
intel_dp->downstream_ports) == 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_can_mst(struct intel_dp *intel_dp)
|
||||
static const char *intel_dp_mst_mode_str(enum drm_dp_mst_mode mst_mode)
|
||||
{
|
||||
if (mst_mode == DRM_DP_MST)
|
||||
return "MST";
|
||||
else if (mst_mode == DRM_DP_SST_SIDEBAND_MSG)
|
||||
return "SST w/ sideband messaging";
|
||||
else
|
||||
return "SST";
|
||||
}
|
||||
|
||||
static enum drm_dp_mst_mode
|
||||
intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
|
||||
enum drm_dp_mst_mode sink_mst_mode)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
|
||||
return i915->display.params.enable_dp_mst &&
|
||||
intel_dp_mst_source_support(intel_dp) &&
|
||||
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
|
||||
if (!i915->display.params.enable_dp_mst)
|
||||
return DRM_DP_SST;
|
||||
|
||||
if (!intel_dp_mst_source_support(intel_dp))
|
||||
return DRM_DP_SST;
|
||||
|
||||
if (sink_mst_mode == DRM_DP_SST_SIDEBAND_MSG &&
|
||||
!(intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B))
|
||||
return DRM_DP_SST;
|
||||
|
||||
return sink_mst_mode;
|
||||
}
|
||||
|
||||
static enum drm_dp_mst_mode
|
||||
intel_dp_mst_detect(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
enum drm_dp_mst_mode sink_mst_mode;
|
||||
enum drm_dp_mst_mode mst_detect;
|
||||
|
||||
sink_mst_mode = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
|
||||
|
||||
mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
str_yes_no(intel_dp_mst_source_support(intel_dp)),
|
||||
intel_dp_mst_mode_str(sink_mst_mode),
|
||||
str_yes_no(i915->display.params.enable_dp_mst),
|
||||
intel_dp_mst_mode_str(mst_detect));
|
||||
|
||||
return mst_detect;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_configure_mst(struct intel_dp *intel_dp)
|
||||
intel_dp_mst_configure(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_encoder *encoder =
|
||||
&dp_to_dig_port(intel_dp)->base;
|
||||
bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
str_yes_no(intel_dp_mst_source_support(intel_dp)),
|
||||
str_yes_no(sink_can_mst),
|
||||
str_yes_no(i915->display.params.enable_dp_mst));
|
||||
|
||||
if (!intel_dp_mst_source_support(intel_dp))
|
||||
return;
|
||||
|
||||
intel_dp->is_mst = sink_can_mst &&
|
||||
i915->display.params.enable_dp_mst;
|
||||
intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
|
||||
|
||||
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
|
||||
intel_dp->is_mst);
|
||||
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
|
||||
|
||||
/* Avoid stale info on the next detect cycle. */
|
||||
intel_dp->mst_detect = DRM_DP_SST;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_mst_disconnect(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
|
||||
if (!intel_dp->is_mst)
|
||||
return;
|
||||
|
||||
drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
|
||||
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
|
||||
intel_dp->is_mst = false;
|
||||
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
|
||||
}
|
||||
|
||||
static bool
|
||||
@ -4119,6 +4208,32 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
|
||||
return false;
|
||||
}
|
||||
|
||||
static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
|
||||
struct dp_sdp *sdp, size_t size)
|
||||
{
|
||||
size_t length = sizeof(struct dp_sdp);
|
||||
|
||||
if (size < length)
|
||||
return -ENOSPC;
|
||||
|
||||
memset(sdp, 0, size);
|
||||
|
||||
/* Prepare AS (Adaptive Sync) SDP Header */
|
||||
sdp->sdp_header.HB0 = 0;
|
||||
sdp->sdp_header.HB1 = as_sdp->sdp_type;
|
||||
sdp->sdp_header.HB2 = 0x02;
|
||||
sdp->sdp_header.HB3 = as_sdp->length;
|
||||
|
||||
/* Fill AS (Adaptive Sync) SDP Payload */
|
||||
sdp->db[0] = as_sdp->mode;
|
||||
sdp->db[1] = as_sdp->vtotal & 0xFF;
|
||||
sdp->db[2] = (as_sdp->vtotal >> 8) & 0xFF;
|
||||
sdp->db[3] = as_sdp->target_rr & 0xFF;
|
||||
sdp->db[4] = (as_sdp->target_rr >> 8) & 0x3;
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
|
||||
const struct hdmi_drm_infoframe *drm_infoframe,
|
||||
@ -4218,6 +4333,10 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
|
||||
&crtc_state->infoframes.drm.drm,
|
||||
&sdp, sizeof(sdp));
|
||||
break;
|
||||
case DP_SDP_ADAPTIVE_SYNC:
|
||||
len = intel_dp_as_sdp_pack(&crtc_state->infoframes.as_sdp, &sdp,
|
||||
sizeof(sdp));
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(type);
|
||||
return;
|
@@ -4239,6 +4358,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;

	if (HAS_AS_SDP(dev_priv))
		dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;

	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
@@ -4256,10 +4379,42 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
		return;

	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
	intel_write_dp_sdp(encoder, crtc_state, DP_SDP_ADAPTIVE_SYNC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static
int intel_dp_as_sdp_unpack(struct drm_dp_as_sdp *as_sdp,
			   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(as_sdp, 0, sizeof(*as_sdp));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_ADAPTIVE_SYNC)
		return -EINVAL;

	if (sdp->sdp_header.HB2 != 0x02)
		return -EINVAL;

	if ((sdp->sdp_header.HB3 & 0x3F) != 9)
		return -EINVAL;

	as_sdp->length = sdp->sdp_header.HB3 & DP_ADAPTIVE_SYNC_SDP_LENGTH;
	as_sdp->mode = sdp->db[0] & DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE;
	as_sdp->vtotal = (sdp->db[2] << 8) | sdp->db[1];
	as_sdp->target_rr = (u64)sdp->db[3] | ((u64)sdp->db[4] & 0x3);

	return 0;
}
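The unpack path mirrors the header and payload checks of the pack routine. A short standalone harness can round-trip the payload from the earlier packing sketch (again, local names only; note it shifts the two high target_rr bits back up, as the field layout in that sketch implies):

#include <assert.h>
#include <stdint.h>

/* Reverse of pack_as_payload() from the earlier sketch. */
static void unpack_as_payload(const uint8_t db[5], uint16_t *vtotal,
			      uint16_t *target_rr)
{
	*vtotal = (uint16_t)(db[2] << 8) | db[1];
	*target_rr = (uint16_t)((db[4] & 0x3) << 8) | db[3];
}

int main(void)
{
	/* vtotal = 1125 (0x465), target_rr = 600 (0x258), mode = 1 */
	uint8_t db[5] = { 0x01, 0x65, 0x04, 0x58, 0x02 };
	uint16_t vtotal, target_rr;

	unpack_as_payload(db, &vtotal, &target_rr);
	assert(vtotal == 1125 && target_rr == 600);
	return 0;
}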

static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
@@ -4330,6 +4485,29 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
	return 0;
}

static void
intel_read_dp_as_sdp(struct intel_encoder *encoder,
		     struct intel_crtc_state *crtc_state,
		     struct drm_dp_as_sdp *as_sdp)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_ADAPTIVE_SYNC;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
}

static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
@@ -4436,6 +4614,10 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	case DP_SDP_ADAPTIVE_SYNC:
		intel_read_dp_as_sdp(encoder, crtc_state,
				     &crtc_state->infoframes.as_sdp);
		break;
	default:
		MISSING_CASE(type);
		break;
@@ -5363,6 +5545,8 @@ intel_dp_detect_dpcd(intel_dp)
	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	intel_dp->mst_detect = intel_dp_mst_detect(intel_dp);

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;
@@ -5374,7 +5558,7 @@ intel_dp_detect_dpcd(intel_dp)
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
	if (intel_dp->mst_detect == DRM_DP_MST)
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
@@ -5679,15 +5863,7 @@ intel_dp_detect(struct drm_connector *connector,
	memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
	intel_dp->psr.sink_panel_replay_support = false;

	if (intel_dp->is_mst) {
		drm_dbg_kms(&dev_priv->drm,
			    "MST device may have disappeared %d vs %d\n",
			    intel_dp->is_mst,
			    intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
						intel_dp->is_mst);
	}
	intel_dp_mst_disconnect(intel_dp);

	intel_dp_tunnel_disconnect(intel_dp);

@@ -5706,7 +5882,7 @@ intel_dp_detect(struct drm_connector *connector,

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_configure_mst(intel_dp);
	intel_dp_mst_configure(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
@@ -6489,7 +6665,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
@@ -6514,7 +6689,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

@@ -6557,6 +6732,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);

@@ -88,6 +88,7 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
				   struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
@@ -119,7 +120,8 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
				int max_dprx_rate, int max_dprx_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner);
bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
@@ -149,6 +151,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner);
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     int hdisplay, int clock);

static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
@@ -61,9 +61,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
	u32 status;
	int ret;

	ret = __intel_de_wait_for_register(i915, ch_ctl,
					   DP_AUX_CH_CTL_SEND_BUSY, 0,
					   2, timeout_ms, &status);
	ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(&i915->drm,
@@ -143,9 +142,15 @@ static int intel_dp_aux_sync_len(void)
	return precharge + preamble;
}

static int intel_dp_aux_fw_sync_len(void)
int intel_dp_aux_fw_sync_len(void)
{
	int precharge = 10; /* 10-16 */
	/*
	 * We faced some glitches on Dell Precision 5490 MTL laptop with panel:
	 * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
	 * is fixing these problems with the panel. It is still within range
	 * mentioned in eDP specification.
	 */
	int precharge = 12; /* 10-16 */
	int preamble = 8;

	return precharge + preamble;
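The comment in the hunk above explains the bump: the fast-wake sync length is simply precharge plus preamble pulses, so raising precharge from 10 to 12 moves the total from the hardware default 18 to 20. A throwaway sketch of that arithmetic (local helper, not the driver function):

#include <stdio.h>

/* Fast-wake sync length = precharge pulses + preamble pulses. */
static int fw_sync_len(int precharge, int preamble)
{
	return precharge + preamble;
}

int main(void)
{
	printf("old: %d pulses, new: %d pulses\n",
	       fw_sync_len(10, 8), fw_sync_len(12, 8));
	return 0;
}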
@@ -20,5 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);

void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
int intel_dp_aux_fw_sync_len(void);

#endif /* __INTEL_DP_AUX_H__ */
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
	u8 bcaps;
	int ret;

	*hdcp_capable = false;
	*hdcp2_capable = false;
	if (!intel_encoder_is_mst(connector->encoder))
		return -EINVAL;

	ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
	if (ret)
		return ret;
		drm_dbg_kms(&i915->drm,
			    "HDCP2 DPCD capability read failed err: %d\n", ret);

	ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
	if (ret)
@@ -766,11 +769,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
		return -EINVAL;

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_register(i915,
				       HDCP_STATUS(i915, cpu_transcoder, port),
				       stream_enc_status,
				       enable ? stream_enc_status : 0,
				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
	if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port),
			  stream_enc_status, enable ? stream_enc_status : 0,
			  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
			transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
		return -ETIMEDOUT;
@@ -801,11 +802,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
		return ret;

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_register(i915,
				       HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
				       STREAM_ENCRYPTION_STATUS,
				       enable ? STREAM_ENCRYPTION_STATUS : 0,
				       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
	if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
			  STREAM_ENCRYPTION_STATUS,
			  enable ? STREAM_ENCRYPTION_STATUS : 0,
			  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
			transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
		return -ETIMEDOUT;
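Both HDCP hunks above swap a verbose register-wait call for a shorter helper with the same poll-until-match-or-timeout semantics. For readers outside the kernel, a generic standalone stand-in for that pattern looks roughly like this (illustrative only, not the i915 helper; the callback and poll budget are made up for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Poll a status source until (value & mask) == expected or we give up.
 * A real implementation would sleep between polls and track elapsed time. */
static bool wait_for_status(uint32_t (*read_status)(void), uint32_t mask,
			    uint32_t expected, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if ((read_status() & mask) == expected)
			return true;
	}
	return false;	/* caller would report -ETIMEDOUT */
}

static uint32_t fake_status(void)
{
	return 0x1;	/* pretend the encryption-status bit is already set */
}

int main(void)
{
	printf("status reached: %s\n",
	       wait_for_status(fake_status, 0x1, 0x1, 50) ? "yes" : "no");
	return 0;
}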
@ -88,11 +88,10 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
|
||||
|
||||
if (dsc) {
|
||||
flags |= DRM_DP_BW_OVERHEAD_DSC;
|
||||
/* TODO: add support for bigjoiner */
|
||||
dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
|
||||
adjusted_mode->clock,
|
||||
adjusted_mode->hdisplay,
|
||||
false);
|
||||
crtc_state->bigjoiner_pipes);
|
||||
}
|
||||
|
||||
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
|
||||
@ -525,14 +524,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
|
||||
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
|
||||
struct intel_dp *intel_dp = &intel_mst->primary->dp;
|
||||
const struct intel_connector *connector =
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(conn_state->connector);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
struct link_config_limits limits;
|
||||
bool dsc_needed;
|
||||
bool dsc_needed, joiner_needs_dsc;
|
||||
int ret = 0;
|
||||
|
||||
if (pipe_config->fec_enable &&
|
||||
@ -542,11 +542,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return -EINVAL;
|
||||
|
||||
if (intel_dp_need_bigjoiner(intel_dp, connector,
|
||||
adjusted_mode->crtc_hdisplay,
|
||||
adjusted_mode->crtc_clock))
|
||||
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
|
||||
|
||||
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
|
||||
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
|
||||
pipe_config->has_pch_encoder = false;
|
||||
|
||||
dsc_needed = intel_dp->force_dsc_en ||
|
||||
joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->bigjoiner_pipes);
|
||||
|
||||
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
|
||||
!intel_dp_mst_compute_config_limits(intel_dp,
|
||||
connector,
|
||||
pipe_config,
|
||||
@ -566,8 +573,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
|
||||
|
||||
/* enable compression if the mode doesn't fit available BW */
|
||||
if (dsc_needed) {
|
||||
drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n",
|
||||
str_yes_no(ret),
|
||||
drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
|
||||
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
|
||||
str_yes_no(intel_dp->force_dsc_en));
|
||||
|
||||
if (!intel_dp_mst_dsc_source_support(pipe_config))
|
||||
@ -954,6 +961,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
struct drm_dp_mst_atomic_payload *new_payload =
|
||||
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
struct intel_crtc *pipe_crtc;
|
||||
bool last_mst_stream;
|
||||
|
||||
intel_dp->active_mst_links--;
|
||||
@ -962,7 +970,13 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
|
||||
!intel_dp_mst_is_master_trans(old_crtc_state));
|
||||
|
||||
intel_crtc_vblank_off(old_crtc_state);
|
||||
for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(old_crtc_state)) {
|
||||
const struct intel_crtc_state *old_pipe_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, pipe_crtc);
|
||||
|
||||
intel_crtc_vblank_off(old_pipe_crtc_state);
|
||||
}
|
||||
|
||||
intel_disable_transcoder(old_crtc_state);
|
||||
|
||||
@ -980,12 +994,18 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_ddi_disable_transcoder_func(old_crtc_state);
|
||||
|
||||
intel_dsc_disable(old_crtc_state);
|
||||
for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(old_crtc_state)) {
|
||||
const struct intel_crtc_state *old_pipe_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, pipe_crtc);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
skl_scaler_disable(old_crtc_state);
|
||||
else
|
||||
ilk_pfit_disable(old_crtc_state);
|
||||
intel_dsc_disable(old_pipe_crtc_state);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
skl_scaler_disable(old_pipe_crtc_state);
|
||||
else
|
||||
ilk_pfit_disable(old_pipe_crtc_state);
|
||||
}
|
||||
|
||||
/*
|
||||
* Power down mst path before disabling the port, otherwise we end
|
||||
@ -1117,6 +1137,39 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
|
||||
intel_ddi_set_dp_msa(pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
u32 clear = 0;
|
||||
u32 set = 0;
|
||||
|
||||
if (!IS_ALDERLAKE_P(i915))
|
||||
return;
|
||||
|
||||
if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
|
||||
return;
|
||||
|
||||
/* Wa_14013163432:adlp */
|
||||
if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
|
||||
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
|
||||
|
||||
/* Wa_14014143976:adlp */
|
||||
if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
|
||||
if (intel_dp_is_uhbr(crtc_state))
|
||||
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
|
||||
else if (crtc_state->fec_enable)
|
||||
clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
|
||||
|
||||
if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
|
||||
set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
|
||||
}
|
||||
|
||||
if (!clear && !set)
|
||||
return;
|
||||
|
||||
intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
|
||||
}
|
||||
|
||||
static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
@ -1131,6 +1184,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
|
||||
enum transcoder trans = pipe_config->cpu_transcoder;
|
||||
bool first_mst_stream = intel_dp->active_mst_links == 1;
|
||||
struct intel_crtc *pipe_crtc;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
|
||||
|
||||
@ -1145,6 +1199,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
|
||||
}
|
||||
|
||||
enable_bs_jitter_was(pipe_config);
|
||||
|
||||
intel_ddi_enable_transcoder_func(encoder, pipe_config);
|
||||
|
||||
clear_act_sent(encoder, pipe_config);
|
||||
@ -1172,7 +1228,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_enable_transcoder(pipe_config);
|
||||
|
||||
intel_crtc_vblank_on(pipe_config);
|
||||
for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
|
||||
intel_crtc_joined_pipe_mask(pipe_config)) {
|
||||
const struct intel_crtc_state *pipe_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, pipe_crtc);
|
||||
|
||||
intel_crtc_vblank_on(pipe_crtc_state);
|
||||
}
|
||||
|
||||
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
|
||||
}
|
||||
@ -1285,7 +1347,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
|
||||
struct drm_dp_mst_port *port = intel_connector->port;
|
||||
const int min_bpp = 18;
|
||||
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
|
||||
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
|
||||
int max_rate, mode_rate, max_lanes, max_link_clock;
|
||||
int ret;
|
||||
bool dsc = false, bigjoiner = false;
|
||||
@ -1302,8 +1364,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
if (*status != MODE_OK)
|
||||
return 0;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
|
||||
*status = MODE_NO_DBLESCAN;
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
|
||||
*status = MODE_H_ILLEGAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (mode->clock < 10000) {
|
||||
*status = MODE_CLOCK_LOW;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1314,10 +1381,6 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
max_link_clock, max_lanes);
|
||||
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
|
||||
|
||||
ret = drm_modeset_lock(&mgr->base.lock, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
* - Also check if compression would allow for the mode
|
||||
@ -1330,32 +1393,23 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
* corresponding link capabilities of the sink) in case the
|
||||
* stream is uncompressed for it by the last branch device.
|
||||
*/
|
||||
if (intel_dp_need_bigjoiner(intel_dp, intel_connector,
|
||||
mode->hdisplay, target_clock)) {
|
||||
bigjoiner = true;
|
||||
max_dotclk *= 2;
|
||||
}
|
||||
|
||||
ret = drm_modeset_lock(&mgr->base.lock, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (mode_rate > max_rate || mode->clock > max_dotclk ||
|
||||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
|
||||
*status = MODE_CLOCK_HIGH;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (mode->clock < 10000) {
|
||||
*status = MODE_CLOCK_LOW;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
|
||||
*status = MODE_H_ILLEGAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
|
||||
bigjoiner = true;
|
||||
max_dotclk *= 2;
|
||||
|
||||
/* TODO: add support for bigjoiner */
|
||||
*status = MODE_CLOCK_HIGH;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 10 &&
|
||||
if (HAS_DSC_MST(dev_priv) &&
|
||||
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
|
||||
/*
|
||||
* TBD pass the connector BPC,
|
||||
@ -1383,11 +1437,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
dsc = dsc_max_compressed_bpp && dsc_slice_count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Big joiner configuration needs DSC for TGL which is not true for
|
||||
* XE_LPD where uncompressed joiner is supported.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
|
||||
if (intel_dp_joiner_needs_dsc(dev_priv, bigjoiner) && !dsc) {
|
||||
*status = MODE_CLOCK_HIGH;
|
||||
return 0;
|
||||
}
|
||||
@ -1397,7 +1447,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
return 0;
|
||||
}
|
||||
|
||||
*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
|
||||
*status = intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -348,7 +348,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
|
||||
|
||||
out_err:
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
|
||||
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
|
||||
drm_dp_tunnel_name(intel_dp->tunnel),
|
||||
connector->base.base.id, connector->base.name,
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
|
@ -399,11 +399,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
|
||||
* The flag should get set in 100us according to the HW team, but
|
||||
* use 1ms due to occasional timeouts observed with that.
|
||||
*/
|
||||
if (intel_wait_for_register_fw(&dev_priv->uncore,
|
||||
BXT_PORT_CL1CM_DW0(phy),
|
||||
PHY_RESERVED | PHY_POWER_GOOD,
|
||||
PHY_POWER_GOOD,
|
||||
1))
|
||||
if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
|
||||
PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
|
||||
drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
|
||||
phy);
|
||||
|
||||
|
@ -107,7 +107,7 @@ struct intel_dpll_mgr {
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder);
|
||||
void (*update_ref_clks)(struct drm_i915_private *i915);
|
||||
void (*dump_hw_state)(struct drm_i915_private *i915,
|
||||
void (*dump_hw_state)(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state);
|
||||
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
|
||||
const struct intel_dpll_hw_state *b);
|
||||
@ -634,16 +634,15 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ibx_dump_hw_state(struct drm_i915_private *i915,
|
||||
static void ibx_dump_hw_state(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
|
||||
"fp0: 0x%x, fp1: 0x%x\n",
|
||||
hw_state->dpll,
|
||||
hw_state->dpll_md,
|
||||
hw_state->fp0,
|
||||
hw_state->fp1);
|
||||
drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
|
||||
"fp0: 0x%x, fp1: 0x%x\n",
|
||||
hw_state->dpll,
|
||||
hw_state->dpll_md,
|
||||
hw_state->fp0,
|
||||
hw_state->fp1);
|
||||
}
|
||||
|
||||
static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
|
||||
@ -1225,11 +1224,11 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
i915->display.dpll.ref_clks.nssc = 135000;
|
||||
}
|
||||
|
||||
static void hsw_dump_hw_state(struct drm_i915_private *i915,
|
||||
static void hsw_dump_hw_state(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
|
||||
hw_state->wrpll, hw_state->spll);
|
||||
drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
|
||||
hw_state->wrpll, hw_state->spll);
|
||||
}
|
||||
|
||||
static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
|
||||
@ -1939,14 +1938,11 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
|
||||
}
|
||||
|
||||
static void skl_dump_hw_state(struct drm_i915_private *i915,
|
||||
static void skl_dump_hw_state(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
drm_dbg_kms(&i915->drm, "dpll_hw_state: "
|
||||
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
|
||||
hw_state->ctrl1,
|
||||
hw_state->cfgcr1,
|
||||
hw_state->cfgcr2);
|
||||
drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
|
||||
hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
|
||||
}
|
||||
|
||||
static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
|
||||
@ -2402,23 +2398,16 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
/* DSI non-SSC ref 19.2MHz */
|
||||
}
|
||||
|
||||
static void bxt_dump_hw_state(struct drm_i915_private *i915,
|
||||
static void bxt_dump_hw_state(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
|
||||
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
|
||||
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
|
||||
hw_state->ebb0,
|
||||
hw_state->ebb4,
|
||||
hw_state->pll0,
|
||||
hw_state->pll1,
|
||||
hw_state->pll2,
|
||||
hw_state->pll3,
|
||||
hw_state->pll6,
|
||||
hw_state->pll8,
|
||||
hw_state->pll9,
|
||||
hw_state->pll10,
|
||||
hw_state->pcsdw12);
|
||||
drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
|
||||
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
|
||||
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
|
||||
hw_state->ebb0, hw_state->ebb4,
|
||||
hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
|
||||
hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
|
||||
hw_state->pcsdw12);
|
||||
}
|
||||
|
||||
static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
|
||||
@ -3389,7 +3378,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct icl_port_dpll *port_dpll =
|
||||
@ -3408,8 +3396,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
|
||||
|
||||
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
|
||||
dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
|
||||
encoder->port));
|
||||
dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
|
||||
port_dpll->pll = intel_find_shared_dpll(state, crtc,
|
||||
&port_dpll->hw_state,
|
||||
BIT(dpll_id));
|
||||
@ -3435,15 +3422,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_phy_is_combo(i915, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
return icl_compute_combo_phy_dpll(state, crtc);
|
||||
else if (intel_phy_is_tc(i915, phy))
|
||||
else if (intel_encoder_is_tc(encoder))
|
||||
return icl_compute_tc_phy_dplls(state, crtc);
|
||||
|
||||
MISSING_CASE(phy);
|
||||
MISSING_CASE(encoder->port);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3452,15 +3436,12 @@ static int icl_get_dplls(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_phy_is_combo(i915, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
return icl_get_combo_phy_dpll(state, crtc, encoder);
|
||||
else if (intel_phy_is_tc(i915, phy))
|
||||
else if (intel_encoder_is_tc(encoder))
|
||||
return icl_get_tc_phy_dplls(state, crtc, encoder);
|
||||
|
||||
MISSING_CASE(phy);
|
||||
MISSING_CASE(encoder->port);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -4026,28 +4007,26 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
|
||||
}
|
||||
|
||||
static void icl_dump_hw_state(struct drm_i915_private *i915,
|
||||
static void icl_dump_hw_state(struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
|
||||
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
|
||||
"mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
|
||||
"mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
|
||||
"mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
|
||||
"mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
|
||||
hw_state->cfgcr0, hw_state->cfgcr1,
|
||||
hw_state->div0,
|
||||
hw_state->mg_refclkin_ctl,
|
||||
hw_state->mg_clktop2_coreclkctl1,
|
||||
hw_state->mg_clktop2_hsclkctl,
|
||||
hw_state->mg_pll_div0,
|
||||
hw_state->mg_pll_div1,
|
||||
hw_state->mg_pll_lf,
|
||||
hw_state->mg_pll_frac_lock,
|
||||
hw_state->mg_pll_ssc,
|
||||
hw_state->mg_pll_bias,
|
||||
hw_state->mg_pll_tdc_coldst_bias);
|
||||
drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
|
||||
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
|
||||
"mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
|
||||
"mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
|
||||
"mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
|
||||
"mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
|
||||
hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
|
||||
hw_state->mg_refclkin_ctl,
|
||||
hw_state->mg_clktop2_coreclkctl1,
|
||||
hw_state->mg_clktop2_hsclkctl,
|
||||
hw_state->mg_pll_div0,
|
||||
hw_state->mg_pll_div1,
|
||||
hw_state->mg_pll_lf,
|
||||
hw_state->mg_pll_frac_lock,
|
||||
hw_state->mg_pll_ssc,
|
||||
hw_state->mg_pll_bias,
|
||||
hw_state->mg_pll_tdc_coldst_bias);
|
||||
}
|
||||
|
||||
static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
|
||||
@ -4514,22 +4493,24 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_dpll_dump_hw_state - write hw_state to dmesg
|
||||
* intel_dpll_dump_hw_state - dump hw_state
|
||||
* @i915: i915 drm device
|
||||
* @hw_state: hw state to be written to the log
|
||||
* @p: where to print the state to
|
||||
* @hw_state: hw state to be dumped
|
||||
*
|
||||
* Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
|
||||
* Dumo out the relevant values in @hw_state.
|
||||
*/
|
||||
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
|
||||
struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
if (i915->display.dpll.mgr) {
|
||||
i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
|
||||
i915->display.dpll.mgr->dump_hw_state(p, hw_state);
|
||||
} else {
|
||||
/* fallback for platforms that don't use the shared dpll
|
||||
* infrastructure
|
||||
*/
|
||||
ibx_dump_hw_state(i915, hw_state);
|
||||
ibx_dump_hw_state(p, hw_state);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
|
||||
enum tc_port;
|
||||
struct drm_i915_private;
|
||||
struct drm_printer;
|
||||
struct intel_atomic_state;
|
||||
struct intel_crtc;
|
||||
struct intel_crtc_state;
|
||||
@ -377,6 +378,7 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915);
|
||||
void intel_dpll_sanitize_state(struct drm_i915_private *i915);
|
||||
|
||||
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
|
||||
struct drm_printer *p,
|
||||
const struct intel_dpll_hw_state *hw_state);
|
||||
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
|
||||
const struct intel_dpll_hw_state *a,
|
||||
|
@ -343,12 +343,13 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
|
||||
static u32 dsb_chicken(struct intel_crtc *crtc)
|
||||
{
|
||||
if (crtc->mode_flags & I915_MODE_FLAG_VRR)
|
||||
return DSB_CTRL_WAIT_SAFE_WINDOW |
|
||||
return DSB_SKIP_WAITS_EN |
|
||||
DSB_CTRL_WAIT_SAFE_WINDOW |
|
||||
DSB_CTRL_NO_WAIT_VBLANK |
|
||||
DSB_INST_WAIT_SAFE_WINDOW |
|
||||
DSB_INST_NO_WAIT_VBLANK;
|
||||
else
|
||||
return 0;
|
||||
return DSB_SKIP_WAITS_EN;
|
||||
}
|
||||
|
||||
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
|
||||
|
@ -64,14 +64,11 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
intel_panel_fixed_mode(intel_connector, mode);
|
||||
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
|
||||
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
|
||||
enum drm_mode_status status;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "\n");
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
status = intel_panel_mode_valid(intel_connector, mode);
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
@ -223,7 +223,7 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
|
||||
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
intel_panel_fixed_mode(connector, mode);
|
||||
int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
|
||||
int max_dotclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
|
||||
int target_clock = mode->clock;
|
||||
enum drm_mode_status status;
|
||||
|
||||
@ -231,9 +231,6 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
/* XXX: Validate clock range */
|
||||
|
||||
if (fixed_mode) {
|
||||
|
@ -1106,7 +1106,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(fb->dev);
|
||||
unsigned int height;
|
||||
u32 alignment;
|
||||
u32 alignment, unused;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 12 &&
|
||||
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
|
||||
@ -1128,8 +1128,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
|
||||
height = ALIGN(height, intel_tile_height(fb, color_plane));
|
||||
|
||||
/* Catch potential overflows early */
|
||||
if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
|
||||
fb->offsets[color_plane])) {
|
||||
if (check_add_overflow(mul_u32_u32(height, fb->pitches[color_plane]),
|
||||
fb->offsets[color_plane], &unused)) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Bad offset 0x%08x or pitch %d for color plane %d\n",
|
||||
fb->offsets[color_plane], fb->pitches[color_plane],
|
||||
|
@ -826,10 +826,36 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
|
||||
|
||||
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
|
||||
{
|
||||
struct drm_i915_private *i915 = fbc->i915;
|
||||
|
||||
if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
|
||||
/*
|
||||
* WaFbcHighMemBwCorruptionAvoidance:skl,bxt
|
||||
* Display WA #0883: skl,bxt
|
||||
*/
|
||||
intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
|
||||
0, DPFC_DISABLE_DUMMY0);
|
||||
}
|
||||
|
||||
if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
|
||||
IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
|
||||
/*
|
||||
* WaFbcNukeOnHostModify:skl,kbl,cfl
|
||||
* Display WA #0873: skl,kbl,cfl
|
||||
*/
|
||||
intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
|
||||
0, DPFC_NUKE_ON_ANY_MODIFICATION);
|
||||
}
|
||||
|
||||
/* Wa_1409120013:icl,jsl,tgl,dg1 */
|
||||
if (IS_DISPLAY_VER(i915, 11, 12))
|
||||
intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
|
||||
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
|
||||
|
||||
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
|
||||
if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
|
||||
intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
|
||||
DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
|
||||
if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
|
||||
intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
|
||||
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
|
||||
}
|
||||
|
||||
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
|
||||
|
@ -135,6 +135,9 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
|
||||
return i915_gem_fb_mmap(obj, vma);
|
||||
}
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops");
|
||||
|
||||
static const struct fb_ops intelfb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
|
||||
@ -146,6 +149,8 @@ static const struct fb_ops intelfb_ops = {
|
||||
.fb_mmap = intel_fbdev_mmap,
|
||||
};
|
||||
|
||||
__diag_pop();
|
||||
|
||||
static int intelfb_create(struct drm_fb_helper *helper,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
|
@ -411,7 +411,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
|
||||
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
|
||||
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
|
||||
|
||||
ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
|
||||
ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
|
||||
|
||||
intel_de_write_fw(i915, GMBUS4(i915), 0);
|
||||
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
|
||||
|
@ -369,9 +369,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
|
||||
}
|
||||
|
||||
/* Wait for the keys to load (500us) */
|
||||
ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
|
||||
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
|
||||
10, 1, &val);
|
||||
ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
|
||||
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
|
||||
10, 1, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
else if (!(val & HDCP_KEY_LOAD_STATUS))
|
||||
|
@ -114,6 +114,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
|
||||
return VIDEO_DIP_ENABLE_GAMUT;
|
||||
case DP_SDP_VSC:
|
||||
return 0;
|
||||
case DP_SDP_ADAPTIVE_SYNC:
|
||||
return 0;
|
||||
case HDMI_INFOFRAME_TYPE_AVI:
|
||||
return VIDEO_DIP_ENABLE_AVI;
|
||||
case HDMI_INFOFRAME_TYPE_SPD:
|
||||
@ -137,6 +139,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
|
||||
return VIDEO_DIP_ENABLE_GMP_HSW;
|
||||
case DP_SDP_VSC:
|
||||
return VIDEO_DIP_ENABLE_VSC_HSW;
|
||||
case DP_SDP_ADAPTIVE_SYNC:
|
||||
return VIDEO_DIP_ENABLE_AS_ADL;
|
||||
case DP_SDP_PPS:
|
||||
return VDIP_ENABLE_PPS;
|
||||
case HDMI_INFOFRAME_TYPE_AVI:
|
||||
@ -164,6 +168,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
|
||||
return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
|
||||
case DP_SDP_VSC:
|
||||
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
|
||||
case DP_SDP_ADAPTIVE_SYNC:
|
||||
return ADL_TVIDEO_DIP_AS_SDP_DATA(cpu_transcoder, i);
|
||||
case DP_SDP_PPS:
|
||||
return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
|
||||
case HDMI_INFOFRAME_TYPE_AVI:
|
||||
@ -186,6 +192,8 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
|
||||
switch (type) {
|
||||
case DP_SDP_VSC:
|
||||
return VIDEO_DIP_VSC_DATA_SIZE;
|
||||
case DP_SDP_ADAPTIVE_SYNC:
|
||||
return VIDEO_DIP_ASYNC_DATA_SIZE;
|
||||
case DP_SDP_PPS:
|
||||
return VIDEO_DIP_PPS_DATA_SIZE;
|
||||
case HDMI_PACKET_TYPE_GAMUT_METADATA:
|
||||
@ -563,6 +571,9 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
|
||||
if (DISPLAY_VER(dev_priv) >= 10)
|
||||
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
|
||||
|
||||
if (HAS_AS_SDP(dev_priv))
|
||||
mask |= VIDEO_DIP_ENABLE_AS_ADL;
|
||||
|
||||
return val & mask;
|
||||
}
|
||||
|
||||
@ -570,6 +581,7 @@ static const u8 infoframe_type_to_idx[] = {
|
||||
HDMI_PACKET_TYPE_GENERAL_CONTROL,
|
||||
HDMI_PACKET_TYPE_GAMUT_METADATA,
|
||||
DP_SDP_VSC,
|
||||
DP_SDP_ADAPTIVE_SYNC,
|
||||
HDMI_INFOFRAME_TYPE_AVI,
|
||||
HDMI_INFOFRAME_TYPE_SPD,
|
||||
HDMI_INFOFRAME_TYPE_VENDOR,
|
||||
@ -1212,7 +1224,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
|
||||
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
|
||||
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
|
||||
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
|
||||
VIDEO_DIP_ENABLE_DRM_GLK);
|
||||
VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
|
||||
|
||||
if (!enable) {
|
||||
intel_de_write(dev_priv, reg, val);
|
||||
@ -1832,7 +1844,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
|
||||
bool has_hdmi_sink)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
|
||||
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
|
||||
|
||||
if (clock < 25000)
|
||||
return MODE_CLOCK_LOW;
|
||||
@ -1854,11 +1866,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
|
||||
if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
|
||||
if (intel_encoder_is_combo(encoder) && clock > 500000 && clock < 533200)
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
/* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
|
||||
if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
|
||||
if (intel_encoder_is_tc(encoder) && clock > 500000 && clock < 532800)
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
/*
|
||||
@ -1981,7 +1993,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
|
||||
struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
|
||||
enum drm_mode_status status;
|
||||
int clock = mode->clock;
|
||||
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
|
||||
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
|
||||
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
|
||||
bool ycbcr_420_only;
|
||||
enum intel_output_format sink_format;
|
||||
@ -2664,8 +2676,9 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
|
||||
drm_scdc_set_scrambling(connector, scrambling);
|
||||
}
|
||||
|
||||
static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum port port = encoder->port;
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
@ -2686,8 +2699,9 @@ static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 bxt_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum port port = encoder->port;
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
@ -2705,9 +2719,9 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum port port = encoder->port;
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
@ -2731,22 +2745,23 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum port port = encoder->port;
|
||||
|
||||
if (intel_phy_is_combo(dev_priv, phy))
|
||||
if (intel_encoder_is_combo(encoder))
|
||||
return GMBUS_PIN_1_BXT + port;
|
||||
else if (intel_phy_is_tc(dev_priv, phy))
|
||||
return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
|
||||
else if (intel_encoder_is_tc(encoder))
|
||||
return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
|
||||
|
||||
drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
|
||||
return GMBUS_PIN_2_BXT;
|
||||
}
|
||||
|
||||
static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (phy) {
|
||||
@ -2767,11 +2782,12 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
WARN_ON(port == PORT_C);
|
||||
WARN_ON(encoder->port == PORT_C);
|
||||
|
||||
/*
|
||||
* Pin mapping for RKL depends on which PCH is present. With TGP, the
|
||||
@ -2785,11 +2801,12 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
return GMBUS_PIN_1_BXT + phy;
|
||||
}
|
||||
|
||||
static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
|
||||
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(i915, port);
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
drm_WARN_ON(&i915->drm, port == PORT_A);
|
||||
drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
|
||||
|
||||
/*
|
||||
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
|
||||
@ -2803,16 +2820,16 @@ static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port po
|
||||
return GMBUS_PIN_1_BXT + phy;
|
||||
}
|
||||
|
||||
static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 dg1_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
return intel_port_to_phy(dev_priv, port) + 1;
|
||||
return intel_encoder_to_phy(encoder) + 1;
|
||||
}
|
||||
|
||||
static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
static u8 adls_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
WARN_ON(port == PORT_B || port == PORT_C);
|
||||
WARN_ON(encoder->port == PORT_B || encoder->port == PORT_C);
|
||||
|
||||
/*
|
||||
* Pin mapping for ADL-S requires TC pins for all combo phy outputs
|
||||
@ -2824,9 +2841,9 @@ static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port
|
||||
return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
|
||||
}
|
||||
|
||||
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
enum port port = encoder->port;
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
@ -2850,30 +2867,29 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum port port = encoder->port;
|
||||
u8 ddc_pin;
|
||||
|
||||
if (IS_ALDERLAKE_S(dev_priv))
|
||||
ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = adls_encoder_to_ddc_pin(encoder);
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
|
||||
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
|
||||
else if (IS_ROCKETLAKE(dev_priv))
|
||||
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
|
||||
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
|
||||
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
|
||||
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
|
||||
HAS_PCH_TGP(dev_priv))
|
||||
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = mcc_encoder_to_ddc_pin(encoder);
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = icl_encoder_to_ddc_pin(encoder);
|
||||
else if (HAS_PCH_CNP(dev_priv))
|
||||
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = cnp_encoder_to_ddc_pin(encoder);
|
||||
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
|
||||
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = bxt_encoder_to_ddc_pin(encoder);
|
||||
else if (IS_CHERRYVIEW(dev_priv))
|
||||
ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = chv_encoder_to_ddc_pin(encoder);
|
||||
else
|
||||
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
|
||||
ddc_pin = g4x_encoder_to_ddc_pin(encoder);
|
||||
|
||||
return ddc_pin;
|
||||
}
|
||||
|
@ -1444,7 +1444,7 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
|
||||
|
||||
void intel_hpd_irq_setup(struct drm_i915_private *i915)
|
||||
{
|
||||
if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
|
||||
if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
|
||||
i915->display.funcs.hotplug->hpd_irq_setup(i915);
|
||||
}
|
||||
|
||||
|
@ -392,16 +392,13 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
intel_panel_fixed_mode(connector, mode);
|
||||
int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
|
||||
int max_pixclk = to_i915(connector->base.dev)->display.cdclk.max_dotclk_freq;
|
||||
enum drm_mode_status status;
|
||||
|
||||
status = intel_cpu_transcoder_mode_valid(i915, mode);
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
status = intel_panel_mode_valid(connector, mode);
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
@ -27,7 +27,6 @@
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <acpi/video.h>
|
||||
|
||||
#include <drm/drm_edid.h>
|
||||
@ -263,7 +262,6 @@ struct intel_opregion {
|
||||
struct opregion_asle *asle;
|
||||
struct opregion_asle_ext *asle_ext;
|
||||
void *rvda;
|
||||
void *vbt_firmware;
|
||||
const void *vbt;
|
||||
u32 vbt_size;
|
||||
struct work_struct asle_work;
|
||||
@ -869,46 +867,6 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
|
||||
{ }
|
||||
};
|
||||
|
||||
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = dev_priv->display.opregion;
|
||||
const struct firmware *fw = NULL;
|
||||
const char *name = dev_priv->display.params.vbt_firmware;
|
||||
int ret;
|
||||
|
||||
if (!name || !*name)
|
||||
return -ENOENT;
|
||||
|
||||
ret = request_firmware(&fw, name, dev_priv->drm.dev);
|
||||
if (ret) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Requesting VBT firmware \"%s\" failed (%d)\n",
|
||||
name, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
|
||||
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
|
||||
if (opregion->vbt_firmware) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Found valid VBT firmware \"%s\"\n", name);
|
||||
opregion->vbt = opregion->vbt_firmware;
|
||||
opregion->vbt_size = fw->size;
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
} else {
|
||||
drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
|
||||
name);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion;
|
||||
@ -1006,9 +964,6 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
|
||||
}
|
||||
|
||||
if (intel_load_vbt_firmware(dev_priv) == 0)
|
||||
goto out;
|
||||
|
||||
if (dmi_check_system(intel_no_opregion_vbt))
|
||||
goto out;
|
||||
|
||||
@ -1176,6 +1131,16 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
|
||||
return drm_edid;
|
||||
}
|
||||
|
||||
bool intel_opregion_vbt_present(struct drm_i915_private *i915)
|
||||
{
|
||||
struct intel_opregion *opregion = i915->display.opregion;
|
||||
|
||||
if (!opregion || !opregion->vbt)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
|
||||
{
|
||||
struct intel_opregion *opregion = i915->display.opregion;
|
||||
@ -1186,7 +1151,7 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
|
||||
if (size)
|
||||
*size = opregion->vbt_size;
|
||||
|
||||
return opregion->vbt;
|
||||
return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
|
||||
@ -1312,7 +1277,6 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
|
||||
memunmap(opregion->header);
|
||||
if (opregion->rvda)
|
||||
memunmap(opregion->rvda);
|
||||
kfree(opregion->vbt_firmware);
|
||||
kfree(opregion);
|
||||
i915->display.opregion = NULL;
|
||||
}
|
||||
|
@ -53,6 +53,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
|
||||
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
|
||||
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
|
||||
|
||||
bool intel_opregion_vbt_present(struct drm_i915_private *i915);
|
||||
const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
|
||||
|
||||
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
|
||||
@ -119,6 +120,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline const void *
|
||||
intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
|
||||
{
|
||||
|
@ -972,10 +972,11 @@ static int check_overlay_dst(struct intel_overlay *overlay,
|
||||
rec->dst_width, rec->dst_height);
|
||||
|
||||
clipped = req;
|
||||
drm_rect_intersect(&clipped, &crtc_state->pipe_src);
|
||||
|
||||
if (!drm_rect_visible(&clipped) ||
|
||||
!drm_rect_equals(&clipped, &req))
|
||||
if (!drm_rect_intersect(&clipped, &crtc_state->pipe_src))
|
||||
return -EINVAL;
|
||||
|
||||
if (!drm_rect_equals(&clipped, &req))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
|
@ -119,10 +119,11 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
|
||||
if (!encoder)
|
||||
return;
|
||||
|
||||
phy = intel_port_to_phy(i915, encoder->port);
|
||||
if (intel_phy_is_tc(i915, phy))
|
||||
if (intel_encoder_is_tc(encoder))
|
||||
return;
|
||||
|
||||
phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
if (set_bit)
|
||||
pmdemand_state->active_combo_phys_mask |= BIT(phy);
|
||||
else
|
||||
@ -222,14 +223,7 @@ static bool
|
||||
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
enum phy phy;
|
||||
|
||||
if (!encoder)
|
||||
return false;
|
||||
|
||||
phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
return intel_phy_is_tc(i915, phy);
|
||||
return encoder && intel_encoder_is_tc(encoder);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -43,9 +43,8 @@ struct intel_pmdemand_state {
|
||||
struct pmdemand_params params;
|
||||
};
|
||||
|
||||
#define to_intel_pmdemand_state(x) container_of((x), \
|
||||
struct intel_pmdemand_state, \
|
||||
base)
|
||||
#define to_intel_pmdemand_state(global_state) \
|
||||
container_of_const((global_state), struct intel_pmdemand_state, base)
|
||||
|
||||
void intel_pmdemand_init_early(struct drm_i915_private *i915);
|
||||
int intel_pmdemand_init(struct drm_i915_private *i915);
|
||||
|
@ -605,8 +605,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
|
||||
intel_de_read(dev_priv, pp_stat_reg),
|
||||
intel_de_read(dev_priv, pp_ctrl_reg));
|
||||
|
||||
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
|
||||
mask, value, 5000))
|
||||
if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
|
||||
drm_err(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
|
||||
dig_port->base.base.base.id, dig_port->base.base.name,
|
||||
@ -1671,6 +1670,37 @@ void intel_pps_setup(struct drm_i915_private *i915)
|
||||
i915->display.pps.mmio_base = PPS_BASE;
|
||||
}
|
||||
|
||||
static int intel_pps_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct intel_connector *connector = m->private;
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
|
||||
if (connector->base.status != connector_status_connected)
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "Panel power up delay: %d\n",
|
||||
intel_dp->pps.panel_power_up_delay);
|
||||
seq_printf(m, "Panel power down delay: %d\n",
|
||||
intel_dp->pps.panel_power_down_delay);
|
||||
seq_printf(m, "Backlight on delay: %d\n",
|
||||
intel_dp->pps.backlight_on_delay);
|
||||
seq_printf(m, "Backlight off delay: %d\n",
|
||||
intel_dp->pps.backlight_off_delay);
|
||||
|
||||
return 0;
|
||||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(intel_pps);
|
||||
|
||||
void intel_pps_connector_debugfs_add(struct intel_connector *connector)
|
||||
{
|
||||
struct dentry *root = connector->base.debugfs_entry;
|
||||
int connector_type = connector->base.connector_type;
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_eDP)
|
||||
debugfs_create_file("i915_panel_timings", 0444, root,
|
||||
connector, &intel_pps_fops);
|
||||
}
|
||||
|
||||
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
i915_reg_t pp_reg;
|
||||
|
@ -51,6 +51,8 @@ void vlv_pps_init(struct intel_encoder *encoder,
|
||||
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
|
||||
void intel_pps_setup(struct drm_i915_private *i915);
|
||||
|
||||
void intel_pps_connector_debugfs_add(struct intel_connector *connector);
|
||||
|
||||
void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
|
||||
|
||||
#endif /* __INTEL_PPS_H__ */
|
||||
|
@@ -171,14 +171,27 @@
*
* The rest of the bits are more self-explanatory and/or
* irrelevant for normal operation.
*
* Description of intel_crtc_state variables. has_psr, has_panel_replay and
* has_sel_update:
*
* has_psr (alone): PSR1
* has_psr + has_sel_update: PSR2
* has_psr + has_panel_replay: Panel Replay
* has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
*
* Description of some intel_psr variables. enabled, panel_replay_enabled,
* sel_update_enabled
*
* enabled (alone): PSR1
* enabled + sel_update_enabled: PSR2
* enabled + panel_replay_enabled: Panel Replay
* enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
*/
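
The two flag tables above are easy to misread at a glance, so here is a minimal user-space sketch (not part of the patch; the helper name and standalone program are invented for illustration) that maps the three intel_crtc_state booleans to the mode names listed in the comment.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the has_psr/has_panel_replay/has_sel_update table above. */
static const char *psr_mode_name(bool has_psr, bool has_panel_replay,
				 bool has_sel_update)
{
	if (!has_psr)
		return "disabled";
	if (has_panel_replay)
		return has_sel_update ? "Panel Replay Selective Update"
				      : "Panel Replay";
	return has_sel_update ? "PSR2" : "PSR1";
}

int main(void)
{
	printf("%s\n", psr_mode_name(true, false, false));	/* PSR1 */
	printf("%s\n", psr_mode_name(true, false, true));	/* PSR2 */
	printf("%s\n", psr_mode_name(true, true, false));	/* Panel Replay */
	printf("%s\n", psr_mode_name(true, true, true));	/* Panel Replay SU */
	return 0;
}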
|
||||
|
||||
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
|
||||
(intel_dp)->psr.source_support)
|
||||
|
||||
#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
|
||||
(intel_dp)->psr.source_panel_replay_support)
|
||||
|
||||
bool intel_encoder_can_psr(struct intel_encoder *encoder)
|
||||
{
|
||||
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
@ -330,6 +343,9 @@ static void psr_irq_control(struct intel_dp *intel_dp)
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
u32 mask;
|
||||
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
return;
|
||||
|
||||
mask = psr_irq_psr_error_bit_get(intel_dp);
|
||||
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
|
||||
mask |= psr_irq_post_exit_bit_get(intel_dp) |
|
||||
@ -619,40 +635,59 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.panel_replay_enabled ?
PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
}

/*
* Note: Most of the bits are the same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG.
* We are relying on the PSR definitions for these "common" bits.
*/
|
||||
void intel_psr_enable_sink(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u8 dpcd_val = DP_PSR_ENABLE;
|
||||
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
return;
|
||||
|
||||
if (intel_dp->psr.psr2_enabled) {
|
||||
if (crtc_state->has_psr2) {
|
||||
/* Enable ALPM at sink for psr2 */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
|
||||
DP_ALPM_ENABLE |
|
||||
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
|
||||
if (!crtc_state->has_panel_replay) {
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_RECEIVER_ALPM_CONFIG,
|
||||
DP_ALPM_ENABLE |
|
||||
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
|
||||
|
||||
if (psr2_su_region_et_valid(intel_dp))
|
||||
dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
|
||||
}
|
||||
|
||||
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
|
||||
if (psr2_su_region_et_valid(intel_dp))
|
||||
dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
|
||||
} else {
|
||||
if (intel_dp->psr.link_standby)
|
||||
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 8)
|
||||
if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
|
||||
dpcd_val |= DP_PSR_CRC_VERIFICATION;
|
||||
}
|
||||
|
||||
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
||||
if (crtc_state->has_panel_replay)
|
||||
dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
|
||||
DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
|
||||
|
||||
if (crtc_state->req_psr2_sdp_prior_scanline)
|
||||
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
|
||||
|
||||
if (intel_dp->psr.entry_setup_frames > 0)
|
||||
dpcd_val |= DP_PSR_FRAME_CAPTURE;
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
intel_psr_get_enable_sink_offset(intel_dp),
|
||||
dpcd_val);
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
||||
}
|
||||
|
||||
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
|
||||
@ -1126,6 +1161,141 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
* See Bspec: 71632 for the table
*
* Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
*
* Half cycle duration:
*
* Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
* FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
*
* Link rates 5.4 - 8.1
* PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
* LFPS Period chosen is the mid-point of the min:max values from the table
* FLOOR( LFPS Period in Symbol clocks /
* (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
*/
|
||||
static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
|
||||
int *silence_period,
|
||||
int *lfps_half_cycle)
|
||||
{
|
||||
switch (link_rate) {
|
||||
case 162000:
|
||||
*silence_period = 20;
|
||||
*lfps_half_cycle = 5;
|
||||
break;
|
||||
case 216000:
|
||||
*silence_period = 27;
|
||||
*lfps_half_cycle = 7;
|
||||
break;
|
||||
case 243000:
|
||||
*silence_period = 31;
|
||||
*lfps_half_cycle = 8;
|
||||
break;
|
||||
case 270000:
|
||||
*silence_period = 34;
|
||||
*lfps_half_cycle = 9;
|
||||
break;
|
||||
case 324000:
|
||||
*silence_period = 41;
|
||||
*lfps_half_cycle = 11;
|
||||
break;
|
||||
case 432000:
|
||||
*silence_period = 56;
|
||||
*lfps_half_cycle = 15;
|
||||
break;
|
||||
case 540000:
|
||||
*silence_period = 69;
|
||||
*lfps_half_cycle = 12;
|
||||
break;
|
||||
case 648000:
|
||||
*silence_period = 84;
|
||||
*lfps_half_cycle = 15;
|
||||
break;
|
||||
case 675000:
|
||||
*silence_period = 87;
|
||||
*lfps_half_cycle = 15;
|
||||
break;
|
||||
case 810000:
|
||||
*silence_period = 104;
|
||||
*lfps_half_cycle = 19;
|
||||
break;
|
||||
default:
|
||||
*silence_period = *lfps_half_cycle = -1;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
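
As a quick sanity check of the 1.62 - 4.32 Gbps half-cycle formula quoted in the comment above (assuming "Link Rate" means the bit rate, i.e. the kHz symbol clock passed as link_rate times 10), the following user-space sketch reproduces the table entries for those rates; it is illustrative only and not part of the patch.

#include <stdio.h>

/* FLOOR((Link Rate * tLFPS_Cycle) / (2 * 10)) with tLFPS_Cycle = 70 ns,
 * link_rate given as the symbol clock in kHz (bit rate = link_rate * 10). */
static int lfps_half_cycle(int link_rate)
{
	long long bits_per_lfps_cycle = (long long)link_rate * 10 * 70 / 1000000;

	return (int)(bits_per_lfps_cycle / (2 * 10));
}

int main(void)
{
	static const int rate[] = { 162000, 216000, 243000, 270000, 324000, 432000 };
	static const int expected[] = { 5, 7, 8, 9, 11, 15 };	/* from the switch above */
	int i;

	for (i = 0; i < 6; i++)
		printf("%d kHz: computed %d, table %d\n",
		       rate[i], lfps_half_cycle(rate[i]), expected[i]);
	return 0;
}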
|
||||
|
||||
/*
* AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period, Max+
* tSilence, Max+ tPHY Establishment + tCDS) / tline)
* For the "PHY P2 to P0" latency see the PHY Power Control page
* (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
* : 12 us
* The tLFPS_Period, Max term is 800ns
* The tSilence, Max term is 180ns
* The tPHY Establishment (a.k.a. t1) term is 50us
* The tCDS term is 1 or 2 times t2
* t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
* Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
* Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
* adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
* within the CDS period complete within the CDS period regardless of
* entry into the period
* tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
* TPS4 Length = 252 Symbols
*/
static int _lnl_compute_aux_less_wake_time(int port_clock)
{
int tphy2_p2_to_p0 = 12 * 1000;
int tlfps_period_max = 800;
int tsilence_max = 180;
int t1 = 50 * 1000;
int tps4 = 252;
int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
int t2 = num_ml_phy_lock * tml_phy_lock;
int tcds = 1 * t2;

return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
t1 + tcds, 1000);
}
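
To get a feel for the magnitudes, here is a user-space sketch that walks through the same terms for an assumed HBR link (port_clock = 270000 kHz). It widens the intermediates to 64 bits purely so the standalone example cannot overflow, so it illustrates the formula terms rather than the exact behaviour of the kernel expression above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same terms as _lnl_compute_aux_less_wake_time() above, 64-bit intermediates. */
static long long aux_less_wake_time(long long port_clock)
{
	long long tphy2_p2_to_p0 = 12 * 1000;
	long long tlfps_period_max = 800;
	long long tsilence_max = 180;
	long long t1 = 50 * 1000;
	long long tps4 = 252;
	long long tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
	long long num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
	long long t2 = num_ml_phy_lock * tml_phy_lock;
	long long tcds = 1 * t2;

	return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
			    t1 + tcds, 1000);
}

int main(void)
{
	/* prints 147, which the caller then converts to scanlines */
	printf("%lld\n", aux_less_wake_time(270000));
	return 0;
}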
|
||||
|
||||
static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
int aux_less_wake_time, aux_less_wake_lines, silence_period,
|
||||
lfps_half_cycle;
|
||||
|
||||
aux_less_wake_time =
|
||||
_lnl_compute_aux_less_wake_time(crtc_state->port_clock);
|
||||
aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
|
||||
aux_less_wake_time);
|
||||
|
||||
if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
|
||||
&silence_period,
|
||||
&lfps_half_cycle))
|
||||
return false;
|
||||
|
||||
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
|
||||
silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
|
||||
lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
|
||||
return false;
|
||||
|
||||
if (i915->display.params.psr_safest_params)
|
||||
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
|
||||
|
||||
intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
|
||||
intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
|
||||
intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
@ -1142,6 +1312,9 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
|
||||
if (check_entry_lines > 15)
|
||||
return false;
|
||||
|
||||
if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
|
||||
return false;
|
||||
|
||||
if (i915->display.params.psr_safest_params)
|
||||
check_entry_lines = 15;
|
||||
|
||||
@ -1150,28 +1323,52 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
* IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
* are 50 us io wake time and 32 us fast wake time. Clearly precharge pulses are
* not (improperly) included in the 32 us fast wake time. 50 us - 32 us = 18 us.
*/
|
||||
static int skl_io_buffer_wake_time(void)
|
||||
{
|
||||
return 18;
|
||||
}
|
||||
|
||||
static int tgl_io_buffer_wake_time(void)
|
||||
{
|
||||
return 10;
|
||||
}
|
||||
|
||||
static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
|
||||
if (DISPLAY_VER(i915) >= 12)
|
||||
return tgl_io_buffer_wake_time();
|
||||
else
|
||||
return skl_io_buffer_wake_time();
|
||||
}
|
||||
|
||||
static bool _compute_alpm_params(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
|
||||
int tfw_exit_latency = 20; /* eDP spec */
|
||||
int phy_wake = 4; /* eDP spec */
|
||||
int preamble = 8; /* eDP spec */
|
||||
int precharge = intel_dp_aux_fw_sync_len() - preamble;
|
||||
u8 max_wake_lines;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 12) {
|
||||
io_wake_time = 42;
|
||||
/*
|
||||
* According to Bspec it's 42us, but based on testing
|
||||
* it is not enough -> use 45 us.
|
||||
*/
|
||||
fast_wake_time = 45;
|
||||
io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
|
||||
preamble + phy_wake + tfw_exit_latency;
|
||||
fast_wake_time = precharge + preamble + phy_wake +
|
||||
tfw_exit_latency;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 12)
|
||||
/* TODO: Check how we can use ALPM_CTL fast wake extended field */
|
||||
max_wake_lines = 12;
|
||||
} else {
|
||||
io_wake_time = 50;
|
||||
fast_wake_time = 32;
|
||||
else
|
||||
max_wake_lines = 8;
|
||||
}
|
||||
|
||||
io_wake_lines = intel_usecs_to_scanlines(
|
||||
&crtc_state->hw.adjusted_mode, io_wake_time);
|
||||
@ -1422,12 +1619,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME figure out what is wrong with PSR+bigjoiner and
|
||||
* fix it. Presumably something related to the fact that
|
||||
* PSR is a transcoder level feature.
|
||||
*/
|
||||
if (crtc_state->bigjoiner_pipes) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR disabled due to bigjoiner\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (CAN_PANEL_REPLAY(intel_dp))
|
||||
crtc_state->has_panel_replay = true;
|
||||
else
|
||||
crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
|
||||
|
||||
if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
|
||||
crtc_state->has_psr = crtc_state->has_panel_replay ? true :
|
||||
_psr_compute_config(intel_dp, crtc_state);
|
||||
|
||||
if (!crtc_state->has_psr)
|
||||
return;
|
||||
|
||||
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
|
||||
@ -1454,7 +1663,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
|
||||
goto unlock;
|
||||
|
||||
if (intel_dp->psr.panel_replay_enabled) {
|
||||
pipe_config->has_panel_replay = true;
|
||||
pipe_config->has_psr = pipe_config->has_panel_replay = true;
|
||||
} else {
|
||||
/*
|
||||
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
|
||||
@ -1559,14 +1768,44 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp)
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
struct intel_psr *psr = &intel_dp->psr;
|
||||
u32 alpm_ctl;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) < 20)
|
||||
if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
|
||||
!intel_dp_is_edp(intel_dp)))
|
||||
return;
|
||||
|
||||
intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder),
|
||||
ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
|
||||
ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines) |
|
||||
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines));
|
||||
/*
|
||||
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
|
||||
* check panel support at this point.
|
||||
*/
|
||||
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
|
||||
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
|
||||
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
|
||||
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
|
||||
|
||||
intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
|
||||
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
|
||||
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
|
||||
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
|
||||
PORT_ALPM_CTL_SILENCE_PERIOD(
|
||||
psr->alpm_parameters.silence_period_sym_clocks));
|
||||
|
||||
intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
|
||||
PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
|
||||
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
|
||||
psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
|
||||
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
|
||||
psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
|
||||
PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
|
||||
psr->alpm_parameters.lfps_half_cycle_num_of_syms));
|
||||
} else {
|
||||
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
|
||||
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
|
||||
}
|
||||
|
||||
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
|
||||
|
||||
intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
|
||||
}
|
||||
|
||||
static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
||||
@ -1574,7 +1813,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
u32 mask;
|
||||
u32 mask = 0;
|
||||
|
||||
/*
|
||||
* Only HSW and BDW have PSR AUX registers that need to be setup.
|
||||
@ -1588,34 +1827,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
||||
* mask LPSP to avoid dependency on other drivers that might block
|
||||
* runtime_pm besides preventing other hw tracking issues now we
|
||||
* can rely on frontbuffer tracking.
|
||||
*
|
||||
* From bspec prior LunarLake:
|
||||
* Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
|
||||
* panel replay mode.
|
||||
*
|
||||
* From bspec beyod LunarLake:
|
||||
* Panel Replay on DP: No bits are applicable
|
||||
* Panel Replay on eDP: All bits are applicable
|
||||
*/
|
||||
mask = EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD;
|
||||
if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
|
||||
mask = EDP_PSR_DEBUG_MASK_HPD;
|
||||
|
||||
/*
|
||||
* For some unknown reason on HSW non-ULT (or at least on
|
||||
* Dell Latitude E6540) external displays start to flicker
|
||||
* when PSR is enabled on the eDP. SR/PC6 residency is much
|
||||
* higher than should be possible with an external display.
|
||||
* As a workaround leave LPSP unmasked to prevent PSR entry
|
||||
* when external displays are active.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
|
||||
mask |= EDP_PSR_DEBUG_MASK_LPSP;
|
||||
if (intel_dp_is_edp(intel_dp)) {
|
||||
mask |= EDP_PSR_DEBUG_MASK_MEMUP;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) < 20)
|
||||
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
|
||||
/*
|
||||
* For some unknown reason on HSW non-ULT (or at least on
|
||||
* Dell Latitude E6540) external displays start to flicker
|
||||
* when PSR is enabled on the eDP. SR/PC6 residency is much
|
||||
* higher than should be possible with an external display.
|
||||
* As a workaround leave LPSP unmasked to prevent PSR entry
|
||||
* when external displays are active.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
|
||||
mask |= EDP_PSR_DEBUG_MASK_LPSP;
|
||||
|
||||
/*
|
||||
* No separate pipe reg write mask on hsw/bdw, so have to unmask all
|
||||
* registers in order to keep the CURSURFLIVE tricks working :(
|
||||
*/
|
||||
if (IS_DISPLAY_VER(dev_priv, 9, 10))
|
||||
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
|
||||
if (DISPLAY_VER(dev_priv) < 20)
|
||||
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
|
||||
|
||||
/* allow PSR with sprite enabled */
|
||||
if (IS_HASWELL(dev_priv))
|
||||
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
|
||||
/*
|
||||
* No separate pipe reg write mask on hsw/bdw, so have to unmask all
|
||||
* registers in order to keep the CURSURFLIVE tricks working :(
|
||||
*/
|
||||
if (IS_DISPLAY_VER(dev_priv, 9, 10))
|
||||
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
|
||||
|
||||
/* allow PSR with sprite enabled */
|
||||
if (IS_HASWELL(dev_priv))
|
||||
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
|
||||
|
||||
@ -1634,7 +1885,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
||||
intel_dp->psr.psr2_sel_fetch_enabled ?
|
||||
IGNORE_PSR2_HW_TRACKING : 0);
|
||||
|
||||
lnl_alpm_configure(intel_dp);
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
lnl_alpm_configure(intel_dp);
|
||||
|
||||
/*
|
||||
* Wa_16013835468
|
||||
@ -1675,6 +1927,9 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
u32 val;
|
||||
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
goto no_err;
|
||||
|
||||
/*
|
||||
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
|
||||
* will still keep the error set even after the reset done in the
|
||||
@ -1692,6 +1947,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
|
||||
return false;
|
||||
}
|
||||
|
||||
no_err:
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1700,7 +1956,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
|
||||
u32 val;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
|
||||
@ -1722,14 +1977,22 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
|
||||
if (!psr_interrupt_error_check(intel_dp))
|
||||
return;
|
||||
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
if (intel_dp->psr.panel_replay_enabled) {
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
|
||||
else
|
||||
} else {
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
|
||||
intel_dp->psr.psr2_enabled ? "2" : "1");
|
||||
|
||||
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
|
||||
intel_psr_enable_sink(intel_dp);
|
||||
/*
|
||||
* Panel replay has to be enabled before link training: doing it
|
||||
* only for PSR here.
|
||||
*/
|
||||
intel_psr_enable_sink(intel_dp, crtc_state);
|
||||
}
|
||||
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
intel_snps_phy_update_psr_power_state(&dig_port->base, true);
|
||||
|
||||
intel_psr_enable_source(intel_dp, crtc_state);
|
||||
intel_dp->psr.enabled = true;
|
||||
intel_dp->psr.paused = false;
|
||||
@ -1799,8 +2062,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
enum phy phy = intel_port_to_phy(dev_priv,
|
||||
dp_to_dig_port(intel_dp)->base.port);
|
||||
|
||||
lockdep_assert_held(&intel_dp->psr.lock);
|
||||
|
||||
@ -1835,12 +2096,25 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
||||
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
|
||||
}
|
||||
|
||||
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
|
||||
|
||||
/* Panel Replay on eDP is always using ALPM aux less. */
|
||||
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
|
||||
intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
|
||||
ALPM_CTL_ALPM_ENABLE |
|
||||
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
|
||||
|
||||
intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
|
||||
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
|
||||
}
|
||||
|
||||
/* Disable PSR on Sink */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
intel_psr_get_enable_sink_offset(intel_dp), 0);
|
||||
|
||||
if (intel_dp->psr.psr2_enabled)
|
||||
if (!intel_dp->psr.panel_replay_enabled &&
|
||||
intel_dp->psr.psr2_enabled)
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
|
||||
|
||||
intel_dp->psr.enabled = false;
|
||||
@ -1888,7 +2162,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_psr *psr = &intel_dp->psr;
|
||||
|
||||
if (!CAN_PSR(intel_dp))
|
||||
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
|
||||
return;
|
||||
|
||||
mutex_lock(&psr->lock);
|
||||
@ -1921,7 +2195,7 @@ void intel_psr_resume(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_psr *psr = &intel_dp->psr;
|
||||
|
||||
if (!CAN_PSR(intel_dp))
|
||||
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
|
||||
return;
|
||||
|
||||
mutex_lock(&psr->lock);
|
||||
@ -1994,6 +2268,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
|
||||
|
||||
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
struct intel_encoder *encoder;
|
||||
@ -2013,6 +2288,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
|
||||
|
||||
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
|
||||
crtc_state->psr2_man_track_ctl);
|
||||
|
||||
if (!crtc_state->enable_psr2_su_region_et)
|
||||
return;
|
||||
|
||||
intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
|
||||
crtc_state->pipe_srcsz_early_tpt);
|
||||
}
|
||||
|
||||
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
|
||||
@ -2051,6 +2332,25 @@ exit:
|
||||
crtc_state->psr2_man_track_ctl = val;
|
||||
}
|
||||
|
||||
static u32
|
||||
psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
|
||||
bool full_update, bool cursor_in_su_area)
|
||||
{
|
||||
int width, height;
|
||||
|
||||
if (!crtc_state->enable_psr2_su_region_et || full_update)
|
||||
return 0;
|
||||
|
||||
if (!cursor_in_su_area)
|
||||
return PIPESRC_WIDTH(0) |
|
||||
PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
|
||||
|
||||
width = drm_rect_width(&crtc_state->psr2_su_area);
|
||||
height = drm_rect_height(&crtc_state->psr2_su_area);
|
||||
|
||||
return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
|
||||
}
|
||||
|
||||
static void clip_area_update(struct drm_rect *overlap_damage_area,
|
||||
struct drm_rect *damage_area,
|
||||
struct drm_rect *pipe_src)
|
||||
@ -2095,21 +2395,38 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
|
||||
* cursor fully when cursor is in SU area.
|
||||
*/
|
||||
static void
|
||||
intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
|
||||
struct intel_plane_state *cursor_state)
|
||||
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
bool *cursor_in_su_area)
|
||||
{
|
||||
struct drm_rect inter;
|
||||
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_plane_state *new_plane_state;
|
||||
struct intel_plane *plane;
|
||||
int i;
|
||||
|
||||
if (!crtc_state->enable_psr2_su_region_et ||
|
||||
!cursor_state->uapi.visible)
|
||||
if (!crtc_state->enable_psr2_su_region_et)
|
||||
return;
|
||||
|
||||
inter = crtc_state->psr2_su_area;
|
||||
if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
|
||||
return;
|
||||
for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
|
||||
struct drm_rect inter;
|
||||
|
||||
clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
|
||||
&crtc_state->pipe_src);
|
||||
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
|
||||
continue;
|
||||
|
||||
if (plane->id != PLANE_CURSOR)
|
||||
continue;
|
||||
|
||||
if (!new_plane_state->uapi.visible)
|
||||
continue;
|
||||
|
||||
inter = crtc_state->psr2_su_area;
|
||||
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
|
||||
continue;
|
||||
|
||||
clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
|
||||
&crtc_state->pipe_src);
|
||||
*cursor_in_su_area = true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2152,10 +2469,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_plane_state *new_plane_state, *old_plane_state,
|
||||
*cursor_plane_state = NULL;
|
||||
struct intel_plane_state *new_plane_state, *old_plane_state;
|
||||
struct intel_plane *plane;
|
||||
bool full_update = false;
|
||||
bool full_update = false, cursor_in_su_area = false;
|
||||
int i, ret;
|
||||
|
||||
if (!crtc_state->enable_psr2_sel_fetch)
|
||||
@ -2238,13 +2554,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
|
||||
|
||||
clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
|
||||
|
||||
/*
* Cursor plane new state is stored to adjust su area to cover
* cursor area fully.
*/
|
||||
if (plane->id == PLANE_CURSOR)
|
||||
cursor_plane_state = new_plane_state;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2273,9 +2582,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Adjust su area to cover cursor fully as necessary */
|
||||
if (cursor_plane_state)
|
||||
intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
|
||||
/*
|
||||
* Adjust su area to cover cursor fully as necessary (early
|
||||
* transport). This needs to be done after
|
||||
* drm_atomic_add_affected_planes to ensure visible cursor is added into
|
||||
* affected planes even when cursor is not updated by itself.
|
||||
*/
|
||||
intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
|
||||
|
||||
intel_psr2_sel_fetch_pipe_alignment(crtc_state);
|
||||
|
||||
@ -2338,6 +2651,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
|
||||
skip_sel_fetch_set_loop:
|
||||
psr2_man_trk_ctl_calc(crtc_state, full_update);
|
||||
crtc_state->pipe_srcsz_early_tpt =
|
||||
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
|
||||
cursor_in_su_area);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2394,7 +2710,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
|
||||
if (!crtc_state->has_psr)
|
||||
return;
|
||||
|
||||
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
|
||||
@ -2994,6 +3310,13 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
|
||||
}
|
||||
}
|
||||
|
||||
/*
* On common bits:
* DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
* DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
* DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
* this function relies on the PSR definitions
*/
|
||||
void intel_psr_short_pulse(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
@ -3003,7 +3326,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
|
||||
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
|
||||
DP_PSR_LINK_CRC_ERROR;
|
||||
|
||||
if (!CAN_PSR(intel_dp))
|
||||
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
|
||||
return;
|
||||
|
||||
mutex_lock(&psr->lock);
|
||||
@ -3017,12 +3340,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
|
||||
if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
|
||||
(error_status & errors)) {
|
||||
intel_psr_disable_locked(intel_dp);
|
||||
psr->sink_not_reliable = true;
|
||||
}
|
||||
|
||||
if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
|
||||
if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
|
||||
!error_status)
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR sink internal error, disabling PSR\n");
|
||||
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
|
||||
@ -3042,8 +3367,10 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
|
||||
/* clear status register */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
|
||||
|
||||
psr_alpm_check(intel_dp);
|
||||
psr_capability_changed_check(intel_dp);
|
||||
if (!psr->panel_replay_enabled) {
|
||||
psr_alpm_check(intel_dp);
|
||||
psr_capability_changed_check(intel_dp);
|
||||
}
|
||||
|
||||
exit:
|
||||
mutex_unlock(&psr->lock);
|
||||
|
@ -21,8 +21,13 @@ struct intel_encoder;
|
||||
struct intel_plane;
|
||||
struct intel_plane_state;
|
||||
|
||||
#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
|
||||
(intel_dp)->psr.source_panel_replay_support)
|
||||
|
||||
bool intel_encoder_can_psr(struct intel_encoder *encoder);
|
||||
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
|
||||
void intel_psr_enable_sink(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc);
|
||||
void intel_psr_post_plane_update(struct intel_atomic_state *state,
|
||||
|
@ -348,9 +348,13 @@
|
||||
#define PORT_ALPM_LFPS_CTL(tran) _MMIO_TRANS2(tran, _PORT_ALPM_LFPS_CTL_A)
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
|
||||
#define ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES 5
|
||||
#define ALPM_CTL_EXTENDED_FAST_WAKE_TIME(lines) REG_FIELD_PREP(ALPM_CTL_EXTENDED_FAST_WAKE_TIME_MASK, (lines) - ALPM_CTL_EXTENDED_FAST_WAKE_MIN_LINES)
|
||||
#define ALPM_CTL_AUX_LESS_WAKE_TIME_MASK REG_GENMASK(5, 0)
|
||||
#define ALPM_CTL_AUX_LESS_WAKE_TIME(val) REG_FIELD_PREP(ALPM_CTL_AUX_LESS_WAKE_TIME_MASK, val)
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK, (val) - PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN)
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
|
||||
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
|
||||
#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
|
||||
#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
|
||||
#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
|
||||
#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
|
||||
|
||||
#endif /* __INTEL_PSR_REGS_H__ */
|
||||
|
@ -193,7 +193,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
#define to_intel_sdvo_connector_state(conn_state) \
|
||||
container_of((conn_state), struct intel_sdvo_connector_state, base.base)
|
||||
container_of_const((conn_state), struct intel_sdvo_connector_state, base.base)
|
||||
|
||||
static bool
|
||||
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo);
|
||||
@ -1944,7 +1944,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
|
||||
struct intel_sdvo_connector *intel_sdvo_connector =
|
||||
to_intel_sdvo_connector(connector);
|
||||
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
|
||||
int max_dotclk = i915->max_dotclk_freq;
|
||||
int max_dotclk = i915->display.cdclk.max_dotclk_freq;
|
||||
enum drm_mode_status status;
|
||||
int clock = mode->clock;
|
||||
|
||||
@ -1952,9 +1952,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
if (clock > max_dotclk)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
@ -2378,7 +2375,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
|
||||
u64 *val)
|
||||
{
|
||||
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
|
||||
const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
|
||||
const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
|
||||
|
||||
if (property == intel_sdvo_connector->tv_format) {
|
||||
int i;
|
||||
|
@ -44,12 +44,14 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
|
||||
}
|
||||
}
|
||||
|
||||
void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
|
||||
enum phy phy, bool enable)
|
||||
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
u32 val;
|
||||
|
||||
if (!intel_phy_is_snps(i915, phy))
|
||||
if (!intel_encoder_is_snps(encoder))
|
||||
return;
|
||||
|
||||
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
|
||||
@ -63,7 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
const struct intel_ddi_buf_trans *trans;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
int n_entries, ln;
|
||||
|
||||
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
|
||||
@ -1822,7 +1824,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
i915_reg_t enable_reg = (phy <= PHY_D ?
|
||||
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
|
||||
|
||||
@ -1879,7 +1881,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
|
||||
void intel_mpllb_disable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
i915_reg_t enable_reg = (phy <= PHY_D ?
|
||||
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
|
||||
|
||||
@ -1951,7 +1953,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_mpllb_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
|
||||
enum phy phy = intel_encoder_to_phy(encoder);
|
||||
|
||||
pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
|
||||
pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
|
||||
|
@ -17,8 +17,8 @@ struct intel_mpllb_state;
|
||||
enum phy;
|
||||
|
||||
void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
|
||||
void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
|
||||
enum phy phy, bool enable);
|
||||
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
|
||||
bool enable);
|
||||
|
||||
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
|
||||
struct intel_encoder *encoder);
|
||||
|
@ -100,11 +100,9 @@ static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
|
||||
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
|
||||
enum tc_port_mode mode)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
struct intel_tc_port *tc = to_tc_port(dig_port);
|
||||
|
||||
return intel_phy_is_tc(i915, phy) && tc->mode == mode;
|
||||
return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
|
||||
}
|
||||
|
||||
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
|
||||
@ -124,11 +122,9 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
|
||||
|
||||
bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
struct intel_tc_port *tc = to_tc_port(dig_port);
|
||||
|
||||
return intel_phy_is_tc(i915, phy) && !tc->legacy_port;
|
||||
return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -254,8 +250,7 @@ assert_tc_cold_blocked(struct intel_tc_port *tc)
|
||||
static enum intel_display_power_domain
|
||||
tc_port_power_domain(struct intel_tc_port *tc)
|
||||
{
|
||||
struct drm_i915_private *i915 = tc_to_i915(tc);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
|
||||
|
||||
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
|
||||
}
|
||||
@ -302,7 +297,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
|
||||
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
|
||||
intel_wakeref_t wakeref;
|
||||
u32 val, pin_assignment;
|
||||
|
||||
@ -375,9 +370,8 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
struct intel_tc_port *tc = to_tc_port(dig_port);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
|
||||
if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
|
||||
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
|
||||
return 4;
|
||||
|
||||
assert_tc_cold_blocked(tc);
|
||||
@ -458,9 +452,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
|
||||
|
||||
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
|
||||
{
|
||||
struct drm_i915_private *i915 = tc_to_i915(tc);
|
||||
enum port port = tc->dig_port->base.port;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
|
||||
|
||||
/*
|
||||
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
|
||||
@ -812,7 +804,7 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
|
||||
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
|
||||
{
|
||||
struct drm_i915_private *i915 = tc_to_i915(tc);
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
|
||||
u32 val;
|
||||
|
||||
assert_display_core_power_enabled(tc);
|
||||
@ -1635,10 +1627,7 @@ static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
|
||||
|
||||
bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
|
||||
if (!intel_phy_is_tc(i915, phy))
|
||||
if (!intel_encoder_is_tc(&dig_port->base))
|
||||
return false;
|
||||
|
||||
return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
|
||||
@ -1740,11 +1729,9 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
|
||||
|
||||
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
struct intel_tc_port *tc = to_tc_port(dig_port);
|
||||
|
||||
if (!intel_phy_is_tc(i915, phy))
|
||||
if (!intel_encoder_is_tc(&dig_port->base))
|
||||
return;
|
||||
|
||||
cancel_delayed_work(&tc->link_reset_work);
|
||||
@ -1861,7 +1848,7 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
struct intel_tc_port *tc;
|
||||
enum port port = dig_port->base.port;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, port);
|
||||
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
|
||||
return -EINVAL;
|
||||
|
@ -885,7 +885,8 @@ struct intel_tv_connector_state {
|
||||
bool bypass_vfilter;
|
||||
};
|
||||
|
||||
#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
|
||||
#define to_intel_tv_connector_state(conn_state) \
|
||||
container_of_const((conn_state), struct intel_tv_connector_state, base)
|
||||
|
||||
static struct drm_connector_state *
|
||||
intel_tv_connector_duplicate_state(struct drm_connector *connector)
|
||||
@ -961,16 +962,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->dev);
|
||||
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
|
||||
int max_dotclk = i915->max_dotclk_freq;
|
||||
int max_dotclk = i915->display.cdclk.max_dotclk_freq;
|
||||
enum drm_mode_status status;
|
||||
|
||||
status = intel_cpu_transcoder_mode_valid(i915, mode);
|
||||
if (status != MODE_OK)
|
||||
return status;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
||||
if (mode->clock > max_dotclk)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
|
@ -485,6 +485,7 @@ struct child_device_config {
|
||||
u8 hdmi_iboost_level:4; /* 196+ */
|
||||
u8 dp_max_link_rate:3; /* 216+ */
|
||||
u8 dp_max_link_rate_reserved:5; /* 216+ */
|
||||
u8 efp_index; /* 256+ */
|
||||
} __packed;
|
||||
|
||||
struct bdb_general_definitions {
|
||||
@ -602,22 +603,22 @@ struct bdb_driver_features {
|
||||
u8 custom_vbt_version; /* 155+ */
|
||||
|
||||
/* Driver Feature Flags */
|
||||
u16 rmpm_enabled:1; /* 165+ */
|
||||
u16 s2ddt_enabled:1; /* 165+ */
|
||||
u16 dpst_enabled:1; /* 165-227 */
|
||||
u16 bltclt_enabled:1; /* 165+ */
|
||||
u16 adb_enabled:1; /* 165-227 */
|
||||
u16 drrs_enabled:1; /* 165-227 */
|
||||
u16 grs_enabled:1; /* 165+ */
|
||||
u16 gpmt_enabled:1; /* 165+ */
|
||||
u16 tbt_enabled:1; /* 165+ */
|
||||
u16 rmpm_enabled:1; /* 159+ */
|
||||
u16 s2ddt_enabled:1; /* 159+ */
|
||||
u16 dpst_enabled:1; /* 159-227 */
|
||||
u16 bltclt_enabled:1; /* 159+ */
|
||||
u16 adb_enabled:1; /* 159-227 */
|
||||
u16 drrs_enabled:1; /* 159-227 */
|
||||
u16 grs_enabled:1; /* 159+ */
|
||||
u16 gpmt_enabled:1; /* 159+ */
|
||||
u16 tbt_enabled:1; /* 159+ */
|
||||
u16 psr_enabled:1; /* 165-227 */
|
||||
u16 ips_enabled:1; /* 165+ */
|
||||
u16 dpfs_enabled:1; /* 165+ */
|
||||
u16 dfps_enabled:1; /* 165+ */
|
||||
u16 dmrrs_enabled:1; /* 174-227 */
|
||||
u16 adt_enabled:1; /* ???-228 */
|
||||
u16 hpd_wake:1; /* 201-240 */
|
||||
u16 pc_feature_valid:1;
|
||||
u16 pc_feature_valid:1; /* 159+ */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
@ -880,11 +881,12 @@ struct bdb_lvds_lfp_data_tail {
|
||||
struct lfp_backlight_data_entry {
|
||||
u8 type:2;
|
||||
u8 active_low_pwm:1;
|
||||
u8 obsolete1:5;
|
||||
u8 i2c_pin:3; /* obsolete since ? */
|
||||
u8 i2c_speed:2; /* obsolete since ? */
|
||||
u16 pwm_freq_hz;
|
||||
u8 min_brightness; /* ???-233 */
|
||||
u8 obsolete2;
|
||||
u8 obsolete3;
|
||||
u8 i2c_address; /* obsolete since ? */
|
||||
u8 i2c_command; /* obsolete since ? */
|
||||
} __packed;
|
||||
|
||||
struct lfp_backlight_control_method {
|
||||
@ -905,8 +907,8 @@ struct lfp_brightness_level {
|
||||
struct bdb_lfp_backlight_data {
|
||||
u8 entry_size;
|
||||
struct lfp_backlight_data_entry data[16];
|
||||
u8 level[16]; /* ???-233 */
|
||||
struct lfp_backlight_control_method backlight_control[16];
|
||||
u8 level[16]; /* 162-233 */
|
||||
struct lfp_backlight_control_method backlight_control[16]; /* 191+ */
|
||||
struct lfp_brightness_level brightness_level[16]; /* 234+ */
|
||||
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
|
||||
u8 brightness_precision_bits[16]; /* 236+ */
|
||||
@ -917,7 +919,7 @@ struct bdb_lfp_backlight_data {
|
||||
* Block 44 - LFP Power Conservation Features Block
|
||||
*/
|
||||
struct lfp_power_features {
|
||||
u8 reserved1:1;
|
||||
u8 dpst_support:1; /* ???-159 */
|
||||
u8 power_conservation_pref:3;
|
||||
u8 reserved2:1;
|
||||
u8 lace_enabled_status:1; /* 210+ */
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_vrr.h"
|
||||
#include "intel_dp.h"
|
||||
|
||||
bool intel_vrr_is_capable(struct intel_connector *connector)
|
||||
{
|
||||
@ -113,10 +114,18 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(conn_state->connector);
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
|
||||
const struct drm_display_info *info = &connector->base.display_info;
|
||||
int vmin, vmax;
|
||||
|
||||
/*
|
||||
* FIXME all joined pipes share the same transcoder.
|
||||
* Need to account for that during VRR toggle/push/etc.
|
||||
*/
|
||||
if (crtc_state->bigjoiner_pipes)
|
||||
return;
|
||||
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
return;
|
||||
|
||||
@@ -165,6 +174,14 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (crtc_state->uapi.vrr_enabled) {
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
if (intel_dp_as_sdp_supported(intel_dp)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_start);
crtc_state->vrr.vsync_end =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_end);
}
}
}
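
For a rough feel of what gets programmed here, assume the standard CTA-861 1920x1080@60 vertical timings (vtotal 1125, vsync start 1084, vsync end 1089); the snippet below (illustrative only, not part of the patch) evaluates the two subtractions above, i.e. the vsync positions counted back from crtc_vtotal that later land in TRANS_VRR_VSYNC.

#include <stdio.h>

int main(void)
{
	int crtc_vtotal = 1125;
	int vsync_start = 1084;	/* 1080 active + 4 front porch */
	int vsync_end = 1089;	/* + 5 lines of sync */

	printf("vrr.vsync_start = %d\n", crtc_vtotal - vsync_start);	/* 41 */
	printf("vrr.vsync_end   = %d\n", crtc_vtotal - vsync_end);	/* 36 */
	return 0;
}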
|
||||
|
||||
@ -240,6 +257,12 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
|
||||
return;
|
||||
|
||||
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
|
||||
|
||||
if (HAS_AS_SDP(dev_priv))
|
||||
intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder),
|
||||
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
|
||||
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
|
||||
|
||||
intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
|
||||
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
|
||||
}
|
||||
@ -258,13 +281,16 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
|
||||
intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
|
||||
VRR_STATUS_VRR_EN_LIVE, 1000);
|
||||
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
|
||||
|
||||
if (HAS_AS_SDP(dev_priv))
|
||||
intel_de_write(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder), 0);
|
||||
}
|
||||
|
||||
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
u32 trans_vrr_ctl;
|
||||
u32 trans_vrr_ctl, trans_vrr_vsync;
|
||||
|
||||
trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
|
||||
|
||||
@ -284,6 +310,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
|
||||
crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
|
||||
}
|
||||
|
||||
if (crtc_state->vrr.enable)
|
||||
if (crtc_state->vrr.enable) {
|
||||
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
|
||||
|
||||
if (HAS_AS_SDP(dev_priv)) {
|
||||
trans_vrr_vsync =
|
||||
intel_de_read(dev_priv, TRANS_VRR_VSYNC(cpu_transcoder));
|
||||
crtc_state->vrr.vsync_start =
|
||||
REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
|
||||
crtc_state->vrr.vsync_end =
|
||||
REG_FIELD_GET(VRR_VSYNC_END_MASK, trans_vrr_vsync);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -213,10 +213,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
|
||||
* The pipe scaler does not use all the bits of PIPESRC, at least
|
||||
* on the earlier platforms. So even when we're scaling a plane
|
||||
* the *pipe* source size must not be too large. For simplicity
|
||||
* we assume the limits match the scaler source size limits. Might
|
||||
* not be 100% accurate on all platforms, but good enough for now.
|
||||
* we assume the limits match the scaler destination size limits.
|
||||
* Might not be 100% accurate on all platforms, but good enough for
|
||||
* now.
|
||||
*/
|
||||
if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
|
||||
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"scaler_user index %u.%u: pipe src size %ux%u "
|
||||
"is out of scaler range\n",
|
||||
|
@@ -6,18 +6,19 @@
 #include <drm/drm_blend.h>
 
 #include "i915_drv.h"
-#include "i915_fixed.h"
 #include "i915_reg.h"
 #include "i9xx_wm.h"
 #include "intel_atomic.h"
 #include "intel_atomic_plane.h"
 #include "intel_bw.h"
+#include "intel_cdclk.h"
 #include "intel_crtc.h"
 #include "intel_de.h"
 #include "intel_display.h"
 #include "intel_display_power.h"
 #include "intel_display_types.h"
 #include "intel_fb.h"
+#include "intel_fixed.h"
 #include "intel_pcode.h"
 #include "intel_wm.h"
 #include "skl_watermark.h"
@@ -2601,10 +2602,17 @@ skl_compute_ddb(struct intel_atomic_state *state)
 			return ret;
 	}
 
-	if (HAS_MBUS_JOINING(i915))
+	if (HAS_MBUS_JOINING(i915)) {
 		new_dbuf_state->joined_mbus =
 			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+			ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
+			if (ret)
+				return ret;
+		}
+	}
 
 	for_each_intel_crtc(&i915->drm, crtc) {
 		enum pipe pipe = crtc->pipe;
 
@@ -2628,13 +2636,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
 		if (ret)
 			return ret;
 
-	if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
-		/* TODO: Implement vblank synchronized MBUS joining changes */
-		ret = intel_modeset_all_pipes_late(state, "MBUS joining change");
-		if (ret)
-			return ret;
-	}
-
 	drm_dbg_kms(&i915->drm,
 		    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
 		    old_dbuf_state->enabled_slices,
@@ -3057,6 +3058,8 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
 	if (HAS_MBUS_JOINING(i915))
 		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
 
+	dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
+
 	for_each_intel_crtc(&i915->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
 			to_intel_crtc_state(crtc->base.state);
@@ -3530,85 +3533,6 @@ int intel_dbuf_init(struct drm_i915_private *i915)
 	return 0;
 }
 
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *i915 = to_i915(state->base.dev);
-	u32 mbus_ctl, dbuf_min_tracker_val;
-	enum dbuf_slice slice;
-	const struct intel_dbuf_state *dbuf_state =
-		intel_atomic_get_new_dbuf_state(state);
-
-	if (!HAS_MBUS_JOINING(i915))
-		return;
-
-	/*
-	 * TODO: Implement vblank synchronized MBUS joining changes.
-	 * Must be properly coordinated with dbuf reprogramming.
-	 */
-	if (dbuf_state->joined_mbus) {
-		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
-			MBUS_JOIN_PIPE_SELECT_NONE;
-		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
-	} else {
-		mbus_ctl = MBUS_HASHING_MODE_2x2 |
-			MBUS_JOIN_PIPE_SELECT_NONE;
-		dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
-	}
-
-	intel_de_rmw(i915, MBUS_CTL,
-		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
-		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
-	for_each_dbuf_slice(i915, slice)
-		intel_de_rmw(i915, DBUF_CTL_S(slice),
-			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
-			     dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *i915 = to_i915(state->base.dev);
-	const struct intel_dbuf_state *new_dbuf_state =
-		intel_atomic_get_new_dbuf_state(state);
-	const struct intel_dbuf_state *old_dbuf_state =
-		intel_atomic_get_old_dbuf_state(state);
-
-	if (!new_dbuf_state ||
-	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
-	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
-		return;
-
-	WARN_ON(!new_dbuf_state->base.changed);
-
-	update_mbus_pre_enable(state);
-	gen9_dbuf_slices_update(i915,
-				old_dbuf_state->enabled_slices |
-				new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *i915 = to_i915(state->base.dev);
-	const struct intel_dbuf_state *new_dbuf_state =
-		intel_atomic_get_new_dbuf_state(state);
-	const struct intel_dbuf_state *old_dbuf_state =
-		intel_atomic_get_old_dbuf_state(state);
-
-	if (!new_dbuf_state ||
-	    (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
-	     new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
-		return;
-
-	WARN_ON(!new_dbuf_state->base.changed);
-
-	gen9_dbuf_slices_update(i915,
-				new_dbuf_state->enabled_slices);
-}
-
 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
 {
 	switch (pipe) {
@@ -3628,14 +3552,12 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
 	return false;
 }
 
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *i915 = to_i915(state->base.dev);
 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
-	const struct intel_crtc_state *new_crtc_state;
 	const struct intel_crtc *crtc;
 	u32 val = 0;
-	int i;
 
 	if (DISPLAY_VER(i915) < 11)
 		return;
@@ -3679,12 +3601,9 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
 		val |= MBUS_DBOX_B_CREDIT(8);
 	}
 
-	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
 		u32 pipe_val = val;
 
-		if (!new_crtc_state->hw.active)
-			continue;
-
 		if (DISPLAY_VER(i915) >= 14) {
 			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
 							      new_dbuf_state->active_pipes))
@@ -3697,6 +3616,217 @@ void intel_mbus_dbox_update(struct intel_atomic_state *state)
 	}
 }
 
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+					   int ratio)
+{
+	struct intel_dbuf_state *dbuf_state;
+
+	dbuf_state = intel_atomic_get_dbuf_state(state);
+	if (IS_ERR(dbuf_state))
+		return PTR_ERR(dbuf_state);
+
+	dbuf_state->mdclk_cdclk_ratio = ratio;
+
+	return intel_atomic_lock_global_state(&dbuf_state->base);
+}
+
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+					 int ratio, bool joined_mbus)
+{
+	enum dbuf_slice slice;
+
+	if (!HAS_MBUS_JOINING(i915))
+		return;
+
+	if (DISPLAY_VER(i915) >= 20)
+		intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+			     MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+
+	if (joined_mbus)
+		ratio *= 2;
+
+	drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+		    ratio, str_yes_no(joined_mbus));
+
+	for_each_dbuf_slice(i915, slice)
+		intel_de_rmw(i915, DBUF_CTL_S(slice),
+			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+			     DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+}
+
+static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	int mdclk_cdclk_ratio;
+
+	if (intel_cdclk_is_decreasing_later(state)) {
+		/* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
+		mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
+	} else {
+		/* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
+		mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
+	}
+
+	intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+					    new_dbuf_state->joined_mbus);
+}
+
+static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
+					const struct intel_dbuf_state *dbuf_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
+	const struct intel_crtc_state *new_crtc_state;
+	struct intel_crtc *crtc;
+
+	drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
+	drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+
+	crtc = intel_crtc_for_pipe(i915, pipe);
+	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+	if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
+		return pipe;
+	else
+		return INVALID_PIPE;
+}
+
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+					enum pipe pipe)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	u32 mbus_ctl;
+
+	drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+		    str_yes_no(old_dbuf_state->joined_mbus),
+		    str_yes_no(new_dbuf_state->joined_mbus),
+		    pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+	if (new_dbuf_state->joined_mbus)
+		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
+	else
+		mbus_ctl = MBUS_HASHING_MODE_2x2;
+
+	if (pipe != INVALID_PIPE)
+		mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
+	else
+		mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
+
+	intel_de_rmw(i915, MBUS_CTL,
+		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+}
+
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
+{
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+
+	if (!new_dbuf_state)
+		return;
+
+	if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
+		enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
+
+		WARN_ON(!new_dbuf_state->base.changed);
+
+		intel_dbuf_mbus_join_update(state, pipe);
+		intel_mbus_dbox_update(state);
+		intel_dbuf_mdclk_min_tracker_update(state);
+	}
+}
+
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+
+	if (!new_dbuf_state)
+		return;
+
+	if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
+		enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
+
+		WARN_ON(!new_dbuf_state->base.changed);
+
+		intel_dbuf_mdclk_min_tracker_update(state);
+		intel_mbus_dbox_update(state);
+		intel_dbuf_mbus_join_update(state, pipe);
+
+		if (pipe != INVALID_PIPE) {
+			struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+			intel_crtc_wait_for_next_vblank(crtc);
+		}
+	} else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
+		   old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+		WARN_ON(!new_dbuf_state->base.changed);
+
+		intel_dbuf_mdclk_min_tracker_update(state);
+		intel_mbus_dbox_update(state);
+	}
+
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+	u8 old_slices, new_slices;
+
+	if (!new_dbuf_state)
+		return;
+
+	old_slices = old_dbuf_state->enabled_slices;
+	new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+
+	if (old_slices == new_slices)
+		return;
+
+	WARN_ON(!new_dbuf_state->base.changed);
+
+	gen9_dbuf_slices_update(i915, new_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_dbuf_state *new_dbuf_state =
+		intel_atomic_get_new_dbuf_state(state);
+	const struct intel_dbuf_state *old_dbuf_state =
+		intel_atomic_get_old_dbuf_state(state);
+	u8 old_slices, new_slices;
+
+	if (!new_dbuf_state)
+		return;
+
+	old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
+	new_slices = new_dbuf_state->enabled_slices;
+
+	if (old_slices == new_slices)
+		return;
+
+	WARN_ON(!new_dbuf_state->base.changed);
+
+	gen9_dbuf_slices_update(i915, new_slices);
+}
+
 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *i915 = m->private;
@@ -58,22 +58,31 @@ struct intel_dbuf_state {
 	u8 slices[I915_MAX_PIPES];
 	u8 enabled_slices;
 	u8 active_pipes;
+	u8 mdclk_cdclk_ratio;
 	bool joined_mbus;
 };
 
 struct intel_dbuf_state *
 intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
 
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define to_intel_dbuf_state(global_state) \
+	container_of_const((global_state), struct intel_dbuf_state, base)
 
 #define intel_atomic_get_old_dbuf_state(state) \
 	to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
 #define intel_atomic_get_new_dbuf_state(state) \
 	to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
 
 int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
+					   int ratio);
+
 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
 void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
+void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+					 int ratio, bool joined_mbus);
+void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
+void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
 
 #endif /* __SKL_WATERMARK_H__ */
@@ -32,14 +32,16 @@
 #define MBUS_BBOX_CTL_S1		_MMIO(0x45040)
 #define MBUS_BBOX_CTL_S2		_MMIO(0x45044)
 
-#define MBUS_CTL			_MMIO(0x4438C)
-#define MBUS_JOIN			REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK		REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2		REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4		REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK	REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe)	REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE	MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_CTL				_MMIO(0x4438C)
+#define MBUS_JOIN				REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK			REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2			REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4			REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK		REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe)		REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE		MBUS_JOIN_PIPE_SELECT(7)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK	REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val)	REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
 
 /* Watermark register definitions for SKL */
 #define _CUR_WM_A_0	0x70140
@@ -273,8 +273,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
 				    struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
-						   base);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
 	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int ret;
@@ -386,7 +386,7 @@ struct drm_i915_gem_object {
 	 * and kernel mode driver for caching policy control after GEN12.
 	 * In the meantime platform specific tables are created to translate
 	 * i915_cache_level into pat index, for more details check the macros
-	 * defined i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+	 * defined i915/i915_pci.c, e.g. TGL_CACHELEVEL.
 	 * For backward compatibility, this field contains values exactly match
 	 * the entries of enum i915_cache_level for pre-GEN12 platforms (See
 	 * LEGACY_CACHELEVEL), so that the PTE encode functions for these
@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
-	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
 	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
 	unsigned long max_pages;
@@ -857,7 +857,7 @@ out:
 static int igt_ppgtt_64K(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+	bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
 	if (gen < 12)
 		return true;
 
-	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
 		return false;
 
 	return HAS_DISPLAY(i915);
@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
 	src_pitch = t->width; /* in dwords */
 	if (src->tiling == CLIENT_TILING_Y) {
 		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
-		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
 			src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
 	} else if (src->tiling == CLIENT_TILING_X) {
 		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
 	dst_pitch = t->width; /* in dwords */
 	if (dst->tiling == CLIENT_TILING_Y) {
 		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
-		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
 			dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
 	} else if (dst->tiling == CLIENT_TILING_X) {
 		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
 		v += x;
 
 		swizzle = gt->ggtt->bit_6_swizzle_x;
-	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
 		/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
 		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
 
|
@ -189,9 +189,6 @@ static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
|
||||
{
|
||||
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
|
||||
|
||||
if (IS_PONTEVECCHIO(engine->i915))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* So far platforms supported by i915 having flat ccs do not require
|
||||
* AUX invalidation. Check also whether the engine requires it.
|
||||
@ -827,7 +824,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
|
||||
cs = gen12_emit_pipe_control(cs, 0,
|
||||
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
|
||||
|
||||
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
|
||||
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
|
||||
/* Wa_1409600907 */
|
||||
flags |= PIPE_CONTROL_DEPTH_STALL;
|
||||
|
||||
|
@@ -500,11 +500,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 }
 
 static void
-xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
-			  struct i915_vma_resource *vma_res,
-			  struct sgt_dma *iter,
-			  unsigned int pat_index,
-			  u32 flags)
+xehp_ppgtt_insert_huge(struct i915_address_space *vm,
+		       struct i915_vma_resource *vma_res,
+		       struct sgt_dma *iter,
+		       unsigned int pat_index,
+		       u32 flags)
 {
 	const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
 	unsigned int rem = sg_dma_len(iter->sg);
@@ -741,8 +741,8 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
 	struct sgt_dma iter = sgt_dma(vma_res);
 
 	if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
-		if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
-			xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
+		if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55))
+			xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
 		else
 			gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
 	} else {
@@ -781,11 +781,11 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
 		drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
 }
 
-static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
-					    dma_addr_t addr,
-					    u64 offset,
-					    unsigned int pat_index,
-					    u32 flags)
+static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm,
+				       dma_addr_t addr,
+				       u64 offset,
+				       unsigned int pat_index,
+				       u32 flags)
 {
 	u64 idx = offset >> GEN8_PTE_SHIFT;
 	struct i915_page_directory * const pdp =
@@ -810,15 +810,15 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
 	vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
 }
 
-static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
-				       dma_addr_t addr,
-				       u64 offset,
-				       unsigned int pat_index,
-				       u32 flags)
+static void xehp_ppgtt_insert_entry(struct i915_address_space *vm,
+				    dma_addr_t addr,
+				    u64 offset,
+				    unsigned int pat_index,
+				    u32 flags)
 {
 	if (flags & PTE_LM)
-		return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
-						       pat_index, flags);
+		return xehp_ppgtt_insert_entry_lm(vm, addr, offset,
+						  pat_index, flags);
 
 	return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
 }
@@ -1042,7 +1042,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
 	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
 	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
 	if (HAS_64K_PAGES(gt->i915))
-		ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
+		ppgtt->vm.insert_page = xehp_ppgtt_insert_entry;
 	else
 		ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
 	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
@@ -497,9 +497,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
 	engine->logical_mask = BIT(logical_instance);
 	__sprint_engine_name(engine);
 
-	if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
-	     __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
-	     engine->class == RENDER_CLASS)
+	if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+	    __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
 		engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
 
 	/* features common between engines sharing EUs */
@@ -765,14 +764,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
 	 * and bits have disable semantices.
 	 */
 	media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
-	if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+	if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
 		media_fuse = ~media_fuse;
 
 	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
 	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
 		     GEN11_GT_VEBOX_DISABLE_SHIFT;
 
-	if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+	if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
 		fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
 		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
 	} else {
@@ -839,38 +838,6 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
 	}
 }
 
-static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_gt_info *info = &gt->info;
-	unsigned long meml3_mask;
-	unsigned long quad;
-
-	if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
-	      GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
-		return;
-
-	meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
-	meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
-
-	/*
-	 * Link Copy engines may be fused off according to meml3_mask. Each
-	 * bit is a quad that houses 2 Link Copy and two Sub Copy engines.
-	 */
-	for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
-		unsigned int instance = quad * 2 + 1;
-		intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
-						   _BCS(instance));
-
-		if (mask & info->engine_mask) {
-			gt_dbg(gt, "bcs%u fused off\n", instance);
-			gt_dbg(gt, "bcs%u fused off\n", instance + 1);
-
-			info->engine_mask &= ~mask;
-		}
-	}
-}
-
 /*
  * Determine which engines are fused off in our particular hardware.
  * Note that we have a catch-22 situation where we need to be able to access
@@ -889,7 +856,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 
 	engine_mask_apply_media_fuses(gt);
 	engine_mask_apply_compute_fuses(gt);
-	engine_mask_apply_copy_fuses(gt);
 
 	/*
 	 * The only use of the GSC CS is to load and communicate with the GSC
@@ -1193,7 +1159,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
 	if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
 	    GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
 	    GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
-	    GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
 	    GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
 		regs = xehp_regs;
 		num = ARRAY_SIZE(xehp_regs);
@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq)
 		/* Use a fixed tag for OA and friends */
 		GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
 		ce->lrc.ccid = ce->tag;
-	} else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+	} else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
 		/* We don't need a strict matching tag, just different values */
 		unsigned int tag = ffs(READ_ONCE(engine->context_tag));
 
@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
 		intel_engine_add_retire(engine, ce->timeline);
 
 	ccid = ce->lrc.ccid;
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
 		ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
 		ccid &= XEHP_MAX_CONTEXT_HW_ID;
 	} else {
@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
 			     head, upper_32_bits(csb), lower_32_bits(csb));
 
-		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 			promote = xehp_csb_parse(csb);
 		else if (GRAPHICS_VER(engine->i915) >= 12)
 			promote = gen12_csb_parse(csb);
@@ -3482,7 +3482,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 		}
 	}
 
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
 		if (intel_engine_has_preemption(engine))
 			engine->emit_bb_start = xehp_emit_bb_start;
 		else
@@ -3585,7 +3585,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 
 	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
 	if (GRAPHICS_VER(engine->i915) >= 11 &&
-	    GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
+	    GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) {
 		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
 	}
@@ -103,19 +103,6 @@ static const struct gsc_def gsc_def_dg1[] = {
 	}
 };
 
-static const struct gsc_def gsc_def_xehpsdv[] = {
-	{
-		/* HECI1 not enabled on the device. */
-	},
-	{
-		.name = "mei-gscfi",
-		.bar = DG1_GSC_HECI2_BASE,
-		.bar_size = GSC_BAR_LENGTH,
-		.use_polling = true,
-		.slow_firmware = true,
-	}
-};
-
 static const struct gsc_def gsc_def_dg2[] = {
 	{
 		.name = "mei-gsc",
@@ -188,8 +175,6 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
 
 	if (IS_DG1(i915)) {
 		def = &gsc_def_dg1[intf_id];
-	} else if (IS_XEHPSDV(i915)) {
-		def = &gsc_def_xehpsdv[intf_id];
 	} else if (IS_DG2(i915)) {
 		def = &gsc_def_dg2[intf_id];
 	} else {
@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 			intel_uncore_posting_read(uncore,
 						  XELPMP_RING_FAULT_REG);
 
-	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
 		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
 					   RING_FAULT_VALID, 0);
 		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 
 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
-	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
 		xehp_check_faults(gt);
 	else if (GRAPHICS_VER(i915) >= 8)
 		gen8_check_faults(gt);
@@ -57,51 +57,18 @@ static const struct intel_mmio_range icl_l3bank_steering_table[] = {
  * are of a "GAM" subclass that has special rules. Thus we use a separate
  * GAM table farther down for those.
  */
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+static const struct intel_mmio_range dg2_mslice_steering_table[] = {
 	{ 0x00DD00, 0x00DDFF },
 	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - OxEFFF is unused */
 	{},
 };
 
-static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
-	{ 0x004000, 0x004AFF },
-	{ 0x00C800, 0x00CFFF },
-	{},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
-	{ 0x00B000, 0x00B0FF },
-	{ 0x00D800, 0x00D8FF },
-	{},
-};
-
 static const struct intel_mmio_range dg2_lncf_steering_table[] = {
 	{ 0x00B000, 0x00B0FF },
 	{ 0x00D880, 0x00D8FF },
 	{},
 };
 
-/*
- * We have several types of MCR registers on PVC where steering to (0,0)
- * will always provide us with a non-terminated value. We'll stick them
- * all in the same table for simplicity.
- */
-static const struct intel_mmio_range pvc_instance0_steering_table[] = {
-	{ 0x004000, 0x004AFF },		/* HALF-BSLICE */
-	{ 0x008800, 0x00887F },		/* CC */
-	{ 0x008A80, 0x008AFF },		/* TILEPSMI */
-	{ 0x00B000, 0x00B0FF },		/* HALF-BSLICE */
-	{ 0x00B100, 0x00B3FF },		/* L3BANK */
-	{ 0x00C800, 0x00CFFF },		/* HALF-BSLICE */
-	{ 0x00D800, 0x00D8FF },		/* HALF-BSLICE */
-	{ 0x00DD00, 0x00DDFF },		/* BSLICE */
-	{ 0x00E900, 0x00E9FF },		/* HALF-BSLICE */
-	{ 0x00EC00, 0x00EEFF },		/* HALF-BSLICE */
-	{ 0x00F000, 0x00FFFF },		/* HALF-BSLICE */
-	{ 0x024180, 0x0241FF },		/* HALF-BSLICE */
-	{},
-};
-
 static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
 	{ 0x000B00, 0x000BFF },         /* SQIDI */
 	{ 0x001000, 0x001FFF },         /* SQIDI */
@@ -185,22 +152,16 @@ void intel_gt_mcr_init(struct intel_gt *gt)
 		gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
 		gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
 		gt->steering_table[DSS] = xelpg_dss_steering_table;
-	} else if (IS_PONTEVECCHIO(i915)) {
-		gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
 	} else if (IS_DG2(i915)) {
-		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+		gt->steering_table[MSLICE] = dg2_mslice_steering_table;
 		gt->steering_table[LNCF] = dg2_lncf_steering_table;
 		/*
 		 * No need to hook up the GAM table since it has a dedicated
 		 * steering control register on DG2 and can use implicit
 		 * steering.
 		 */
-	} else if (IS_XEHPSDV(i915)) {
-		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
-		gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
-		gt->steering_table[GAM] = xehpsdv_gam_steering_table;
 	} else if (GRAPHICS_VER(i915) >= 11 &&
-		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) {
 		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
 		gt->info.l3bank_mask =
 			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
@@ -821,8 +782,6 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
 		for (int i = 0; i < NUM_STEERING_TYPES; i++)
 			if (gt->steering_table[i])
 				report_steering_type(p, gt, i, dump_table);
-	} else if (IS_PONTEVECCHIO(gt->i915)) {
-		report_steering_type(p, gt, INSTANCE0, dump_table);
 	} else if (HAS_MSLICE_STEERING(gt->i915)) {
 		report_steering_type(p, gt, MSLICE, dump_table);
 		report_steering_type(p, gt, LNCF, dump_table);
@@ -842,10 +801,7 @@ void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
 void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
 				  unsigned int *group, unsigned int *instance)
 {
-	if (IS_PONTEVECCHIO(gt->i915)) {
-		*group = dss / GEN_DSS_PER_CSLICE;
-		*instance = dss % GEN_DSS_PER_CSLICE;
-	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
 		*group = dss / GEN_DSS_PER_GSLICE;
 		*instance = dss % GEN_DSS_PER_GSLICE;
 	} else {
@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
  * the topology, so we lookup the DSS ID directly in "slice 0."
  */
 #define _HAS_SS(ss_, gt_, group_, instance_) ( \
-	GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+	GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? \
 	intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
 	intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
 
@@ -392,10 +392,6 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
 		drm_puts(p, "no P-state info available\n");
 	}
 
-	drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
-	drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
-	drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
-
 	intel_runtime_pm_put(uncore->rpm, wakeref);
 }
 
@@ -718,44 +718,11 @@
 
 #define UNSLICE_UNIT_LEVEL_CLKGATE		_MMIO(0x9434)
 #define   VFUNIT_CLKGATE_DIS			REG_BIT(20)
-#define   TSGUNIT_CLKGATE_DIS			REG_BIT(17) /* XEHPSDV */
 #define   CG3DDISCFEG_CLKGATE_DIS		REG_BIT(17) /* DG2 */
 #define   GAMEDIA_CLKGATE_DIS			REG_BIT(11)
 #define   HSUNIT_CLKGATE_DIS			REG_BIT(8)
 #define   VSUNIT_CLKGATE_DIS			REG_BIT(3)
 
-#define UNSLCGCTL9440				_MMIO(0x9440)
-#define   GAMTLBOACS_CLKGATE_DIS		REG_BIT(28)
-#define   GAMTLBVDBOX5_CLKGATE_DIS		REG_BIT(27)
-#define   GAMTLBVDBOX6_CLKGATE_DIS		REG_BIT(26)
-#define   GAMTLBVDBOX3_CLKGATE_DIS		REG_BIT(24)
-#define   GAMTLBVDBOX4_CLKGATE_DIS		REG_BIT(23)
-#define   GAMTLBVDBOX7_CLKGATE_DIS		REG_BIT(22)
-#define   GAMTLBVDBOX2_CLKGATE_DIS		REG_BIT(21)
-#define   GAMTLBVDBOX0_CLKGATE_DIS		REG_BIT(17)
-#define   GAMTLBKCR_CLKGATE_DIS			REG_BIT(16)
-#define   GAMTLBGUC_CLKGATE_DIS			REG_BIT(15)
-#define   GAMTLBBLT_CLKGATE_DIS			REG_BIT(14)
-#define   GAMTLBVDBOX1_CLKGATE_DIS		REG_BIT(6)
-
-#define UNSLCGCTL9444				_MMIO(0x9444)
-#define   GAMTLBGFXA0_CLKGATE_DIS		REG_BIT(30)
-#define   GAMTLBGFXA1_CLKGATE_DIS		REG_BIT(29)
-#define   GAMTLBCOMPA0_CLKGATE_DIS		REG_BIT(28)
-#define   GAMTLBCOMPA1_CLKGATE_DIS		REG_BIT(27)
-#define   GAMTLBCOMPB0_CLKGATE_DIS		REG_BIT(26)
-#define   GAMTLBCOMPB1_CLKGATE_DIS		REG_BIT(25)
-#define   GAMTLBCOMPC0_CLKGATE_DIS		REG_BIT(24)
-#define   GAMTLBCOMPC1_CLKGATE_DIS		REG_BIT(23)
-#define   GAMTLBCOMPD0_CLKGATE_DIS		REG_BIT(22)
-#define   GAMTLBCOMPD1_CLKGATE_DIS		REG_BIT(21)
-#define   GAMTLBMERT_CLKGATE_DIS		REG_BIT(20)
-#define   GAMTLBVEBOX3_CLKGATE_DIS		REG_BIT(19)
-#define   GAMTLBVEBOX2_CLKGATE_DIS		REG_BIT(18)
-#define   GAMTLBVEBOX1_CLKGATE_DIS		REG_BIT(17)
-#define   GAMTLBVEBOX0_CLKGATE_DIS		REG_BIT(16)
-#define   LTCDD_CLKGATE_DIS			REG_BIT(10)
-
 #define GEN11_SLICE_UNIT_LEVEL_CLKGATE		_MMIO(0x94d4)
 #define XEHP_SLICE_UNIT_LEVEL_CLKGATE		MCR_REG(0x94d4)
 #define   SARBUNIT_CLKGATE_DIS			(1 << 5)
@@ -765,9 +732,6 @@
 #define   L3_CLKGATE_DIS			REG_BIT(16)
 #define   L3_CR2X_CLKGATE_DIS			REG_BIT(17)
 
-#define SCCGCTL94DC				MCR_REG(0x94dc)
-#define   CG3DDISURB				REG_BIT(14)
-
 #define UNSLICE_UNIT_LEVEL_CLKGATE2		_MMIO(0x94e4)
 #define   VSUNIT_CLKGATE_DIS_TGL		REG_BIT(19)
 #define   PSDUNIT_CLKGATE_DIS			REG_BIT(5)
@@ -989,10 +953,6 @@
 #define GEN7_WA_FOR_GEN7_L3_CONTROL		0x3C47FF8C
 #define   GEN7_L3AGDIS				(1 << 19)
 
-#define XEHPC_LNCFMISCCFGREG0			MCR_REG(0xb01c)
-#define   XEHPC_HOSTCACHEEN			REG_BIT(1)
-#define   XEHPC_OVRLSCCC			REG_BIT(0)
-
 #define GEN7_L3CNTLREG2				_MMIO(0xb020)
 
 /* MOCS (Memory Object Control State) registers */
@@ -1046,20 +1006,9 @@
 #define XEHP_L3SQCREG5				MCR_REG(0xb158)
 #define   L3_PWM_TIMER_INIT_VAL_MASK		REG_GENMASK(9, 0)
 
-#define MLTICTXCTL				MCR_REG(0xb170)
-#define   TDONRENDER				REG_BIT(2)
-
 #define XEHP_L3SCQREG7				MCR_REG(0xb188)
 #define   BLEND_FILL_CACHING_OPT_DIS		REG_BIT(3)
 
-#define XEHPC_L3SCRUB				MCR_REG(0xb18c)
-#define   SCRUB_CL_DWNGRADE_SHARED		REG_BIT(12)
-#define   SCRUB_RATE_PER_BANK_MASK		REG_GENMASK(2, 0)
-#define   SCRUB_RATE_4B_PER_CLK			REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
-
-#define L3SQCREG1_CCS0				MCR_REG(0xb200)
-#define   FLUSHALLNONCOH			REG_BIT(5)
-
 #define GEN11_GLBLINVL				_MMIO(0xb404)
 #define   GEN11_BANK_HASH_ADDR_EXCL_MASK	(0x7f << 5)
 #define   GEN11_BANK_HASH_ADDR_EXCL_BIT0	(1 << 5)
@@ -1109,7 +1058,6 @@
 #define XEHP_COMPCTX_TLB_INV_CR			MCR_REG(0xcf04)
 #define XELPMP_GSC_TLB_INV_CR			_MMIO(0xcf04) /* media GT only */
 
-#define XEHP_MERT_MOD_CTRL			MCR_REG(0xcf28)
 #define RENDER_MOD_CTRL				MCR_REG(0xcf2c)
 #define COMP_MOD_CTRL				MCR_REG(0xcf30)
 #define XELPMP_GSC_MOD_CTRL			_MMIO(0xcf30) /* media GT only */
@@ -1185,7 +1133,6 @@
 #define EU_PERF_CNTL4				PERF_REG(0xe45c)
 
 #define GEN9_ROW_CHICKEN4			MCR_REG(0xe48c)
#define   GEN12_DISABLE_GRF_CLEAR		REG_BIT(13)
-#define   XEHP_DIS_BBL_SYSPIPE			REG_BIT(11)
 #define   GEN12_DISABLE_TDL_PUSH		REG_BIT(9)
 #define   GEN11_DIS_PICK_2ND_EU			REG_BIT(7)
@@ -1202,7 +1149,6 @@
 #define   FLOW_CONTROL_ENABLE			REG_BIT(15)
 #define   UGM_BACKUP_MODE			REG_BIT(13)
 #define   MDQ_ARBITRATION_MODE			REG_BIT(12)
-#define   SYSTOLIC_DOP_CLOCK_GATING_DIS		REG_BIT(10)
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	REG_BIT(8)
 #define   STALL_DOP_GATING_DISABLE		REG_BIT(5)
 #define   THROTTLE_12_5				REG_GENMASK(4, 2)
@@ -1679,11 +1625,6 @@
 
 #define GEN12_SFC_DONE(n)			_MMIO(0x1cc000 + (n) * 0x1000)
 
-#define GT0_PACKAGE_ENERGY_STATUS		_MMIO(0x250004)
-#define GT0_PACKAGE_RAPL_LIMIT			_MMIO(0x250008)
-#define GT0_PACKAGE_POWER_SKU_UNIT		_MMIO(0x250068)
-#define GT0_PLATFORM_ENERGY_STATUS		_MMIO(0x25006c)
-
 /*
  * Standalone Media's non-engine GT registers are located at their regular GT
  * offsets plus 0x380000. This extra offset is stored inside the intel_uncore
@@ -573,7 +573,6 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
 				      char *buff)
 {
 	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
-	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
 	intel_wakeref_t wakeref;
 	u32 mode;
 
@@ -581,20 +580,12 @@ static ssize_t media_freq_factor_show(struct kobject *kobj,
 	 * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
 	 * GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
 	 */
-	if (IS_XEHPSDV(gt->i915) &&
-	    slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
-		/*
-		 * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
-		 * the media_ratio_mode, just return the cached media ratio
-		 */
-		mode = slpc->media_ratio_mode;
-	} else {
-		with_intel_runtime_pm(gt->uncore->rpm, wakeref)
-			mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
-		mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
-			SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
-			SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
-	}
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+		mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+
+	mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+		SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+		SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
 
 	return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
 }
@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt)
 
 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
 		xelpg_setup_private_ppat(gt);
-	else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+	else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
 		xehp_setup_private_ppat(gt);
 	else if (GRAPHICS_VER(i915) >= 12)
 		tgl_setup_private_ppat(uncore);
@@ -546,47 +546,6 @@ static const u8 gen12_rcs_offsets[] = {
 	END
 };
 
-static const u8 xehp_rcs_offsets[] = {
-	NOP(1),
-	LRI(13, POSTED),
-	REG16(0x244),
-	REG(0x034),
-	REG(0x030),
-	REG(0x038),
-	REG(0x03c),
-	REG(0x168),
-	REG(0x140),
-	REG(0x110),
-	REG(0x1c0),
-	REG(0x1c4),
-	REG(0x1c8),
-	REG(0x180),
-	REG16(0x2b4),
-
-	NOP(5),
-	LRI(9, POSTED),
-	REG16(0x3a8),
-	REG16(0x28c),
-	REG16(0x288),
-	REG16(0x284),
-	REG16(0x280),
-	REG16(0x27c),
-	REG16(0x278),
-	REG16(0x274),
-	REG16(0x270),
-
-	LRI(3, POSTED),
-	REG(0x1b0),
-	REG16(0x5a8),
-	REG16(0x5ac),
-
-	NOP(6),
-	LRI(1, 0),
-	REG(0x0c8),
-
-	END
-};
-
 static const u8 dg2_rcs_offsets[] = {
 	NOP(1),
 	LRI(15, POSTED),
@@ -695,8 +654,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 			return mtl_rcs_offsets;
 		else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 			return dg2_rcs_offsets;
-		else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
-			return xehp_rcs_offsets;
 		else if (GRAPHICS_VER(engine->i915) >= 12)
 			return gen12_rcs_offsets;
 		else if (GRAPHICS_VER(engine->i915) >= 11)
@@ -719,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 
 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 {
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 		return 0x70;
 	else if (GRAPHICS_VER(engine->i915) >= 12)
 		return 0x60;
@@ -733,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 
 static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
 {
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 		return 0x80;
 	else if (GRAPHICS_VER(engine->i915) >= 12)
 		return 0x70;
@@ -748,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
 
 static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 {
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 		return 0x84;
 	else if (GRAPHICS_VER(engine->i915) >= 12)
 		return 0x74;
@@ -795,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
 static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
 {
 
-	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
 		/*
 		 * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
 		 * simply to match the RCS context image layout.
@@ -35,9 +35,9 @@ static bool engine_supports_migration(struct intel_engine_cs *engine)
 	return true;
 }
 
-static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
-				struct i915_page_table *pt,
-				void *data)
+static void xehp_toggle_pdes(struct i915_address_space *vm,
+			     struct i915_page_table *pt,
+			     void *data)
 {
 	struct insert_pte_data *d = data;
 
@@ -52,9 +52,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
 	d->offset += SZ_2M;
 }
 
-static void xehpsdv_insert_pte(struct i915_address_space *vm,
-			       struct i915_page_table *pt,
-			       void *data)
+static void xehp_insert_pte(struct i915_address_space *vm,
+			    struct i915_page_table *pt,
+			    void *data)
 {
 	struct insert_pte_data *d = data;
 
@@ -120,7 +120,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
 	 * 512 entry layout using 4K GTT pages. The other two windows just map
 	 * lmem pages and must use the new compact 32 entry layout using 64K GTT
	 * pages, which ensures we can address any lmem object that the user
-	 * throws at us. We then also use the xehpsdv_toggle_pdes as a way of
+	 * throws at us. We then also use the xehp_toggle_pdes as a way of
 	 * just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
 	 * compact layout for each of these page-tables, that fall within the
 	 * [CHUNK_SIZE, 3 * CHUNK_SIZE) range.
@@ -209,12 +209,12 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
 	/* Now allow the GPU to rewrite the PTE via its own ppGTT */
 	if (HAS_64K_PAGES(gt->i915)) {
 		vm->vm.foreach(&vm->vm, base, d.offset - base,
-			       xehpsdv_insert_pte, &d);
+			       xehp_insert_pte, &d);
 		d.offset = base + CHUNK_SZ;
 		vm->vm.foreach(&vm->vm,
 			       d.offset,
 			       2 * CHUNK_SZ,
-			       xehpsdv_toggle_pdes, &d);
+			       xehp_toggle_pdes, &d);
 	} else {
 		vm->vm.foreach(&vm->vm, base, d.offset - base,
 			       insert_pte, &d);
@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
 
 	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
 
-	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
 		ring_sz = XY_FAST_COLOR_BLT_DW;
 	else if (ver >= 8)
 		ring_sz = 8;
@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
 		*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
 			(XY_FAST_COLOR_BLT_DW - 2);
 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |