Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 22:21:40 +00:00)
Merge tag 'drm-intel-next-2023-11-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 feature pull for v6.8:

Features and functionality:
- Major DP MST improvements on bandwidth management, DSC (Imre, Stan, Ville)
- DP panel replay enabling (Animesh, Jouni)
- MTL C20 phy state verification (Mika)
- MTL DP DSC fractional bpp support (Ankit, Vandita, Swati, Imre)
- Audio fastset support (Ville)

Refactoring and cleanups:
- Use dma fence interfaces instead of i915_sw_fence (Jouni)
- Separate gem and display code (Jouni, Juha-Pekka)
- AUX register macro refactoring (Jani)
- Separate display module/device parameters from the rest (Jouni)
- Move display capabilities debugfs under display (Vinod)
- Makefile cleanup (Jani)
- Register cleanups (Ville)
- Engine iterator cleanups (Tvrtko)
- Move display lock inits under display/ (Jani)
- VLV/CHV DPIO PHY register and interface refactoring (Jani)
- DSI VBT sequence refactoring (Jani, Andy Shevchenko)
- C10/C20 PHY PLL hardware readout and calculation abstractions (Lucas)
- DPLL code cleanups (Ville)
- Cleanup PXP plane protection checks (Jani)

Fixes:
- Replace VLV/CHV DSI GPIO direct access with proper GPIO API usage (Andy Shevchenko)
- Fix VLV/CHV DSI GPIO wrong initial value (Hans de Goede)
- Fix UHBR data, link M/N/TU and PBN values (Imre)
- Fix HDCP state on an enable/disable cycle (Suraj)
- Fix DP MST modeset sequence to be according to spec (Ville)
- Improved atomicity for multi-pipe commits (Ville)
- Update URLs in i915 MAINTAINERS entry and code (Jani)
- Check for VGA converter presence in eDP probe (Ville)
- Fix surface size checks (Ville)
- Fix LNL port/phy assignment (Lucas)
- Reset C10/C20 message bus harder to avoid sporadic failures (Mika)
- Fix bogus VBT HDMI level shift on BDW (Ville)
- Add workaround for LNL underruns when enabling FBC (Vinod)
- DSB refactoring (Animesh)
- DPT refactoring (Juha-Pekka)
- Disable DSC on DP MST on ICL (Imre)
- Fix PSR VSC packet setup timing (Mika)
- Fix LUT rounding and conversions (Ville)

DRM core display changes:
- DP MST fixes, helpers, refactoring to support bandwidth management (Imre)
- DP MST PBN divider value refactoring and fixes (Imre)
- DPCD register definitions (Ankit, Imre)
- Add helper to get DSC bpp precision (Ankit)
- Fix color LUT rounding (Ville)

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v89sl2ao.fsf@intel.com
[sima: Some conflicts in the amdgpu dp mst code]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 221d6546bd
@@ -10645,9 +10645,9 @@ M:	Rodrigo Vivi <rodrigo.vivi@intel.com>
M:	Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
L:	intel-gfx@lists.freedesktop.org
S:	Supported
W:	https://01.org/linuxgraphics/
W:	https://drm.pages.freedesktop.org/intel-docs/
Q:	http://patchwork.freedesktop.org/project/intel-gfx/
B:	https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
B:	https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
C:	irc://irc.oftc.net/intel-gfx
T:	git git://anongit.freedesktop.org/drm-intel
F:	Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon

@ -85,6 +85,7 @@
|
||||
#include <drm/drm_atomic_uapi.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_blend.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_eld.h>
|
||||
@ -6910,8 +6911,8 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
|
||||
if (IS_ERR(mst_state))
|
||||
return PTR_ERR(mst_state);
|
||||
|
||||
if (!mst_state->pbn_div)
|
||||
mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
|
||||
if (!mst_state->pbn_div.full)
|
||||
mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
|
||||
|
||||
if (!state->duplicated) {
|
||||
int max_bpc = conn_state->max_requested_bpc;
|
||||
@ -6923,7 +6924,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
|
||||
max_bpc);
|
||||
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
|
||||
clock = adjusted_mode->clock;
|
||||
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
|
||||
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
|
||||
}
|
||||
|
||||
dm_new_connector_state->vcpi_slots =
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
@ -210,7 +211,7 @@ static void dm_helpers_construct_old_payload(
|
||||
struct drm_dp_mst_atomic_payload *old_payload)
|
||||
{
|
||||
struct drm_dp_mst_atomic_payload *pos;
|
||||
int pbn_per_slot = mst_state->pbn_div;
|
||||
int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
|
||||
u8 next_payload_vc_start = mgr->next_start_slot;
|
||||
u8 payload_vc_start = new_payload->vc_start_slot;
|
||||
u8 allocated_time_slots;
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <drm/display/drm_dp_mst_helper.h>
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_dm.h"
|
||||
@ -941,10 +942,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
|
||||
link_timeslots_used = 0;
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
|
||||
link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div));
|
||||
|
||||
fair_pbn_alloc =
|
||||
(63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
|
||||
(63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div);
|
||||
|
||||
if (initial_slack[next_index] > fair_pbn_alloc) {
|
||||
vars[next_index].pbn += fair_pbn_alloc;
|
||||
@ -1642,7 +1643,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
|
||||
} else {
|
||||
/* check if mode could be supported within full_pbn */
|
||||
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
|
||||
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
|
||||
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
|
||||
if (pbn > full_pbn)
|
||||
return DC_FAIL_BANDWIDTH_VALIDATE;
|
||||
}
|
||||
|
@@ -2245,6 +2245,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
	{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
	/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
	{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
	/* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */
	{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
	/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
	{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
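Illustrative usage sketch (not part of the diff): how a driver could test the new
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC bit added above. drm_dp_read_desc(),
drm_dp_is_branch() and drm_dp_has_quirk() are existing helpers; the aux and dpcd
variables are assumed to belong to the connector being probed.

	struct drm_dp_desc desc;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) == 0 &&
	    drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) {
		/*
		 * The hub may need DSC enabled for modes where it applies
		 * HBLANK expansion, even if the link itself has enough BW.
		 */
	}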
@@ -2326,6 +2328,33 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
}
EXPORT_SYMBOL(drm_dp_read_desc);

/**
 * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
 * @dsc_dpcd: DSC capabilities from DPCD
 *
 * Returns the bpp precision supported by the DP sink.
 */
u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
	u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];

	switch (bpp_increment_dpcd) {
	case DP_DSC_BITS_PER_PIXEL_1_16:
		return 16;
	case DP_DSC_BITS_PER_PIXEL_1_8:
		return 8;
	case DP_DSC_BITS_PER_PIXEL_1_4:
		return 4;
	case DP_DSC_BITS_PER_PIXEL_1_2:
		return 2;
	case DP_DSC_BITS_PER_PIXEL_1_1:
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr);

/**
 * drm_dp_dsc_sink_max_slice_count() - Get the max slice count
 * supported by the DSC sink.
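Illustrative usage sketch (not part of the diff): clamping a requested fractional
DSC rate to the precision reported by drm_dp_dsc_sink_bpp_incr(). Only the helper
and the DPCD array type come from the patch; the wrapper function below is
hypothetical.

	/* Hypothetical example: align a .4 fixed point bpp to the sink's DSC step. */
	static int example_align_dsc_bpp_x16(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
					     int bpp_x16)
	{
		u8 incr = drm_dp_dsc_sink_bpp_incr(dsc_dpcd);

		if (!incr)
			return -EINVAL;	/* unknown/unsupported increment */

		/* incr == 16 means 1/16 bpp steps, i.e. a step of 1 in .4 fixed point. */
		return rounddown(bpp_x16, 16 / incr);
	}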
@@ -3898,3 +3927,135 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
EXPORT_SYMBOL(drm_panel_dp_aux_backlight);

#endif

/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
				     int symbol_size, bool is_mst)
{
	int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
	int align = is_mst ? 4 / lane_count : 1;

	return ALIGN(cycles, align);
}

static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
					 int bpp_x16, int symbol_size, bool is_mst)
{
	int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
	int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
							  bpp_x16, symbol_size, is_mst);
	int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;

	return slice_count * (slice_data_cycles + slice_eoc_cycles);
}

/**
 * drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
 * @lane_count: DP link lane count
 * @hactive: pixel count of the active period in one scanline of the stream
 * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
 * @bpp_x16: bits per pixel in .4 binary fixed point
 * @flags: DRM_DP_OVERHEAD_x flags
 *
 * Calculate the BW allocation overhead of a DP link stream, depending
 * on the link's
 * - @lane_count
 * - SST/MST mode (@flags / %DRM_DP_OVERHEAD_MST)
 * - symbol size (@flags / %DRM_DP_OVERHEAD_UHBR)
 * - FEC mode (@flags / %DRM_DP_OVERHEAD_FEC)
 * - SSC/REF_CLK mode (@flags / %DRM_DP_OVERHEAD_SSC_REF_CLK)
 * as well as the stream's
 * - @hactive timing
 * - @bpp_x16 color depth
 * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
 * Note that this overhead doesn't account for the 8b/10b, 128b/132b
 * channel coding efficiency, for that see
 * @drm_dp_link_bw_channel_coding_efficiency().
 *
 * Returns the overhead as 100% + overhead% in 1ppm units.
 */
int drm_dp_bw_overhead(int lane_count, int hactive,
		       int dsc_slice_count,
		       int bpp_x16, unsigned long flags)
{
	int symbol_size = flags & DRM_DP_BW_OVERHEAD_UHBR ? 32 : 8;
	bool is_mst = flags & DRM_DP_BW_OVERHEAD_MST;
	u32 overhead = 1000000;
	int symbol_cycles;

	/*
	 * DP Standard v2.1 2.6.4.1
	 * SSC downspread and ref clock variation margin:
	 *   5300ppm + 300ppm ~ 0.6%
	 */
	if (flags & DRM_DP_BW_OVERHEAD_SSC_REF_CLK)
		overhead += 6000;

	/*
	 * DP Standard v2.1 2.6.4.1.1, 3.5.1.5.4:
	 * FEC symbol insertions for 8b/10b channel coding:
	 * After each 250 data symbols on 2-4 lanes:
	 *   250 LL + 5 FEC_PARITY_PH + 1 CD_ADJ (256 byte FEC block)
	 * After each 2 x 250 data symbols on 1 lane:
	 *   2 * 250 LL + 11 FEC_PARITY_PH + 1 CD_ADJ (512 byte FEC block)
	 * After 256 (2-4 lanes) or 128 (1 lane) FEC blocks:
	 *   256 * 256 bytes + 1 FEC_PM
	 * or
	 *   128 * 512 bytes + 1 FEC_PM
	 * (256 * 6 + 1) / (256 * 250) = 2.4015625 %
	 */
	if (flags & DRM_DP_BW_OVERHEAD_FEC)
		overhead += 24016;

	/*
	 * DP Standard v2.1 2.7.9, 5.9.7
	 * The FEC overhead for UHBR is accounted for in its 96.71% channel
	 * coding efficiency.
	 */
	WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
		(flags & DRM_DP_BW_OVERHEAD_FEC));

	if (flags & DRM_DP_BW_OVERHEAD_DSC)
		symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
							      dsc_slice_count,
							      bpp_x16, symbol_size,
							      is_mst);
	else
		symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
							  bpp_x16, symbol_size,
							  is_mst);

	return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
					    overhead * 16),
				hactive * bpp_x16);
}
EXPORT_SYMBOL(drm_dp_bw_overhead);

/**
 * drm_dp_bw_channel_coding_efficiency - Get a DP link's channel coding efficiency
 * @is_uhbr: Whether the link has a 128b/132b channel coding
 *
 * Return the channel coding efficiency of the given DP link type, which is
 * either 8b/10b or 128b/132b (aka UHBR). The corresponding overhead includes
 * the 8b -> 10b, 128b -> 132b pixel data to link symbol conversion overhead
 * and for 128b/132b any link or PHY level control symbol insertion overhead
 * (LLCP, FEC, PHY sync, see DP Standard v2.1 3.5.2.18). For 8b/10b the
 * corresponding FEC overhead is BW allocation specific, included in the value
 * returned by drm_dp_bw_overhead().
 *
 * Returns the efficiency in the 100%/coding-overhead% ratio in
 * 1ppm units.
 */
int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
{
	if (is_uhbr)
		return 967100;
	else
		/*
		 * Note that on 8b/10b MST the efficiency is only
		 * 78.75% due to the 1 out of 64 MTPH packet overhead,
		 * not accounted for here.
		 */
		return 800000;
}
EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);

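Illustrative usage sketch (not part of the diff): combining drm_dp_bw_overhead()
and drm_dp_bw_channel_coding_efficiency() into a rough link bandwidth check. The
two helpers and the DRM_DP_BW_OVERHEAD_* flags are from the patch above; the
wrapper function, its parameters and the kbps unit choices are assumptions.

	/* Hypothetical check: does an MST stream fit on the link (rates in kbps)? */
	static bool example_stream_fits_link(int lane_count, int link_rate_kbps_per_lane,
					     bool is_uhbr, int hactive,
					     int pixel_clock_khz, int bpp_x16)
	{
		/* Assume FEC is enabled on 8b/10b links (required e.g. for DSC). */
		unsigned long flags = DRM_DP_BW_OVERHEAD_MST |
				      DRM_DP_BW_OVERHEAD_SSC_REF_CLK |
				      (is_uhbr ? DRM_DP_BW_OVERHEAD_UHBR :
						 DRM_DP_BW_OVERHEAD_FEC);
		int overhead = drm_dp_bw_overhead(lane_count, hactive, 0, bpp_x16, flags);
		int efficiency = drm_dp_bw_channel_coding_efficiency(is_uhbr);
		u64 stream_kbps, link_kbps;

		/* Stream data rate plus the 1ppm-based allocation overhead. */
		stream_kbps = DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock_khz * bpp_x16, overhead),
					       16ULL * 1000000);

		/* Raw link rate scaled by the 1ppm-based channel coding efficiency. */
		link_kbps = div_u64(mul_u32_u32(link_rate_kbps_per_lane * lane_count,
						efficiency),
				    1000000);

		return stream_kbps <= link_kbps;
	}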
@ -43,6 +43,7 @@
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
#include <drm/drm_print.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
||||
@ -3578,16 +3579,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
|
||||
* value is in units of PBNs/(timeslots/1 MTP). This value can be used to
|
||||
* convert the number of PBNs required for a given stream to the number of
|
||||
* timeslots this stream requires in each MTP.
|
||||
*
|
||||
* Returns the BW / timeslot value in 20.12 fixed point format.
|
||||
*/
|
||||
int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
|
||||
int link_rate, int link_lane_count)
|
||||
fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
|
||||
int link_rate, int link_lane_count)
|
||||
{
|
||||
int ch_coding_efficiency =
|
||||
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
|
||||
fixed20_12 ret;
|
||||
|
||||
if (link_rate == 0 || link_lane_count == 0)
|
||||
drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
|
||||
link_rate, link_lane_count);
|
||||
|
||||
/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
|
||||
return link_rate * link_lane_count / 54000;
|
||||
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
|
||||
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
|
||||
ch_coding_efficiency),
|
||||
(1000000ULL * 8 * 5400) >> 12);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
|
||||
|
||||
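Illustrative usage sketch (not part of the diff): drm_dp_get_vc_payload_bw() now
returns a fixed20_12 PBN-per-timeslot value, so callers divide in fixed point, as
the drm_dp_atomic_find_time_slots() and amdgpu hunks in this commit do. The
mode_clock_khz, bpp_x16 and mgr/link variables here are hypothetical.

	fixed20_12 pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate, lane_count);
	int pbn = drm_dp_calc_pbn_mode(mode_clock_khz, bpp_x16);	/* stream's PBN */

	/* Timeslots needed for the stream (mirrors drm_dp_atomic_find_time_slots()). */
	int time_slots = DIV_ROUND_UP(dfixed_const(pbn), pbn_div.full);

	/* Whole PBNs per timeslot, e.g. for logging. */
	int pbn_per_slot = dfixed_trunc(pbn_div);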
@ -4335,7 +4346,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
|
||||
}
|
||||
}
|
||||
|
||||
req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
|
||||
req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);
|
||||
|
||||
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
|
||||
port->connector->base.id, port->connector->name,
|
||||
@ -4718,35 +4729,36 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
|
||||
|
||||
/**
|
||||
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
|
||||
* @clock: dot clock for the mode
|
||||
* @bpp: bpp for the mode.
|
||||
* @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
|
||||
* @clock: dot clock
|
||||
* @bpp: bpp as .4 binary fixed point
|
||||
*
|
||||
* This uses the formula in the spec to calculate the PBN value for a mode.
|
||||
*/
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp)
|
||||
{
|
||||
/*
|
||||
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
|
||||
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
|
||||
* common multiplier to render an integer PBN for all link rate/lane
|
||||
* counts combinations
|
||||
* calculate
|
||||
* peak_kbps *= (1006/1000)
|
||||
* peak_kbps *= (64/54)
|
||||
* peak_kbps *= 8 convert to bytes
|
||||
*
|
||||
* If the bpp is in units of 1/16, further divide by 16. Put this
|
||||
* factor in the numerator rather than the denominator to avoid
|
||||
* integer overflow
|
||||
* peak_kbps = clock * bpp / 16
|
||||
* peak_kbps *= SSC overhead / 1000000
|
||||
* peak_kbps /= 8 convert to Kbytes
|
||||
* peak_kBps *= (64/54) / 1000 convert to PBN
|
||||
*/
|
||||
/*
|
||||
* TODO: Use the actual link and mode parameters to calculate
|
||||
* the overhead. For now it's assumed that these are
|
||||
* 4 link lanes, 4096 hactive pixels, which don't add any
|
||||
* significant data padding overhead and that there is no DSC
|
||||
* or FEC overhead.
|
||||
*/
|
||||
int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,
|
||||
DRM_DP_BW_OVERHEAD_MST |
|
||||
DRM_DP_BW_OVERHEAD_SSC_REF_CLK);
|
||||
|
||||
if (dsc)
|
||||
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
|
||||
8 * 54 * 1000 * 1000);
|
||||
|
||||
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
|
||||
8 * 54 * 1000 * 1000);
|
||||
return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),
|
||||
1000000ULL * 8 * 54 * 1000);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
|
||||
|
||||
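Illustrative usage sketch (not part of the diff): the new drm_dp_calc_pbn_mode()
signature takes bpp in .4 binary fixed point and drops the dsc flag, so callers
shift integer bpp values left by 4, as the amdgpu hunks above do. The mode
numbers below are made up.

	/* 262.75 MHz dot clock, uncompressed 24 bpp: pass bpp << 4. */
	int pbn = drm_dp_calc_pbn_mode(262750, 24 << 4);

	/* Same mode with DSC at 10.5 bpp: 10.5 * 16 = 168 in .4 fixed point. */
	int pbn_dsc = drm_dp_calc_pbn_mode(262750, 168);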
@ -4871,7 +4883,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
|
||||
state = to_drm_dp_mst_topology_state(mgr->base.state);
|
||||
seq_printf(m, "\n*** Atomic state info ***\n");
|
||||
seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
|
||||
state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
|
||||
state->payload_mask, mgr->max_payloads, state->start_slot,
|
||||
dfixed_trunc(state->pbn_div));
|
||||
|
||||
seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");
|
||||
for (i = 0; i < mgr->max_payloads; i++) {
|
||||
@ -5136,13 +5149,67 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port,
|
||||
struct drm_dp_mst_port *parent)
|
||||
{
|
||||
if (!mgr->mst_primary)
|
||||
return false;
|
||||
|
||||
port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
|
||||
port);
|
||||
if (!port)
|
||||
return false;
|
||||
|
||||
if (!parent)
|
||||
return true;
|
||||
|
||||
parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
|
||||
parent);
|
||||
if (!parent)
|
||||
return false;
|
||||
|
||||
if (!parent->mstb)
|
||||
return false;
|
||||
|
||||
return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
|
||||
* @mgr: MST topology manager
|
||||
* @port: the port being looked up
|
||||
* @parent: the parent port
|
||||
*
|
||||
* The function returns %true if @port is downstream of @parent. If @parent is
|
||||
* %NULL - denoting the root port - the function returns %true if @port is in
|
||||
* @mgr's topology.
|
||||
*/
|
||||
bool
|
||||
drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port,
|
||||
struct drm_dp_mst_port *parent)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
mutex_lock(&mgr->lock);
|
||||
ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
|
||||
mutex_unlock(&mgr->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);
|
||||
|
||||
static int
|
||||
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
|
||||
struct drm_dp_mst_topology_state *state);
|
||||
struct drm_dp_mst_topology_state *state,
|
||||
struct drm_dp_mst_port **failing_port);
|
||||
|
||||
static int
|
||||
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
|
||||
struct drm_dp_mst_topology_state *state)
|
||||
struct drm_dp_mst_topology_state *state,
|
||||
struct drm_dp_mst_port **failing_port)
|
||||
{
|
||||
struct drm_dp_mst_atomic_payload *payload;
|
||||
struct drm_dp_mst_port *port;
|
||||
@ -5171,7 +5238,7 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
|
||||
drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
|
||||
|
||||
list_for_each_entry(port, &mstb->ports, next) {
|
||||
ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
|
||||
ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -5183,7 +5250,8 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
|
||||
|
||||
static int
|
||||
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
|
||||
struct drm_dp_mst_topology_state *state)
|
||||
struct drm_dp_mst_topology_state *state,
|
||||
struct drm_dp_mst_port **failing_port)
|
||||
{
|
||||
struct drm_dp_mst_atomic_payload *payload;
|
||||
int pbn_used = 0;
|
||||
@ -5204,13 +5272,15 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
|
||||
drm_dbg_atomic(port->mgr->dev,
|
||||
"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
|
||||
port->parent, port);
|
||||
*failing_port = port;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pbn_used = payload->pbn;
|
||||
} else {
|
||||
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
|
||||
state);
|
||||
state,
|
||||
failing_port);
|
||||
if (pbn_used <= 0)
|
||||
return pbn_used;
|
||||
}
|
||||
@ -5219,6 +5289,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
|
||||
drm_dbg_atomic(port->mgr->dev,
|
||||
"[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
|
||||
port->parent, port, pbn_used, port->full_pbn);
|
||||
*failing_port = port;
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
@ -5271,10 +5342,10 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr
|
||||
}
|
||||
|
||||
if (!payload_count)
|
||||
mst_state->pbn_div = 0;
|
||||
mst_state->pbn_div.full = dfixed_const(0);
|
||||
|
||||
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
|
||||
mgr, mst_state, mst_state->pbn_div, avail_slots,
|
||||
mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
|
||||
mst_state->total_avail_slots - avail_slots);
|
||||
|
||||
return 0;
|
||||
@ -5396,20 +5467,82 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
|
||||
|
||||
/**
|
||||
* drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
|
||||
* @state: The global atomic state
|
||||
* @mgr: Manager to check
|
||||
* @mst_state: The MST atomic state for @mgr
|
||||
* @failing_port: Returns the port with a BW limitation
|
||||
*
|
||||
* Checks the given MST manager's topology state for an atomic update to ensure
|
||||
* that it's valid. This includes checking whether there's enough bandwidth to
|
||||
* support the new timeslot allocations in the atomic update.
|
||||
*
|
||||
* Any atomic drivers supporting DP MST must make sure to call this or
|
||||
* the drm_dp_mst_atomic_check() function after checking the rest of their state
|
||||
* in their &drm_mode_config_funcs.atomic_check() callback.
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_mst_atomic_check()
|
||||
* drm_dp_atomic_find_time_slots()
|
||||
* drm_dp_atomic_release_time_slots()
|
||||
*
|
||||
* Returns:
|
||||
* - 0 if the new state is valid
|
||||
* - %-ENOSPC, if the new state is invalid, because of BW limitation
|
||||
* @failing_port is set to:
|
||||
* - The non-root port where a BW limit check failed
|
||||
* with all the ports downstream of @failing_port passing
|
||||
* the BW limit check.
|
||||
* The returned port pointer is valid until at least
|
||||
* one payload downstream of it exists.
|
||||
* - %NULL if the BW limit check failed at the root port
|
||||
* with all the ports downstream of the root port passing
|
||||
* the BW limit check.
|
||||
* - %-EINVAL, if the new state is invalid, because the root port has
|
||||
* too many payloads.
|
||||
*/
|
||||
int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_topology_state *mst_state,
|
||||
struct drm_dp_mst_port **failing_port)
|
||||
{
|
||||
int ret;
|
||||
|
||||
*failing_port = NULL;
|
||||
|
||||
if (!mgr->mst_state)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&mgr->lock);
|
||||
ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
|
||||
mst_state,
|
||||
failing_port);
|
||||
mutex_unlock(&mgr->lock);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
|
||||
|
||||
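Illustrative usage sketch (not part of the diff): a driver-side atomic check
reacting to the @failing_port output of drm_dp_mst_atomic_check_mgr(), for
instance by retrying with DSC enabled on that port. The retry policy is
hypothetical; the helper, its parameters and return values are documented above.

	struct drm_dp_mst_port *failing_port;
	int ret;

	ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &failing_port);
	if (ret == -ENOSPC && failing_port) {
		/*
		 * Not enough BW through failing_port: a driver could e.g.
		 * enable DSC on the affected streams and re-check.
		 */
	} else if (ret) {
		return ret;
	}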
/**
|
||||
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
|
||||
* atomic update is valid
|
||||
* @state: Pointer to the new &struct drm_dp_mst_topology_state
|
||||
*
|
||||
* Checks the given topology state for an atomic update to ensure that it's
|
||||
* valid. This includes checking whether there's enough bandwidth to support
|
||||
* the new timeslot allocations in the atomic update.
|
||||
* valid, calling drm_dp_mst_atomic_check_mgr() for all MST manager in the
|
||||
* atomic state. This includes checking whether there's enough bandwidth to
|
||||
* support the new timeslot allocations in the atomic update.
|
||||
*
|
||||
* Any atomic drivers supporting DP MST must make sure to call this after
|
||||
* checking the rest of their state in their
|
||||
* &drm_mode_config_funcs.atomic_check() callback.
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_mst_atomic_check_mgr()
|
||||
* drm_dp_atomic_find_time_slots()
|
||||
* drm_dp_atomic_release_time_slots()
|
||||
*
|
||||
@ -5424,21 +5557,11 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
|
||||
int i, ret = 0;
|
||||
|
||||
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
|
||||
if (!mgr->mst_state)
|
||||
continue;
|
||||
struct drm_dp_mst_port *tmp_port;
|
||||
|
||||
ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
|
||||
ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
mutex_lock(&mgr->lock);
|
||||
ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
|
||||
mst_state);
|
||||
mutex_unlock(&mgr->lock);
|
||||
if (ret < 0)
|
||||
break;
|
||||
else
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -5894,6 +6017,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
|
||||
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
{
|
||||
struct drm_dp_mst_port *immediate_upstream_port;
|
||||
struct drm_dp_aux *immediate_upstream_aux;
|
||||
struct drm_dp_mst_port *fec_port;
|
||||
struct drm_dp_desc desc = {};
|
||||
u8 endpoint_fec;
|
||||
@ -5958,21 +6082,25 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
* - Port is on primary branch device
|
||||
* - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
|
||||
*/
|
||||
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
|
||||
if (immediate_upstream_port)
|
||||
immediate_upstream_aux = &immediate_upstream_port->aux;
|
||||
else
|
||||
immediate_upstream_aux = port->mgr->aux;
|
||||
|
||||
if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
|
||||
return NULL;
|
||||
|
||||
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
|
||||
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
|
||||
port->parent == port->mgr->mst_primary) {
|
||||
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
|
||||
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
|
||||
|
||||
if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
|
||||
if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
|
||||
return NULL;
|
||||
|
||||
if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
|
||||
if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
|
||||
((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
|
||||
((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
|
||||
!= DP_DWN_STRM_PORT_TYPE_ANALOG))
|
||||
return port->mgr->aux;
|
||||
!= DP_DWN_STRM_PORT_TYPE_ANALOG)))
|
||||
return immediate_upstream_aux;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -94,7 +94,7 @@ config DRM_I915_CAPTURE_ERROR
|
||||
This option enables capturing the GPU state when a hang is detected.
|
||||
This information is vital for triaging hangs and assists in debugging.
|
||||
Please report any hang for triaging according to:
|
||||
https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
|
||||
https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
|
||||
|
||||
If in doubt, say "Y".
|
||||
|
||||
|
@ -47,33 +47,34 @@ subdir-ccflags-y += -I$(srctree)/$(src)
|
||||
# Please keep these build lists sorted!
|
||||
|
||||
# core driver code
|
||||
i915-y += i915_driver.o \
|
||||
i915_drm_client.o \
|
||||
i915_config.o \
|
||||
i915_getparam.o \
|
||||
i915_ioctl.o \
|
||||
i915_irq.o \
|
||||
i915_mitigations.o \
|
||||
i915_module.o \
|
||||
i915_params.o \
|
||||
i915_pci.o \
|
||||
i915_scatterlist.o \
|
||||
i915_suspend.o \
|
||||
i915_switcheroo.o \
|
||||
i915_sysfs.o \
|
||||
i915_utils.o \
|
||||
intel_clock_gating.o \
|
||||
intel_device_info.o \
|
||||
intel_memory_region.o \
|
||||
intel_pcode.o \
|
||||
intel_region_ttm.o \
|
||||
intel_runtime_pm.o \
|
||||
intel_sbi.o \
|
||||
intel_step.o \
|
||||
intel_uncore.o \
|
||||
intel_wakeref.o \
|
||||
vlv_sideband.o \
|
||||
vlv_suspend.o
|
||||
i915-y += \
|
||||
i915_config.o \
|
||||
i915_driver.o \
|
||||
i915_drm_client.o \
|
||||
i915_getparam.o \
|
||||
i915_ioctl.o \
|
||||
i915_irq.o \
|
||||
i915_mitigations.o \
|
||||
i915_module.o \
|
||||
i915_params.o \
|
||||
i915_pci.o \
|
||||
i915_scatterlist.o \
|
||||
i915_suspend.o \
|
||||
i915_switcheroo.o \
|
||||
i915_sysfs.o \
|
||||
i915_utils.o \
|
||||
intel_clock_gating.o \
|
||||
intel_device_info.o \
|
||||
intel_memory_region.o \
|
||||
intel_pcode.o \
|
||||
intel_region_ttm.o \
|
||||
intel_runtime_pm.o \
|
||||
intel_sbi.o \
|
||||
intel_step.o \
|
||||
intel_uncore.o \
|
||||
intel_wakeref.o \
|
||||
vlv_sideband.o \
|
||||
vlv_suspend.o
|
||||
|
||||
# core peripheral code
|
||||
i915-y += \
|
||||
@ -90,13 +91,13 @@ i915-y += \
|
||||
i915_syncmap.o \
|
||||
i915_user_extensions.o
|
||||
|
||||
i915-$(CONFIG_COMPAT) += i915_ioc32.o
|
||||
i915-$(CONFIG_COMPAT) += \
|
||||
i915_ioc32.o
|
||||
i915-$(CONFIG_DEBUG_FS) += \
|
||||
i915_debugfs.o \
|
||||
i915_debugfs_params.o \
|
||||
display/intel_display_debugfs.o \
|
||||
display/intel_pipe_crc.o
|
||||
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
|
||||
i915_debugfs_params.o
|
||||
i915-$(CONFIG_PERF_EVENTS) += \
|
||||
i915_pmu.o
|
||||
|
||||
# "Graphics Technology" (aka we talk to the gpu)
|
||||
gt-y += \
|
||||
@ -153,7 +154,8 @@ gt-y += \
|
||||
gt/sysfs_engines.o
|
||||
|
||||
# x86 intel-gtt module support
|
||||
gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
|
||||
gt-$(CONFIG_X86) += \
|
||||
gt/intel_ggtt_gmch.o
|
||||
# autogenerated null render state
|
||||
gt-y += \
|
||||
gt/gen6_renderstate.o \
|
||||
@ -172,9 +174,9 @@ gem-y += \
|
||||
gem/i915_gem_domain.o \
|
||||
gem/i915_gem_execbuffer.o \
|
||||
gem/i915_gem_internal.o \
|
||||
gem/i915_gem_object.o \
|
||||
gem/i915_gem_lmem.o \
|
||||
gem/i915_gem_mman.o \
|
||||
gem/i915_gem_object.o \
|
||||
gem/i915_gem_pages.o \
|
||||
gem/i915_gem_phys.o \
|
||||
gem/i915_gem_pm.o \
|
||||
@ -191,57 +193,61 @@ gem-y += \
|
||||
gem/i915_gem_wait.o \
|
||||
gem/i915_gemfs.o
|
||||
i915-y += \
|
||||
$(gem-y) \
|
||||
i915_active.o \
|
||||
i915_cmd_parser.o \
|
||||
i915_deps.o \
|
||||
i915_gem_evict.o \
|
||||
i915_gem_gtt.o \
|
||||
i915_gem_ww.o \
|
||||
i915_gem.o \
|
||||
i915_query.o \
|
||||
i915_request.o \
|
||||
i915_scheduler.o \
|
||||
i915_trace_points.o \
|
||||
i915_ttm_buddy_manager.o \
|
||||
i915_vma.o \
|
||||
i915_vma_resource.o
|
||||
$(gem-y) \
|
||||
i915_active.o \
|
||||
i915_cmd_parser.o \
|
||||
i915_deps.o \
|
||||
i915_gem.o \
|
||||
i915_gem_evict.o \
|
||||
i915_gem_gtt.o \
|
||||
i915_gem_ww.o \
|
||||
i915_query.o \
|
||||
i915_request.o \
|
||||
i915_scheduler.o \
|
||||
i915_trace_points.o \
|
||||
i915_ttm_buddy_manager.o \
|
||||
i915_vma.o \
|
||||
i915_vma_resource.o
|
||||
|
||||
# general-purpose microcontroller (GuC) support
|
||||
i915-y += \
|
||||
gt/uc/intel_gsc_fw.o \
|
||||
gt/uc/intel_gsc_proxy.o \
|
||||
gt/uc/intel_gsc_uc.o \
|
||||
gt/uc/intel_gsc_uc_debugfs.o \
|
||||
gt/uc/intel_gsc_uc_heci_cmd_submit.o \
|
||||
gt/uc/intel_guc.o \
|
||||
gt/uc/intel_guc_ads.o \
|
||||
gt/uc/intel_guc_capture.o \
|
||||
gt/uc/intel_guc_ct.o \
|
||||
gt/uc/intel_guc_debugfs.o \
|
||||
gt/uc/intel_guc_fw.o \
|
||||
gt/uc/intel_guc_hwconfig.o \
|
||||
gt/uc/intel_guc_log.o \
|
||||
gt/uc/intel_guc_log_debugfs.o \
|
||||
gt/uc/intel_guc_rc.o \
|
||||
gt/uc/intel_guc_slpc.o \
|
||||
gt/uc/intel_guc_submission.o \
|
||||
gt/uc/intel_huc.o \
|
||||
gt/uc/intel_huc_debugfs.o \
|
||||
gt/uc/intel_huc_fw.o \
|
||||
gt/uc/intel_uc.o \
|
||||
gt/uc/intel_uc_debugfs.o \
|
||||
gt/uc/intel_uc_fw.o
|
||||
gt/uc/intel_gsc_fw.o \
|
||||
gt/uc/intel_gsc_proxy.o \
|
||||
gt/uc/intel_gsc_uc.o \
|
||||
gt/uc/intel_gsc_uc_debugfs.o \
|
||||
gt/uc/intel_gsc_uc_heci_cmd_submit.o\
|
||||
gt/uc/intel_guc.o \
|
||||
gt/uc/intel_guc_ads.o \
|
||||
gt/uc/intel_guc_capture.o \
|
||||
gt/uc/intel_guc_ct.o \
|
||||
gt/uc/intel_guc_debugfs.o \
|
||||
gt/uc/intel_guc_fw.o \
|
||||
gt/uc/intel_guc_hwconfig.o \
|
||||
gt/uc/intel_guc_log.o \
|
||||
gt/uc/intel_guc_log_debugfs.o \
|
||||
gt/uc/intel_guc_rc.o \
|
||||
gt/uc/intel_guc_slpc.o \
|
||||
gt/uc/intel_guc_submission.o \
|
||||
gt/uc/intel_huc.o \
|
||||
gt/uc/intel_huc_debugfs.o \
|
||||
gt/uc/intel_huc_fw.o \
|
||||
gt/uc/intel_uc.o \
|
||||
gt/uc/intel_uc_debugfs.o \
|
||||
gt/uc/intel_uc_fw.o
|
||||
|
||||
# graphics system controller (GSC) support
|
||||
i915-y += gt/intel_gsc.o
|
||||
i915-y += \
|
||||
gt/intel_gsc.o
|
||||
|
||||
# graphics hardware monitoring (HWMON) support
|
||||
i915-$(CONFIG_HWMON) += i915_hwmon.o
|
||||
i915-$(CONFIG_HWMON) += \
|
||||
i915_hwmon.o
|
||||
|
||||
# modesetting core code
|
||||
i915-y += \
|
||||
display/hsw_ips.o \
|
||||
display/i9xx_plane.o \
|
||||
display/i9xx_wm.o \
|
||||
display/intel_atomic.o \
|
||||
display/intel_atomic_plane.o \
|
||||
display/intel_audio.o \
|
||||
@ -257,6 +263,7 @@ i915-y += \
|
||||
display/intel_display.o \
|
||||
display/intel_display_driver.o \
|
||||
display/intel_display_irq.o \
|
||||
display/intel_display_params.o \
|
||||
display/intel_display_power.o \
|
||||
display/intel_display_power_map.o \
|
||||
display/intel_display_power_well.o \
|
||||
@ -268,8 +275,10 @@ i915-y += \
|
||||
display/intel_dpll.o \
|
||||
display/intel_dpll_mgr.o \
|
||||
display/intel_dpt.o \
|
||||
display/intel_dpt_common.o \
|
||||
display/intel_drrs.o \
|
||||
display/intel_dsb.o \
|
||||
display/intel_dsb_buffer.o \
|
||||
display/intel_fb.o \
|
||||
display/intel_fb_pin.o \
|
||||
display/intel_fbc.o \
|
||||
@ -287,8 +296,8 @@ i915-y += \
|
||||
display/intel_load_detect.o \
|
||||
display/intel_lpe_audio.o \
|
||||
display/intel_modeset_lock.o \
|
||||
display/intel_modeset_verify.o \
|
||||
display/intel_modeset_setup.o \
|
||||
display/intel_modeset_verify.o \
|
||||
display/intel_overlay.o \
|
||||
display/intel_pch_display.o \
|
||||
display/intel_pch_refclk.o \
|
||||
@ -302,8 +311,6 @@ i915-y += \
|
||||
display/intel_vblank.o \
|
||||
display/intel_vga.o \
|
||||
display/intel_wm.o \
|
||||
display/i9xx_plane.o \
|
||||
display/i9xx_wm.o \
|
||||
display/skl_scaler.o \
|
||||
display/skl_universal_plane.o \
|
||||
display/skl_watermark.o
|
||||
@ -312,6 +319,10 @@ i915-$(CONFIG_ACPI) += \
|
||||
display/intel_opregion.o
|
||||
i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
|
||||
display/intel_fbdev.o
|
||||
i915-$(CONFIG_DEBUG_FS) += \
|
||||
display/intel_display_debugfs.o \
|
||||
display/intel_display_debugfs_params.o \
|
||||
display/intel_pipe_crc.o
|
||||
|
||||
# modesetting output/encoder code
|
||||
i915-y += \
|
||||
@ -357,13 +368,14 @@ i915-y += \
|
||||
display/vlv_dsi.o \
|
||||
display/vlv_dsi_pll.o
|
||||
|
||||
i915-y += i915_perf.o
|
||||
i915-y += \
|
||||
i915_perf.o
|
||||
|
||||
# Protected execution platform (PXP) support. Base support is required for HuC
|
||||
i915-y += \
|
||||
pxp/intel_pxp.o \
|
||||
pxp/intel_pxp_tee.o \
|
||||
pxp/intel_pxp_huc.o
|
||||
pxp/intel_pxp_huc.o \
|
||||
pxp/intel_pxp_tee.o
|
||||
|
||||
i915-$(CONFIG_DRM_I915_PXP) += \
|
||||
pxp/intel_pxp_cmd.o \
|
||||
@ -374,11 +386,11 @@ i915-$(CONFIG_DRM_I915_PXP) += \
|
||||
pxp/intel_pxp_session.o
|
||||
|
||||
# Post-mortem debug and GPU hang state capture
|
||||
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
|
||||
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \
|
||||
i915_gpu_error.o
|
||||
i915-$(CONFIG_DRM_I915_SELFTEST) += \
|
||||
gem/selftests/i915_gem_client_blt.o \
|
||||
gem/selftests/igt_gem_utils.o \
|
||||
selftests/intel_scheduler_helpers.o \
|
||||
selftests/i915_random.o \
|
||||
selftests/i915_selftest.o \
|
||||
selftests/igt_atomic.o \
|
||||
@ -387,10 +399,12 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
|
||||
selftests/igt_mmap.o \
|
||||
selftests/igt_reset.o \
|
||||
selftests/igt_spinner.o \
|
||||
selftests/intel_scheduler_helpers.o \
|
||||
selftests/librapl.o
|
||||
|
||||
# virtual gpu code
|
||||
i915-y += i915_vgpu.o
|
||||
i915-y += \
|
||||
i915_vgpu.o
|
||||
|
||||
i915-$(CONFIG_DRM_I915_GVT) += \
|
||||
intel_gvt.o \
|
||||
|
@ -432,7 +432,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
|
||||
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, intel_dp->output_reg);
|
||||
|
||||
intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
|
||||
intel_dp->DP &= ~DP_PORT_EN;
|
||||
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, intel_dp->output_reg);
|
||||
|
||||
@ -475,6 +475,40 @@ intel_dp_link_down(struct intel_encoder *encoder,
|
||||
}
|
||||
}
|
||||
|
||||
static void g4x_dp_audio_enable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (!crtc_state->has_audio)
|
||||
return;
|
||||
|
||||
/* Enable audio presence detect */
|
||||
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
|
||||
intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
|
||||
|
||||
intel_audio_codec_enable(encoder, crtc_state, conn_state);
|
||||
}
|
||||
|
||||
static void g4x_dp_audio_disable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (!old_crtc_state->has_audio)
|
||||
return;
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
/* Disable audio presence detect */
|
||||
intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
|
||||
intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
|
||||
}
|
||||
|
||||
static void intel_disable_dp(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
@ -484,8 +518,6 @@ static void intel_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_dp->link_trained = false;
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
/*
|
||||
* Make sure the panel is off before trying to change the mode.
|
||||
* But also ensure that we have vdd while we switch off the panel.
|
||||
@ -631,8 +663,6 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
|
||||
* fail when the power sequencer is freshly used for this port.
|
||||
*/
|
||||
intel_dp->DP |= DP_PORT_EN;
|
||||
if (crtc_state->has_audio)
|
||||
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
|
||||
|
||||
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, intel_dp->output_reg);
|
||||
@ -686,8 +716,8 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
intel_enable_dp(state, encoder, pipe_config, conn_state);
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
intel_edp_backlight_on(pipe_config, conn_state);
|
||||
encoder->audio_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void vlv_enable_dp(struct intel_atomic_state *state,
|
||||
@ -695,8 +725,8 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
intel_edp_backlight_on(pipe_config, conn_state);
|
||||
encoder->audio_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
|
||||
@ -1325,6 +1355,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
|
||||
intel_encoder->disable = g4x_disable_dp;
|
||||
intel_encoder->post_disable = g4x_post_disable_dp;
|
||||
}
|
||||
intel_encoder->audio_enable = g4x_dp_audio_enable;
|
||||
intel_encoder->audio_disable = g4x_dp_audio_disable;
|
||||
|
||||
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
|
||||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
|
||||
|
@ -228,25 +228,51 @@ static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
|
||||
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
|
||||
temp |= SDVO_ENABLE;
|
||||
if (pipe_config->has_audio)
|
||||
temp |= HDMI_AUDIO_ENABLE;
|
||||
|
||||
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
|
||||
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
}
|
||||
|
||||
static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
|
||||
|
||||
if (!crtc_state->has_audio)
|
||||
return;
|
||||
|
||||
drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
|
||||
|
||||
/* Enable audio presence detect */
|
||||
intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
|
||||
|
||||
intel_audio_codec_enable(encoder, crtc_state, conn_state);
|
||||
}
|
||||
|
||||
static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
|
||||
|
||||
if (!old_crtc_state->has_audio)
|
||||
return;
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
/* Disable audio presence detect */
|
||||
intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
|
||||
}
|
||||
|
||||
static void g4x_enable_hdmi(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
g4x_hdmi_enable_port(encoder, pipe_config);
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
|
||||
!pipe_config->has_hdmi_sink);
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void ibx_enable_hdmi(struct intel_atomic_state *state,
|
||||
@ -262,8 +288,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
|
||||
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
|
||||
temp |= SDVO_ENABLE;
|
||||
if (pipe_config->has_audio)
|
||||
temp |= HDMI_AUDIO_ENABLE;
|
||||
|
||||
/*
|
||||
* HW workaround, need to write this twice for issue
|
||||
@ -296,10 +320,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
|
||||
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
|
||||
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
}
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
|
||||
!pipe_config->has_hdmi_sink);
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void cpt_enable_hdmi(struct intel_atomic_state *state,
|
||||
@ -317,8 +337,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
|
||||
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
|
||||
temp |= SDVO_ENABLE;
|
||||
if (pipe_config->has_audio)
|
||||
temp |= HDMI_AUDIO_ENABLE;
|
||||
|
||||
/*
|
||||
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
|
||||
@ -351,10 +369,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
|
||||
intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
|
||||
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
|
||||
}
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
|
||||
!pipe_config->has_hdmi_sink);
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void vlv_enable_hdmi(struct intel_atomic_state *state,
|
||||
@ -362,11 +376,6 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
|
||||
!pipe_config->has_hdmi_sink);
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void intel_disable_hdmi(struct intel_atomic_state *state,
|
||||
@ -384,7 +393,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
|
||||
|
||||
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
|
||||
temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
|
||||
temp &= ~SDVO_ENABLE;
|
||||
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
|
||||
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
|
||||
|
||||
@ -433,8 +442,6 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
|
||||
}
|
||||
|
||||
@ -443,7 +450,6 @@ static void pch_disable_hdmi(struct intel_atomic_state *state,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
}
|
||||
|
||||
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
|
||||
@ -750,6 +756,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
|
||||
else
|
||||
intel_encoder->enable = g4x_enable_hdmi;
|
||||
}
|
||||
intel_encoder->audio_enable = g4x_hdmi_audio_enable;
|
||||
intel_encoder->audio_disable = g4x_hdmi_audio_disable;
|
||||
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
|
||||
|
||||
intel_encoder->type = INTEL_OUTPUT_HDMI;
|
||||
|
@ -193,7 +193,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
|
||||
if (!hsw_crtc_supports_ips(crtc))
|
||||
return false;
|
||||
|
||||
if (!i915->params.enable_ips)
|
||||
if (!i915->display.params.enable_ips)
|
||||
return false;
|
||||
|
||||
if (crtc_state->pipe_bpp > 24)
|
||||
@ -329,7 +329,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
|
||||
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
|
||||
|
||||
seq_printf(m, "Enabled by kernel parameter: %s\n",
|
||||
str_yes_no(i915->params.enable_ips));
|
||||
str_yes_no(i915->display.params.enable_ips));
|
||||
|
||||
if (DISPLAY_VER(i915) >= 8) {
|
||||
seq_puts(m, "Currently: unknown\n");
|
||||
|
@ -2993,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
|
||||
|
||||
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
|
||||
if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
|
||||
dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
|
||||
dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
|
||||
for (level = 2; level < num_levels; level++) {
|
||||
struct intel_wm_level *wm = &merged->wm[level];
|
||||
|
||||
|
@ -330,7 +330,7 @@ static int afe_clk(struct intel_encoder *encoder,
|
||||
int bpp;
|
||||
|
||||
if (crtc_state->dsc.compression_enable)
|
||||
bpp = crtc_state->dsc.compressed_bpp;
|
||||
bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
|
||||
else
|
||||
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
|
||||
|
||||
@ -860,7 +860,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
|
||||
* compressed and non-compressed bpp.
|
||||
*/
|
||||
if (crtc_state->dsc.compression_enable) {
|
||||
mul = crtc_state->dsc.compressed_bpp;
|
||||
mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
|
||||
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
|
||||
}
|
||||
|
||||
@ -884,7 +884,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
|
||||
int bpp, line_time_us, byte_clk_period_ns;
|
||||
|
||||
if (crtc_state->dsc.compression_enable)
|
||||
bpp = crtc_state->dsc.compressed_bpp;
|
||||
bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
|
||||
else
|
||||
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
|
||||
|
||||
@ -1451,8 +1451,8 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
|
||||
if (pipe_config->dsc.compressed_bpp) {
|
||||
int div = pipe_config->dsc.compressed_bpp;
|
||||
if (pipe_config->dsc.compressed_bpp_x16) {
|
||||
int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
|
||||
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
|
||||
|
||||
adjusted_mode->crtc_htotal =
|
||||
|
@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
|
||||
|
||||
drm_atomic_state_default_release(&state->base);
|
||||
kfree(state->global_objs);
|
||||
|
||||
i915_sw_fence_fini(&state->commit_ready);
|
||||
|
||||
kfree(state);
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,10 @@
|
||||
* prepare/check/commit/cleanup steps.
|
||||
*/
|
||||
|
||||
#include <linux/dma-fence-chain.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_gem_atomic_helper.h>
|
||||
#include <drm/drm_blend.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
|
||||
@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_dma_resv_fences(struct dma_resv *resv,
|
||||
struct drm_plane_state *new_plane_state)
|
||||
{
|
||||
struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
|
||||
struct dma_fence *new;
|
||||
int ret;
|
||||
|
||||
ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
if (new && fence) {
|
||||
struct dma_fence_chain *chain = dma_fence_chain_alloc();
|
||||
|
||||
if (!chain) {
|
||||
ret = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
dma_fence_chain_init(chain, fence, new, 1);
|
||||
fence = &chain->base;
|
||||
|
||||
} else if (new) {
|
||||
fence = new;
|
||||
}
|
||||
|
||||
dma_fence_put(new_plane_state->fence);
|
||||
new_plane_state->fence = fence;
|
||||
return 0;
|
||||
|
||||
error:
|
||||
dma_fence_put(fence);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_prepare_plane_fb - Prepare fb for usage on plane
|
||||
* @_plane: drm plane to prepare for
|
||||
@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
|
||||
struct intel_atomic_state *state =
|
||||
to_intel_atomic_state(new_plane_state->uapi.state);
|
||||
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
|
||||
const struct intel_plane_state *old_plane_state =
|
||||
struct intel_plane_state *old_plane_state =
|
||||
intel_atomic_get_old_plane_state(state, plane);
|
||||
struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
|
||||
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
|
||||
@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
|
||||
* can safely continue.
|
||||
*/
|
||||
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
|
||||
ret = i915_sw_fence_await_reservation(&state->commit_ready,
|
||||
old_obj->base.resv,
|
||||
false, 0,
|
||||
GFP_KERNEL);
|
||||
ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
|
||||
&new_plane_state->uapi);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (new_plane_state->uapi.fence) { /* explicit fencing */
|
||||
i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
|
||||
&attr);
|
||||
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
|
||||
new_plane_state->uapi.fence,
|
||||
i915_fence_timeout(dev_priv),
|
||||
GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!obj)
|
||||
return 0;
|
||||
|
||||
|
||||
ret = intel_plane_pin_fb(new_plane_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_object_wait_priority(obj, 0, &attr);
|
||||
ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
|
||||
if (ret < 0)
|
||||
goto unpin_fb;
|
||||
|
||||
if (!new_plane_state->uapi.fence) { /* implicit fencing */
|
||||
struct dma_resv_iter cursor;
|
||||
struct dma_fence *fence;
|
||||
if (new_plane_state->uapi.fence) {
|
||||
i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
|
||||
&attr);
|
||||
|
||||
ret = i915_sw_fence_await_reservation(&state->commit_ready,
|
||||
obj->base.resv, false,
|
||||
i915_fence_timeout(dev_priv),
|
||||
GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto unpin_fb;
|
||||
|
||||
dma_resv_iter_begin(&cursor, obj->base.resv,
|
||||
DMA_RESV_USAGE_WRITE);
|
||||
dma_resv_for_each_fence_unlocked(&cursor, fence) {
|
||||
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
|
||||
fence);
|
||||
}
|
||||
dma_resv_iter_end(&cursor);
|
||||
} else {
|
||||
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
|
||||
new_plane_state->uapi.fence);
|
||||
}
|
||||
|
@@ -522,25 +522,25 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
unsigned int fec_coeff, cdclk, vdsc_bpp;
unsigned int fec_coeff, cdclk, vdsc_bppx16;
unsigned int link_clk, lanes;
unsigned int hblank_rise;

h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bpp = crtc_state->dsc.compressed_bpp;
vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;

drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
"lanes = %u vdsc_bpp = %u cdclk = %u\n",
h_active, link_clk, lanes, vdsc_bpp, cdclk);
drm_dbg_kms(&i915->drm,
"h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);

if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;

link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
@@ -552,8 +552,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
mul_u32_u32(link_clk, cdclk));

tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
mul_u32_u32(link_clk * lanes, fec_coeff));
tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000),
mul_u32_u32(link_clk * lanes * 16, fec_coeff));
tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
mul_u32_u32(64 * pixel_clk, 1000000));
link_clks_active = (tu_line - 1) * 64 + tu_data;
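The hblank early programming above now reads the DSC compressed bpp as a .4 binary fixed-point value (compressed_bpp_x16), which is what lets fractional bpp values survive the TU math (note the extra * 16 in the divisor). A small standalone sketch of that convention, assuming the helpers simply scale by 16; the helper names below are illustrative:

```c
#include <stdio.h>

/* bpp stored as a .4 fixed-point value: 16 units == 1 bit per pixel */
static unsigned int to_bpp_x16(unsigned int bpp)       { return bpp * 16; }
static unsigned int bpp_x16_int(unsigned int bpp_x16)  { return bpp_x16 / 16; }
static unsigned int bpp_x16_frac(unsigned int bpp_x16) { return (bpp_x16 & 0xf) * 10000 / 16; }

int main(void)
{
	unsigned int bpp_x16 = 191; /* 11 + 15/16 bits per pixel */

	/* prints "11.9375": the integer and fractional parts of the x16 value */
	printf("%u.%04u\n", bpp_x16_int(bpp_x16), bpp_x16_frac(bpp_x16));
	return 0;
}
```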
@ -88,10 +88,10 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
|
||||
|
||||
drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
|
||||
|
||||
if (i915->params.invert_brightness < 0)
|
||||
if (i915->display.params.invert_brightness < 0)
|
||||
return val;
|
||||
|
||||
if (i915->params.invert_brightness > 0 ||
|
||||
if (i915->display.params.invert_brightness > 0 ||
|
||||
intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
|
||||
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
|
||||
}
|
||||
@ -132,8 +132,9 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
|
||||
drm_WARN_ON_ONCE(&i915->drm,
|
||||
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
|
||||
|
||||
if (i915->params.invert_brightness > 0 ||
|
||||
(i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
|
||||
if (i915->display.params.invert_brightness > 0 ||
|
||||
(i915->display.params.invert_brightness == 0 &&
|
||||
intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
|
||||
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
|
||||
|
||||
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
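The backlight hunks above keep the inversion formula, new value = pwm_level_max - (value - pwm_level_min), and then rescale the result into the logical brightness range. A minimal userspace sketch of that invert-then-scale mapping; the scale_range() helper is illustrative, not the driver's own:

```c
#include <stdint.h>

/* Linearly map val from [smin, smax] into [dmin, dmax]. */
static uint32_t scale_range(uint32_t val, uint32_t smin, uint32_t smax,
			    uint32_t dmin, uint32_t dmax)
{
	return dmin + (uint64_t)(val - smin) * (dmax - dmin) / (smax - smin);
}

/* Invert a PWM duty value inside its own range, then map it to 0..max_level. */
static uint32_t pwm_to_level_inverted(uint32_t val, uint32_t pwm_min,
				      uint32_t pwm_max, uint32_t max_level)
{
	val = pwm_max - (val - pwm_min);
	return scale_range(val, pwm_min, pwm_max, 0, max_level);
}
```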
@ -1116,7 +1116,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
|
||||
struct drm_display_mode *panel_fixed_mode;
|
||||
int index;
|
||||
|
||||
index = i915->params.vbt_sdvo_panel_type;
|
||||
index = i915->display.params.vbt_sdvo_panel_type;
|
||||
if (index == -2) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Ignore SDVO panel mode from BIOS VBT tables.\n");
|
||||
@ -1514,9 +1514,9 @@ parse_edp(struct drm_i915_private *i915,
|
||||
u8 vswing;
|
||||
|
||||
/* Don't read from VBT if module parameter has valid value*/
|
||||
if (i915->params.edp_vswing) {
|
||||
if (i915->display.params.edp_vswing) {
|
||||
panel->vbt.edp.low_vswing =
|
||||
i915->params.edp_vswing == 1;
|
||||
i915->display.params.edp_vswing == 1;
|
||||
} else {
|
||||
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
|
||||
panel->vbt.edp.low_vswing = vswing == 0;
|
||||
@ -2473,6 +2473,27 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
|
||||
devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
|
||||
}
|
||||
|
||||
static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
|
||||
enum port port)
|
||||
{
|
||||
struct drm_i915_private *i915 = devdata->i915;
|
||||
|
||||
if (!intel_bios_encoder_supports_dvi(devdata))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Some BDW machines (eg. HP Pavilion 15-ab) shipped
|
||||
* with a HSW VBT where the level shifter value goes
|
||||
* up to 11, whereas the BDW max is 9.
|
||||
*/
|
||||
if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
|
||||
drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
|
||||
port_name(port), devdata->child.hdmi_level_shifter_value, 9);
|
||||
|
||||
devdata->child.hdmi_level_shifter_value = 9;
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
|
||||
{
|
||||
@ -2652,6 +2673,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
|
||||
}
|
||||
|
||||
sanitize_device_type(devdata, port);
|
||||
sanitize_hdmi_level_shift(devdata, port);
|
||||
}
|
||||
|
||||
static bool has_ddi_port_info(struct drm_i915_private *i915)
|
||||
@ -3392,8 +3414,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
|
||||
|
||||
crtc_state->pipe_bpp = bpc * 3;
|
||||
|
||||
crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
|
||||
VBT_DSC_MAX_BPP(dsc->max_bpp));
|
||||
crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
|
||||
VBT_DSC_MAX_BPP(dsc->max_bpp)));
|
||||
|
||||
/*
|
||||
* FIXME: This is ugly, and slice count should take DSC engine
|
||||
|
@ -2598,8 +2598,9 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
|
||||
* => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
|
||||
*/
|
||||
int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24;
|
||||
int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) /
|
||||
(2 * bigjoiner_interface_bits);
|
||||
int min_cdclk_bj =
|
||||
(to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
|
||||
pixel_clock) / (2 * bigjoiner_interface_bits);
|
||||
|
||||
min_cdclk = max(min_cdclk, min_cdclk_bj);
|
||||
}
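For scale, a made-up example of the bigjoiner CDCLK floor computed above: with a rounded-up compressed bpp of 12, a 600000 kHz pixel clock and the 36-bit bigjoiner interface of newer platforms, min_cdclk_bj = 12 * 600000 / (2 * 36) = 100000 kHz, and the final min_cdclk is the larger of this and the previously computed value.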
@@ -785,14 +785,12 @@ static void chv_assign_csc(struct intel_crtc_state *crtc_state)
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, int bit_precision)
{
u32 max = 0xffff >> (16 - bit_precision);

val = clamp_val(val, 0, max);

if (bit_precision < 16)
val <<= 16 - bit_precision;

return val;
if (bit_precision > 16)
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1),
(1 << bit_precision) - 1);
else
return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1),
(1 << bit_precision) - 1);
}

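The rewritten intel_color_lut_pack() above converts an N-bit hardware LUT entry to the 16-bit UAPI range by scaling against the two full-scale values and rounding to nearest, rather than shifting. A standalone sketch of the same rounding conversion in plain C:

```c
#include <stdint.h>
#include <stdio.h>

/* Scale an N-bit hardware LUT value to 16 bits, rounding to nearest. */
static uint32_t lut_pack(uint32_t val, int bit_precision)
{
	uint64_t num = (uint64_t)val * 0xffff;
	uint64_t den = (1ull << bit_precision) - 1;

	return (uint32_t)((num + den / 2) / den);
}

int main(void)
{
	/* A 10-bit mid-scale value: 512 * 65535 / 1023 rounds to 32800,
	 * whereas the old shift-based conversion gave 512 << 6 = 32768. */
	printf("%u\n", lut_pack(512, 10));
	return 0;
}
```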
static u32 i9xx_lut_8(const struct drm_color_lut *color)
|
||||
@ -911,7 +909,7 @@ static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
|
||||
static u16 i965_lut_11p6_max_pack(u32 val)
|
||||
{
|
||||
/* PIPEGCMAX is 11.6, clamp to 10.6 */
|
||||
return clamp_val(val, 0, 0xffff);
|
||||
return min(val, 0xffffu);
|
||||
}
|
||||
|
||||
static u32 ilk_lut_10(const struct drm_color_lut *color)
|
||||
@ -1528,14 +1526,27 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
|
||||
return 35;
|
||||
}
|
||||
|
||||
/*
|
||||
* change_lut_val_precision: helper function to upscale or downscale lut values.
|
||||
* Parameters 'to' and 'from' needs to be less than 32. This should be sufficient
|
||||
* as currently there are no lut values exceeding 32 bit.
|
||||
*/
|
||||
static u32 change_lut_val_precision(u32 lut_val, int to, int from)
|
||||
static u32 glk_degamma_lut(const struct drm_color_lut *color)
|
||||
{
|
||||
return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
|
||||
return color->green;
|
||||
}
|
||||
|
||||
static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
|
||||
{
|
||||
/* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */
|
||||
entry->red = entry->green = entry->blue = min(val, 0xffffu);
|
||||
}
|
||||
|
||||
static u32 mtl_degamma_lut(const struct drm_color_lut *color)
|
||||
{
|
||||
return drm_color_lut_extract(color->green, 24);
|
||||
}
|
||||
|
||||
static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
|
||||
{
|
||||
/* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */
|
||||
entry->red = entry->green = entry->blue =
|
||||
intel_color_lut_pack(min(val, 0xffffffu), 24);
|
||||
}
|
||||
|
||||
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
|
||||
@ -1572,20 +1583,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
|
||||
* ToDo: Extend to max 7.0. Enable 32 bit input value
|
||||
* as compared to just 16 to achieve this.
|
||||
*/
|
||||
u32 lut_val;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 14)
|
||||
lut_val = change_lut_val_precision(lut[i].green, 24, 16);
|
||||
else
|
||||
lut_val = lut[i].green;
|
||||
|
||||
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
|
||||
lut_val);
|
||||
DISPLAY_VER(i915) >= 14 ?
|
||||
mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
|
||||
}
|
||||
|
||||
/* Clamp values > 1.0. */
|
||||
while (i++ < glk_degamma_lut_size(i915))
|
||||
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
|
||||
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
|
||||
DISPLAY_VER(i915) >= 14 ?
|
||||
1 << 24 : 1 << 16);
|
||||
|
||||
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
|
||||
}
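Per the comments above, the GLK degamma data register takes a 3.16 fixed-point entry while the MTL one takes 3.24, so the per-platform helpers extract 16 or 24 significant bits from the 16-bit LUT property (via drm_color_lut_extract()) and pack them back on readout. A sketch of the upscale direction using the same round-to-nearest idea; illustrative only, not the DRM helper itself:

```c
#include <stdint.h>

/*
 * Expand a 16-bit LUT property value to an N-bit hardware value (N may be
 * larger than 16, e.g. 24 for the MTL 3.24 degamma format), rounding to
 * nearest full-scale-relative step.
 */
static uint32_t lut_extract(uint32_t val, int bit_precision)
{
	uint64_t num = (uint64_t)val * ((1ull << bit_precision) - 1);

	return (uint32_t)((num + 0xffff / 2) / 0xffff);
}
```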
@ -3572,17 +3579,10 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
|
||||
for (i = 0; i < lut_size; i++) {
|
||||
u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
|
||||
|
||||
/*
|
||||
* For MTL and beyond, convert back the 24 bit lut values
|
||||
* read from HW to 16 bit values to maintain parity with
|
||||
* userspace values
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) >= 14)
|
||||
val = change_lut_val_precision(val, 16, 24);
|
||||
|
||||
lut[i].red = val;
|
||||
lut[i].green = val;
|
||||
lut[i].blue = val;
|
||||
mtl_degamma_lut_pack(&lut[i], val);
|
||||
else
|
||||
glk_degamma_lut_pack(&lut[i], val);
|
||||
}
|
||||
|
||||
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
|
||||
|
@ -841,7 +841,7 @@ intel_crt_detect(struct drm_connector *connector,
|
||||
if (!intel_display_device_enabled(dev_priv))
|
||||
return connector_status_disconnected;
|
||||
|
||||
if (dev_priv->params.load_detect_test) {
|
||||
if (dev_priv->display.params.load_detect_test) {
|
||||
wakeref = intel_display_power_get(dev_priv,
|
||||
intel_encoder->power_domain);
|
||||
goto load_detect;
|
||||
@ -901,7 +901,7 @@ load_detect:
|
||||
else if (DISPLAY_VER(dev_priv) < 4)
|
||||
status = intel_crt_load_detect(crt,
|
||||
to_intel_crtc(connector->state->crtc)->pipe);
|
||||
else if (dev_priv->params.load_detect_test)
|
||||
else if (dev_priv->display.params.load_detect_test)
|
||||
status = connector_status_disconnected;
|
||||
else
|
||||
status = connector_status_unknown;
|
||||
|
@@ -31,7 +31,7 @@

bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C)
if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;

return false;
@ -206,6 +206,13 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
|
||||
|
||||
intel_clear_response_ready_flag(i915, port, lane);
|
||||
|
||||
/*
|
||||
* FIXME: Workaround to let HW to settle
|
||||
* down and let the message bus to end up
|
||||
* in a known state
|
||||
*/
|
||||
intel_cx0_bus_reset(i915, port, lane);
|
||||
|
||||
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
|
||||
}
|
||||
|
||||
@ -285,6 +292,13 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
|
||||
|
||||
intel_clear_response_ready_flag(i915, port, lane);
|
||||
|
||||
/*
|
||||
* FIXME: Workaround to let HW to settle
|
||||
* down and let the message bus to end up
|
||||
* in a known state
|
||||
*/
|
||||
intel_cx0_bus_reset(i915, port, lane);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1850,8 +1864,8 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_c10pll_state *pll_state)
|
||||
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_c10pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
u8 lane = INTEL_CX0_LANE0;
|
||||
@ -2103,8 +2117,8 @@ static bool intel_c20_use_mplla(u32 clock)
|
||||
return false;
|
||||
}
|
||||
|
||||
void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_c20pll_state *pll_state)
|
||||
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_c20pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
bool cntx;
|
||||
@ -2378,8 +2392,8 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
|
||||
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
|
||||
}
|
||||
|
||||
int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c10pll_state *pll_state)
|
||||
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c10pll_state *pll_state)
|
||||
{
|
||||
unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
|
||||
unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
|
||||
@ -2405,8 +2419,8 @@ int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
return tmpclk;
|
||||
}
|
||||
|
||||
int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c20pll_state *pll_state)
|
||||
static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c20pll_state *pll_state)
|
||||
{
|
||||
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
|
||||
unsigned int multiplier, refclk = 38400;
|
||||
@ -3003,17 +3017,110 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
|
||||
return ICL_PORT_DPLL_DEFAULT;
|
||||
}
|
||||
|
||||
void intel_c10pll_state_verify(struct intel_atomic_state *state,
|
||||
static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder,
|
||||
struct intel_c10pll_state *mpllb_hw_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
|
||||
u8 expected = mpllb_sw_state->pll[i];
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
expected, mpllb_hw_state->pll[i]);
|
||||
}
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
mpllb_sw_state->tx, mpllb_hw_state->tx);
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
mpllb_sw_state->cmn, mpllb_hw_state->cmn);
|
||||
}
|
||||
|
||||
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_cx0pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
|
||||
else
|
||||
intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
|
||||
}
|
||||
|
||||
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_cx0pll_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
|
||||
|
||||
return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
|
||||
}
|
||||
|
||||
static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder,
|
||||
struct intel_c20pll_state *mpll_hw_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
|
||||
bool use_mplla;
|
||||
int i;
|
||||
|
||||
use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
|
||||
if (use_mplla) {
|
||||
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
|
||||
I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
|
||||
"[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
|
||||
I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
|
||||
"[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
|
||||
I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
|
||||
"[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
|
||||
I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
|
||||
"[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_cx0pll_state_verify(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
const struct intel_crtc_state *new_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_c10pll_state mpllb_hw_state = {};
|
||||
const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
|
||||
struct intel_encoder *encoder;
|
||||
struct intel_cx0pll_state mpll_hw_state = {};
|
||||
enum phy phy;
|
||||
int i;
|
||||
|
||||
if (DISPLAY_VER(i915) < 14)
|
||||
return;
|
||||
@ -3029,27 +3136,10 @@ void intel_c10pll_state_verify(struct intel_atomic_state *state,
|
||||
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
|
||||
phy = intel_port_to_phy(i915, encoder->port);
|
||||
|
||||
if (!intel_is_c10phy(i915, phy))
|
||||
return;
|
||||
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
|
||||
|
||||
intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
|
||||
u8 expected = mpllb_sw_state->pll[i];
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name, i,
|
||||
expected, mpllb_hw_state.pll[i]);
|
||||
}
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
mpllb_sw_state->tx, mpllb_hw_state.tx);
|
||||
|
||||
I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn,
|
||||
"[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
mpllb_sw_state->cmn, mpllb_hw_state.cmn);
|
||||
if (intel_is_c10phy(i915, phy))
|
||||
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
|
||||
else
|
||||
intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ struct drm_i915_private;
|
||||
struct intel_atomic_state;
|
||||
struct intel_c10pll_state;
|
||||
struct intel_c20pll_state;
|
||||
struct intel_cx0pll_state;
|
||||
struct intel_crtc;
|
||||
struct intel_crtc_state;
|
||||
struct intel_encoder;
|
||||
@ -28,20 +29,19 @@ void intel_mtl_pll_disable(struct intel_encoder *encoder);
|
||||
enum icl_port_dpll_id
|
||||
intel_mtl_port_pll_type(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state);
|
||||
|
||||
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
|
||||
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_cx0pll_state *pll_state);
|
||||
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_cx0pll_state *pll_state);
|
||||
|
||||
void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv,
|
||||
const struct intel_c10pll_state *hw_state);
|
||||
int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c10pll_state *pll_state);
|
||||
void intel_c10pll_state_verify(struct intel_atomic_state *state,
|
||||
void intel_cx0pll_state_verify(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc);
|
||||
void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
|
||||
struct intel_c20pll_state *pll_state);
|
||||
void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
|
||||
const struct intel_c20pll_state *hw_state);
|
||||
int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
|
||||
const struct intel_c20pll_state *pll_state);
|
||||
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
|
||||
|
@ -25,6 +25,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/string_helpers.h>
|
||||
|
||||
#include <drm/display/drm_scdc_helper.h>
|
||||
@ -2210,16 +2211,87 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
|
||||
}
|
||||
|
||||
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);

if (!crtc_state->fec_enable)
return;

if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
drm_dbg_kms(&i915->drm,
"Failed to set FEC_READY in the sink\n");
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
enable ? DP_FEC_READY : 0) <= 0)
drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
enable ? "enabled" : "disabled");

if (enable &&
drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
}

static int read_fec_detected_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;

ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
if (ret < 0)
return ret;

return status;
}

static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
struct drm_i915_private *i915 = to_i915(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
int status;
int err;

err = readx_poll_timeout(read_fec_detected_status, aux, status,
status & mask || status < 0,
10000, 200000);

if (!err && status >= 0)
return;

if (err == -ETIMEDOUT)
drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n",
str_enabled_disabled(enabled));
else
drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status);
}
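wait_for_fec_detected() above is built on readx_poll_timeout() from <linux/iopoll.h> (newly included in this patch), which repeatedly calls an accessor until a condition holds or the timeout expires. A minimal sketch of the same polling pattern against a hypothetical status register; the register bit and helper names are made up:

```c
#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_STATUS_READY BIT(0)

static u32 my_read_status(void __iomem *reg)
{
	return readl(reg);
}

/* Poll every 10 us, give up after 2 ms; returns 0 on success or -ETIMEDOUT. */
static int my_wait_ready(void __iomem *reg)
{
	u32 status;

	return readx_poll_timeout(my_read_status, reg, status,
				  status & MY_STATUS_READY,
				  10, 2000);
}
```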
|
||||
void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
bool enabled)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
int ret;
|
||||
|
||||
if (!crtc_state->fec_enable)
|
||||
return;
|
||||
|
||||
if (enabled)
|
||||
ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
|
||||
DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
|
||||
else
|
||||
ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state),
|
||||
DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
|
||||
|
||||
if (ret)
|
||||
drm_err(&i915->drm,
|
||||
"Timeout waiting for FEC live state to get %s\n",
|
||||
str_enabled_disabled(enabled));
|
||||
|
||||
/*
|
||||
* At least the Synoptics MST hub doesn't set the detected flag for
|
||||
* FEC decoding disabling so skip waiting for that.
|
||||
*/
|
||||
if (enabled)
|
||||
wait_for_fec_detected(&intel_dp->aux, enabled);
|
||||
}
|
||||
|
||||
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
|
||||
@ -2234,8 +2306,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
|
||||
0, DP_TP_CTL_FEC_ENABLE);
|
||||
}
|
||||
|
||||
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
@ -2466,13 +2538,17 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
|
||||
|
||||
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
|
||||
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
|
||||
if (!is_mst)
|
||||
intel_dp_sink_enable_decompression(state,
|
||||
to_intel_connector(conn_state->connector),
|
||||
crtc_state);
|
||||
|
||||
/*
|
||||
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
|
||||
* in the FEC_CONFIGURATION register to 1 before initiating link
|
||||
* training
|
||||
*/
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
|
||||
|
||||
intel_dp_check_frl_training(intel_dp);
|
||||
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
|
||||
@ -2505,7 +2581,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
/* 6.o Configure and enable FEC if needed */
|
||||
intel_ddi_enable_fec(encoder, crtc_state);
|
||||
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
if (!is_mst)
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
}
|
||||
|
||||
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
@ -2616,13 +2693,16 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
|
||||
|
||||
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
|
||||
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
|
||||
if (!is_mst)
|
||||
intel_dp_sink_enable_decompression(state,
|
||||
to_intel_connector(conn_state->connector),
|
||||
crtc_state);
|
||||
/*
|
||||
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
|
||||
* in the FEC_CONFIGURATION register to 1 before initiating link
|
||||
* training
|
||||
*/
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
|
||||
|
||||
intel_dp_check_frl_training(intel_dp);
|
||||
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
|
||||
@ -2643,7 +2723,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
/* 7.l Configure and enable FEC if needed */
|
||||
intel_ddi_enable_fec(encoder, crtc_state);
|
||||
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
if (!is_mst)
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
}
|
||||
|
||||
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
@ -2695,9 +2776,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
if (!is_mst)
|
||||
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
|
||||
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
|
||||
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
|
||||
true);
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
|
||||
if (!is_mst)
|
||||
intel_dp_sink_enable_decompression(state,
|
||||
to_intel_connector(conn_state->connector),
|
||||
crtc_state);
|
||||
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
|
||||
intel_dp_start_link_train(intel_dp, crtc_state);
|
||||
if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
|
||||
!is_trans_port_sync_mode(crtc_state))
|
||||
@ -2705,10 +2788,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_ddi_enable_fec(encoder, crtc_state);
|
||||
|
||||
if (!is_mst)
|
||||
if (!is_mst) {
|
||||
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
|
||||
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
intel_dsc_dp_pps_write(encoder, crtc_state);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
@ -2717,10 +2800,15 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (HAS_DP20(dev_priv))
|
||||
if (HAS_DP20(dev_priv)) {
|
||||
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
|
||||
crtc_state);
|
||||
if (crtc_state->has_panel_replay)
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
|
||||
DP_PANEL_REPLAY_ENABLE);
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14)
|
||||
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
|
||||
@ -2866,8 +2954,7 @@ static void disable_ddi_buf(struct intel_encoder *encoder,
|
||||
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
|
||||
DP_TP_CTL_ENABLE, 0);
|
||||
|
||||
/* Disable FEC in DP Sink */
|
||||
intel_ddi_disable_fec_state(encoder, crtc_state);
|
||||
intel_ddi_disable_fec(encoder, crtc_state);
|
||||
|
||||
if (wait)
|
||||
intel_wait_ddi_buf_idle(dev_priv, port);
|
||||
@ -2882,10 +2969,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
|
||||
mtl_disable_ddi_buf(encoder, crtc_state);
|
||||
|
||||
/* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
|
||||
intel_ddi_disable_fec_state(encoder, crtc_state);
|
||||
intel_ddi_disable_fec(encoder, crtc_state);
|
||||
} else {
|
||||
disable_ddi_buf(encoder, crtc_state);
|
||||
}
|
||||
|
||||
intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
|
||||
}
|
||||
|
||||
static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
|
||||
@ -2925,6 +3014,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_disable_ddi_buf(encoder, old_crtc_state);
|
||||
|
||||
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
|
||||
|
||||
/*
|
||||
* From TGL spec: "If single stream or multi-stream master transcoder:
|
||||
* Configure Transcoder Clock select to direct no clock to the
|
||||
@ -3110,11 +3201,18 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
|
||||
if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
|
||||
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
|
||||
|
||||
intel_audio_codec_enable(encoder, crtc_state, conn_state);
|
||||
|
||||
trans_port_sync_stop_link_train(state, encoder, crtc_state);
|
||||
}
|
||||
|
||||
/* FIXME bad home for this function */
|
||||
i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
|
||||
enum transcoder cpu_transcoder)
|
||||
{
|
||||
return DISPLAY_VER(i915) >= 14 ?
|
||||
MTL_CHICKEN_TRANS(cpu_transcoder) :
|
||||
CHICKEN_TRANS(cpu_transcoder);
|
||||
}
|
||||
|
||||
static i915_reg_t
|
||||
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
@ -3233,8 +3331,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
|
||||
|
||||
intel_wait_ddi_buf_active(dev_priv, port);
|
||||
|
||||
intel_audio_codec_enable(encoder, crtc_state, conn_state);
|
||||
}
|
||||
|
||||
static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
@ -3252,6 +3348,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
|
||||
intel_enable_transcoder(crtc_state);
|
||||
|
||||
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
|
||||
|
||||
intel_crtc_vblank_on(crtc_state);
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
@ -3259,10 +3357,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
|
||||
else
|
||||
intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
|
||||
|
||||
/* Enable hdcp if it's desired */
|
||||
if (conn_state->content_protection ==
|
||||
DRM_MODE_CONTENT_PROTECTION_DESIRED)
|
||||
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
|
||||
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
|
||||
|
||||
}
|
||||
|
||||
static void intel_disable_ddi_dp(struct intel_atomic_state *state,
|
||||
@ -3271,16 +3367,16 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
|
||||
const struct drm_connector_state *old_conn_state)
|
||||
{
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(old_conn_state->connector);
|
||||
|
||||
intel_dp->link_trained = false;
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
intel_psr_disable(intel_dp, old_crtc_state);
|
||||
intel_edp_backlight_off(old_conn_state);
|
||||
/* Disable the decompression in DP Sink */
|
||||
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
|
||||
false);
|
||||
intel_dp_sink_disable_decompression(state,
|
||||
connector, old_crtc_state);
|
||||
/* Disable Ignore_MSA bit in DP Sink */
|
||||
intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
|
||||
false);
|
||||
@ -3294,8 +3390,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct drm_connector *connector = old_conn_state->connector;
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
|
||||
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
|
||||
false, false))
|
||||
drm_dbg_kms(&i915->drm,
|
||||
@ -3854,18 +3948,13 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
|
||||
static void mtl_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, encoder->port);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
|
||||
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
|
||||
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
|
||||
} else if (intel_is_c10phy(i915, phy)) {
|
||||
intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10);
|
||||
crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
|
||||
} else {
|
||||
intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20);
|
||||
crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
|
||||
intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
|
||||
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
|
||||
}
|
||||
|
||||
intel_ddi_get_config(encoder, crtc_state);
|
||||
@ -4844,6 +4933,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
|
||||
encoder->post_pll_disable = intel_ddi_post_pll_disable;
|
||||
encoder->post_disable = intel_ddi_post_disable;
|
||||
encoder->update_pipe = intel_ddi_update_pipe;
|
||||
encoder->audio_enable = intel_audio_codec_enable;
|
||||
encoder->audio_disable = intel_audio_codec_disable;
|
||||
encoder->get_hw_state = intel_ddi_get_hw_state;
|
||||
encoder->sync_state = intel_ddi_sync_state;
|
||||
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
|
||||
|
@ -27,6 +27,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
|
||||
enum transcoder cpu_transcoder);
|
||||
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *intel_encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
@ -60,6 +62,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
|
||||
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
|
||||
void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
bool enabled);
|
||||
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state);
|
||||
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
|
||||
|
@ -48,6 +48,7 @@
|
||||
#include "g4x_dp.h"
|
||||
#include "g4x_hdmi.h"
|
||||
#include "hsw_ips.h"
|
||||
#include "i915_config.h"
|
||||
#include "i915_drv.h"
|
||||
#include "i915_reg.h"
|
||||
#include "i915_utils.h"
|
||||
@ -72,10 +73,10 @@
|
||||
#include "intel_dp.h"
|
||||
#include "intel_dp_link_training.h"
|
||||
#include "intel_dp_mst.h"
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_dpll_mgr.h"
|
||||
#include "intel_dpt.h"
|
||||
#include "intel_dpt_common.h"
|
||||
#include "intel_drrs.h"
|
||||
#include "intel_dsb.h"
|
||||
#include "intel_dsi.h"
|
||||
@ -193,12 +194,9 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
|
||||
static void
|
||||
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
|
||||
{
|
||||
if (enable)
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
|
||||
0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
|
||||
else
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
|
||||
DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
|
||||
DUPS1_GATING_DIS | DUPS2_GATING_DIS,
|
||||
enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
|
||||
}
|
||||
|
||||
/* Wa_2006604312:icl,ehl */
|
||||
@ -206,10 +204,9 @@ static void
|
||||
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
bool enable)
|
||||
{
|
||||
if (enable)
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
|
||||
else
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
|
||||
DPFR_GATING_DIS,
|
||||
enable ? DPFR_GATING_DIS : 0);
|
||||
}
|
||||
|
||||
/* Wa_1604331009:icl,jsl,ehl */
|
||||
@ -217,7 +214,8 @@ static void
|
||||
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
bool enable)
|
||||
{
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
|
||||
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
|
||||
CURSOR_GATING_DIS,
|
||||
enable ? CURSOR_GATING_DIS : 0);
|
||||
}
|
||||
|
||||
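The clock-gating workaround helpers above collapse their old if/else pairs into a single read-modify-write call whose value argument is chosen with a ternary. A self-contained sketch of that pattern on a hypothetical register (all names here are illustrative):

```c
#include <stdint.h>

/* Hypothetical register file, standing in for the display MMIO space. */
struct my_device { uint32_t clkgate_reg; };

#define MY_GATING_DIS (1u << 3)

/* Read-modify-write: only the bits in 'mask' change, to the bits in 'val'. */
static void my_reg_rmw(struct my_device *dev, uint32_t mask, uint32_t val)
{
	dev->clkgate_reg = (dev->clkgate_reg & ~mask) | (val & mask);
}

/* One call handles both directions: the mask names the bit, the ternary
 * decides whether it ends up set or cleared. */
static void my_toggle_gating(struct my_device *dev, int enable)
{
	my_reg_rmw(dev, MY_GATING_DIS, enable ? MY_GATING_DIS : 0);
}
```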
@ -397,7 +395,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
i915_reg_t reg;
|
||||
u32 val;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
|
||||
@ -430,16 +427,16 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
|
||||
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
|
||||
0, PIPE_ARB_USE_PROG_SLOTS);
|
||||
|
||||
reg = TRANSCONF(cpu_transcoder);
|
||||
val = intel_de_read(dev_priv, reg);
|
||||
val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
|
||||
if (val & TRANSCONF_ENABLE) {
|
||||
/* we keep both pipes enabled on 830 */
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
|
||||
return;
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
|
||||
intel_de_posting_read(dev_priv, reg);
|
||||
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
|
||||
val | TRANSCONF_ENABLE);
|
||||
intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
|
||||
|
||||
/*
|
||||
* Until the pipe starts PIPEDSL reads will return a stale value,
|
||||
@ -458,7 +455,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
i915_reg_t reg;
|
||||
u32 val;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
|
||||
@ -469,8 +465,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
|
||||
*/
|
||||
assert_planes_disabled(crtc);
|
||||
|
||||
reg = TRANSCONF(cpu_transcoder);
|
||||
val = intel_de_read(dev_priv, reg);
|
||||
val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
|
||||
if ((val & TRANSCONF_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
@ -485,14 +480,12 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
|
||||
if (!IS_I830(dev_priv))
|
||||
val &= ~TRANSCONF_ENABLE;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14)
|
||||
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
|
||||
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
|
||||
else if (DISPLAY_VER(dev_priv) >= 12)
|
||||
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
|
||||
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 12)
|
||||
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
|
||||
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
|
||||
|
||||
intel_de_write(dev_priv, reg, val);
|
||||
if ((val & TRANSCONF_ENABLE) == 0)
|
||||
intel_wait_for_pipe_off(old_crtc_state);
|
||||
}
|
||||
@ -896,6 +889,48 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
|
||||
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
|
||||
}
|
||||
|
||||
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
const struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct drm_connector_state *conn_state;
|
||||
struct drm_connector *conn;
|
||||
int i;
|
||||
|
||||
for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
|
||||
struct intel_encoder *encoder =
|
||||
to_intel_encoder(conn_state->best_encoder);
|
||||
|
||||
if (conn_state->crtc != &crtc->base)
|
||||
continue;
|
||||
|
||||
if (encoder->audio_enable)
|
||||
encoder->audio_enable(encoder, crtc_state, conn_state);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_encoders_audio_disable(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
const struct intel_crtc_state *old_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, crtc);
|
||||
const struct drm_connector_state *old_conn_state;
|
||||
struct drm_connector *conn;
|
||||
int i;
|
||||
|
||||
for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
|
||||
struct intel_encoder *encoder =
|
||||
to_intel_encoder(old_conn_state->best_encoder);
|
||||
|
||||
if (old_conn_state->crtc != &crtc->base)
|
||||
continue;
|
||||
|
||||
if (encoder->audio_disable)
|
||||
encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
|
||||
}
|
||||
}
|
||||
|
||||
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
|
||||
((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
|
||||
(new_crtc_state)->feature)
|
||||
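The is_enabling() macro above (and its is_disabling() counterpart, assumed symmetric here) is what the audio fastset predicates further down are built on: a feature counts as enabling when the new state has it and either the old state did not or the pipe goes through a full modeset anyway. A stripped-down sketch on plain booleans, outside the crtc_state structs:

```c
#include <stdbool.h>

/* "enabling": the feature will be on afterwards, and it either was off
 * before or the pipe is fully modeset anyway (so it gets re-enabled). */
static bool is_enabling(bool old_on, bool new_on, bool needs_modeset)
{
	return (!old_on || needs_modeset) && new_on;
}

/* Mirror image: the feature was on and will be off, or is torn down by a
 * full modeset. */
static bool is_disabling(bool old_on, bool new_on, bool needs_modeset)
{
	return old_on && (!new_on || needs_modeset);
}
```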
@ -906,12 +941,18 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
|
||||
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!new_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_enabling(active_planes, old_crtc_state, new_crtc_state);
|
||||
}
|
||||
|
||||
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!old_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_disabling(active_planes, old_crtc_state, new_crtc_state);
|
||||
}
|
||||
|
||||
@ -928,6 +969,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
|
||||
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!new_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
|
||||
(new_crtc_state->vrr.enable &&
|
||||
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
|
||||
@ -937,12 +981,37 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
|
||||
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!old_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
|
||||
(old_crtc_state->vrr.enable &&
|
||||
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
|
||||
vrr_params_changed(old_crtc_state, new_crtc_state)));
|
||||
}
|
||||
|
||||
static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!new_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
|
||||
(new_crtc_state->has_audio &&
|
||||
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
|
||||
}
|
||||
|
||||
static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
if (!old_crtc_state->hw.active)
|
||||
return false;
|
||||
|
||||
return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
|
||||
(old_crtc_state->has_audio &&
|
||||
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
|
||||
}
|
||||
|
||||
#undef is_disabling
|
||||
#undef is_enabling
|
||||
|
||||
@ -983,6 +1052,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
|
||||
|
||||
if (intel_crtc_needs_color_update(new_crtc_state))
|
||||
intel_color_post_update(new_crtc_state);
|
||||
|
||||
if (audio_enabling(old_crtc_state, new_crtc_state))
|
||||
intel_encoders_audio_enable(state, crtc);
|
||||
}
|
||||
|
||||
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
|
||||
@ -1066,6 +1138,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
|
||||
intel_crtc_update_active_timings(old_crtc_state, false);
|
||||
}
|
||||
|
||||
if (audio_disabling(old_crtc_state, new_crtc_state))
|
||||
intel_encoders_audio_disable(state, crtc);
|
||||
|
||||
intel_drrs_deactivate(old_crtc_state);
|
||||
|
||||
intel_psr_pre_plane_update(state, crtc);
|
||||
@ -1501,12 +1576,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
|
||||
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder transcoder = crtc_state->cpu_transcoder;
|
||||
i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
|
||||
CHICKEN_TRANS(transcoder);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
|
||||
intel_de_rmw(dev_priv, reg,
|
||||
intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
|
||||
HSW_FRAME_START_DELAY_MASK,
|
||||
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
|
||||
}
|
||||
@ -1784,31 +1856,31 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
|
||||
|
||||
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
|
||||
{
|
||||
/*
|
||||
* DG2's "TC1", although TC-capable output, doesn't share the same flow
|
||||
* as other platforms on the display engine side and rather rely on the
|
||||
* SNPS PHY, that is programmed separately
|
||||
*/
|
||||
if (IS_DG2(dev_priv))
|
||||
/* DG2's "TC1" output uses a SNPS PHY */
|
||||
return false;
|
||||
else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0))
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 13)
|
||||
return phy >= PHY_F && phy <= PHY_I;
|
||||
else if (IS_TIGERLAKE(dev_priv))
|
||||
return phy >= PHY_D && phy <= PHY_I;
|
||||
else if (IS_ICELAKE(dev_priv))
|
||||
return phy >= PHY_C && phy <= PHY_F;
|
||||
else
|
||||
return false;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
|
||||
{
|
||||
if (phy == PHY_NONE)
|
||||
return false;
|
||||
else if (IS_DG2(dev_priv))
|
||||
/*
|
||||
* All four "combo" ports and the TC1 port (PHY E) use
|
||||
* Synopsis PHYs.
|
||||
*/
|
||||
return phy <= PHY_E;
|
||||
|
||||
return false;
|
||||
/*
|
||||
* For DG2, and for DG2 only, all four "combo" ports and the TC1 port
|
||||
* (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
|
||||
*/
|
||||
return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
|
||||
}
|
||||
|
||||
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
|
||||
@@ -2397,15 +2469,15 @@ static void compute_m_n(u32 *ret_m, u32 *ret_n,
}

void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool fec_enable)
int bw_overhead,
struct intel_link_m_n *m_n)
{
u32 data_clock = bits_per_pixel * pixel_clock;

if (fec_enable)
data_clock = intel_dp_mode_to_fec_clock(data_clock);
u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
bw_overhead);
u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);

/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -2416,11 +2488,11 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
*/
m_n->tu = 64;
compute_m_n(&m_n->data_m, &m_n->data_n,
data_clock, link_clock * nlanes * 8,
data_m, data_n,
0x8000000);

compute_m_n(&m_n->link_m, &m_n->link_n,
pixel_clock, link_clock,
pixel_clock, link_symbol_clock,
0x80000);
}
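The reworked intel_link_compute_m_n() above feeds compute_m_n() with an effective data rate that already folds in the bandwidth overhead, and derives link M/N against the link symbol clock instead of the raw link clock. A back-of-the-envelope sketch of the ratio being encoded, with made-up numbers and without the fraction reduction the driver performs:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up numbers: 262750 kHz pixel clock on a 270000 kHz symbol clock. */
	uint32_t pixel_clock = 262750;
	uint32_t link_symbol_clock = 270000;

	/* link M/N encodes pixel_clock / link_symbol_clock as a fraction; the
	 * driver additionally reduces it to fit the register field widths. */
	uint32_t link_m = pixel_clock;
	uint32_t link_n = link_symbol_clock;

	/* Readout side: recover the pixel clock from M/N and the symbol clock. */
	uint64_t dotclock = (uint64_t)link_m * link_symbol_clock / link_n;

	printf("%llu kHz\n", (unsigned long long)dotclock); /* 262750 kHz */
	return 0;
}
```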
|
||||
@ -2838,67 +2910,6 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
|
||||
intel_de_read(dev_priv, PFIT_PGM_RATIOS);
|
||||
}
|
||||
|
||||
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
struct dpll clock;
|
||||
u32 mdiv;
|
||||
int refclk = 100000;
|
||||
|
||||
/* In case of DSI, DPLL will not be used */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
|
||||
clock.m2 = mdiv & DPIO_M2DIV_MASK;
|
||||
clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
|
||||
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
|
||||
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
|
||||
|
||||
pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
static void chv_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
struct dpll clock;
|
||||
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
|
||||
int refclk = 100000;
|
||||
|
||||
/* In case of DSI, DPLL will not be used */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
|
||||
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
|
||||
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
|
||||
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
|
||||
pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
|
||||
clock.m2 = (pll_dw0 & 0xff) << 22;
|
||||
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
|
||||
clock.m2 |= pll_dw2 & 0x3fffff;
|
||||
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
|
||||
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
|
||||
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
|
||||
|
||||
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
static enum intel_output_format
|
||||
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
|
||||
{
|
||||
@ -3790,9 +3801,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
|
||||
}
|
||||
|
||||
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
|
||||
tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
|
||||
MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
|
||||
CHICKEN_TRANS(pipe_config->cpu_transcoder));
|
||||
tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
|
||||
|
||||
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
|
||||
} else {
|
||||
@ -3821,133 +3830,27 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
|
||||
return true;
|
||||
}
|
||||
|
||||
static int i9xx_pll_refclk(struct drm_device *dev,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 dpll = pipe_config->dpll_hw_state.dpll;
|
||||
|
||||
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
|
||||
return dev_priv->display.vbt.lvds_ssc_freq;
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
return 120000;
|
||||
else if (DISPLAY_VER(dev_priv) != 2)
|
||||
return 96000;
|
||||
else
|
||||
return 48000;
|
||||
}
|
||||
|
||||
/* Returns the clock of the currently programmed mode of the given pipe. */
|
||||
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 dpll = pipe_config->dpll_hw_state.dpll;
|
||||
u32 fp;
|
||||
struct dpll clock;
|
||||
int port_clock;
|
||||
int refclk = i9xx_pll_refclk(dev, pipe_config);
|
||||
|
||||
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
|
||||
fp = pipe_config->dpll_hw_state.fp0;
|
||||
else
|
||||
fp = pipe_config->dpll_hw_state.fp1;
|
||||
|
||||
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
|
||||
if (IS_PINEVIEW(dev_priv)) {
|
||||
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
|
||||
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
|
||||
} else {
|
||||
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
|
||||
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) != 2) {
|
||||
if (IS_PINEVIEW(dev_priv))
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
|
||||
else
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT);
|
||||
|
||||
switch (dpll & DPLL_MODE_MASK) {
|
||||
case DPLLB_MODE_DAC_SERIAL:
|
||||
clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
|
||||
5 : 10;
|
||||
break;
|
||||
case DPLLB_MODE_LVDS:
|
||||
clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
|
||||
7 : 14;
|
||||
break;
|
||||
default:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Unknown DPLL mode %08x in programmed "
|
||||
"mode\n", (int)(dpll & DPLL_MODE_MASK));
|
||||
return;
|
||||
}
|
||||
|
||||
if (IS_PINEVIEW(dev_priv))
|
||||
port_clock = pnv_calc_dpll_params(refclk, &clock);
|
||||
else
|
||||
port_clock = i9xx_calc_dpll_params(refclk, &clock);
|
||||
} else {
|
||||
enum pipe lvds_pipe;
|
||||
|
||||
if (IS_I85X(dev_priv) &&
|
||||
intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
|
||||
lvds_pipe == crtc->pipe) {
|
||||
u32 lvds = intel_de_read(dev_priv, LVDS);
|
||||
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT);
|
||||
|
||||
if (lvds & LVDS_CLKB_POWER_UP)
|
||||
clock.p2 = 7;
|
||||
else
|
||||
clock.p2 = 14;
|
||||
} else {
|
||||
if (dpll & PLL_P1_DIVIDE_BY_TWO)
|
||||
clock.p1 = 2;
|
||||
else {
|
||||
clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
|
||||
}
|
||||
if (dpll & PLL_P2_DIVIDE_BY_4)
|
||||
clock.p2 = 4;
|
||||
else
|
||||
clock.p2 = 2;
|
||||
}
|
||||
|
||||
port_clock = i9xx_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
/*
|
||||
* This value includes pixel_multiplier. We will use
|
||||
* port_clock to compute adjusted_mode.crtc_clock in the
|
||||
* encoder's get_config() function.
|
||||
*/
|
||||
pipe_config->port_clock = port_clock;
|
||||
}
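For the non-Pineview, DISPLAY_VER != 2 path above, the decoded fields are handed to i9xx_calc_dpll_params(). As a rough worked example, assuming the classic +2 offsets on the m/n fields (an assumption about the hardware encoding, not something shown in this diff):

#include <stdio.h>

int main(void)
{
        unsigned int refclk = 96000;            /* kHz, non-LVDS default above */
        unsigned int m1 = 18, m2 = 7, n = 3, p1 = 2, p2 = 10;  /* made-up fields */

        unsigned int m = 5 * (m1 + 2) + (m2 + 2);       /* 109 */
        unsigned int vco = refclk * m / (n + 2);        /* 96000 * 109 / 5 = 2092800 kHz */
        unsigned int dot = vco / (p1 * p2);             /* 104640 kHz */

        printf("vco=%u kHz dot=%u kHz\n", vco, dot);
        return 0;
}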
|
||||
|
||||
int intel_dotclock_calculate(int link_freq,
                             const struct intel_link_m_n *m_n)
{
        /*
         * The calculation for the data clock is:
         * The calculation for the data clock -> pixel clock is:
         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
         * But we want to avoid losing precison if possible, so:
         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
         *
         * and the link clock is simpler:
         * link_clock = (m * link_clock) / n
         * and for link freq (10kbs units) -> pixel clock it is:
         * link_symbol_clock = link_freq * 10 / link_symbol_size
         * pixel_clock = (m * link_symbol_clock) / n
         * or for more precision:
         * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
         */

        if (!m_n->link_n)
                return 0;

        return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
                                m_n->link_n);
        return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
                                m_n->link_n * intel_dp_link_symbol_size(link_freq));
}
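A quick worked example of the new formula, with an assumed m/n ratio for a 148.5 MHz mode on an HBR (2.7 Gbps, 8b/10b) link:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t link_m = 148500, link_n = 270000;      /* assumed example ratio */
        uint64_t link_freq = 270000;    /* HBR, in 10 kbit/s units */
        uint64_t symbol_size = 10;      /* 8b/10b; would be 32 for 128b/132b UHBR */

        /* pixel_clock = (m * link_freq * 10) / (n * link_symbol_size), rounded up */
        uint64_t dotclock = (link_m * link_freq * 10 + link_n * symbol_size - 1) /
                            (link_n * symbol_size);

        printf("dotclock = %llu kHz\n", (unsigned long long)dotclock); /* 148500 */
        return 0;
}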
|
||||
|
||||
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
|
||||
@ -4679,6 +4582,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
|
||||
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
|
||||
|
||||
if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
|
||||
@ -5057,23 +4961,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Checks state where we only read out the enabling, but not the entire
|
||||
* state itself (like full infoframes or ELD for audio). These states
|
||||
* require a full modeset on bootup to fix up.
|
||||
*/
|
||||
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
|
||||
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
|
||||
PIPE_CONF_CHECK_BOOL(name); \
|
||||
} else { \
|
||||
pipe_config_mismatch(fastset, crtc, __stringify(name), \
|
||||
"unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
|
||||
str_yes_no(current_config->name), \
|
||||
str_yes_no(pipe_config->name)); \
|
||||
ret = false; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define PIPE_CONF_CHECK_P(name) do { \
|
||||
if (current_config->name != pipe_config->name) { \
|
||||
pipe_config_mismatch(fastset, crtc, __stringify(name), \
|
||||
@ -5261,8 +5148,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
|
||||
PIPE_CONF_CHECK_BOOL(enhanced_framing);
|
||||
PIPE_CONF_CHECK_BOOL(fec_enable);
|
||||
|
||||
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
|
||||
PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
|
||||
if (!fastset) {
|
||||
PIPE_CONF_CHECK_BOOL(has_audio);
|
||||
PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
|
||||
}
|
||||
|
||||
PIPE_CONF_CHECK_X(gmch_pfit.control);
|
||||
/* pfit ratios are autocomputed by the hw on gen4+ */
|
||||
@ -5414,7 +5303,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
|
||||
|
||||
PIPE_CONF_CHECK_I(dsc.compression_enable);
|
||||
PIPE_CONF_CHECK_I(dsc.dsc_split);
|
||||
PIPE_CONF_CHECK_I(dsc.compressed_bpp);
|
||||
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
|
||||
|
||||
PIPE_CONF_CHECK_BOOL(splitter.enable);
|
||||
PIPE_CONF_CHECK_I(splitter.link_count);
|
||||
@ -5432,7 +5321,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
|
||||
#undef PIPE_CONF_CHECK_X
|
||||
#undef PIPE_CONF_CHECK_I
|
||||
#undef PIPE_CONF_CHECK_BOOL
|
||||
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
|
||||
#undef PIPE_CONF_CHECK_P
|
||||
#undef PIPE_CONF_CHECK_FLAGS
|
||||
#undef PIPE_CONF_CHECK_COLOR_LUT
|
||||
@ -5523,6 +5411,16 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
{
        crtc_state->uapi.mode_changed = true;

        crtc_state->update_pipe = false;
        crtc_state->update_m_n = false;
        crtc_state->update_lrr = false;
}
|
||||
|
||||
/**
|
||||
* intel_modeset_all_pipes_late - force a full modeset on all pipes
|
||||
* @state: intel atomic state
|
||||
@ -5556,9 +5454,8 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
crtc_state->update_pipe = false;
|
||||
crtc_state->update_m_n = false;
|
||||
crtc_state->update_lrr = false;
|
||||
intel_crtc_flag_modeset(crtc_state);
|
||||
|
||||
crtc_state->update_planes |= crtc_state->active_planes;
|
||||
crtc_state->async_flip_planes = 0;
|
||||
crtc_state->do_async_flip = false;
|
||||
@ -5671,17 +5568,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
|
||||
else
|
||||
new_crtc_state->uapi.mode_changed = false;
|
||||
|
||||
if (intel_crtc_needs_modeset(new_crtc_state) ||
|
||||
intel_compare_link_m_n(&old_crtc_state->dp_m_n,
|
||||
if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
|
||||
&new_crtc_state->dp_m_n))
|
||||
new_crtc_state->update_m_n = false;
|
||||
|
||||
if (intel_crtc_needs_modeset(new_crtc_state) ||
|
||||
(old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
|
||||
if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
|
||||
old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
|
||||
new_crtc_state->update_lrr = false;
|
||||
|
||||
if (!intel_crtc_needs_modeset(new_crtc_state))
|
||||
if (intel_crtc_needs_modeset(new_crtc_state))
|
||||
intel_crtc_flag_modeset(new_crtc_state);
|
||||
else
|
||||
new_crtc_state->update_pipe = true;
|
||||
}
|
||||
|
||||
@ -6453,15 +6350,14 @@ int intel_atomic_check(struct drm_device *dev,
|
||||
if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
|
||||
continue;
|
||||
|
||||
if (intel_dp_mst_crtc_needs_modeset(state, crtc))
|
||||
intel_crtc_flag_modeset(new_crtc_state);
|
||||
|
||||
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
|
||||
enum transcoder master = new_crtc_state->mst_master_transcoder;
|
||||
|
||||
if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
|
||||
new_crtc_state->uapi.mode_changed = true;
|
||||
new_crtc_state->update_pipe = false;
|
||||
new_crtc_state->update_m_n = false;
|
||||
new_crtc_state->update_lrr = false;
|
||||
}
|
||||
if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
|
||||
intel_crtc_flag_modeset(new_crtc_state);
|
||||
}
|
||||
|
||||
if (is_trans_port_sync_mode(new_crtc_state)) {
|
||||
@ -6470,21 +6366,13 @@ int intel_atomic_check(struct drm_device *dev,
|
||||
if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
|
||||
trans |= BIT(new_crtc_state->master_transcoder);
|
||||
|
||||
if (intel_cpu_transcoders_need_modeset(state, trans)) {
|
||||
new_crtc_state->uapi.mode_changed = true;
|
||||
new_crtc_state->update_pipe = false;
|
||||
new_crtc_state->update_m_n = false;
|
||||
new_crtc_state->update_lrr = false;
|
||||
}
|
||||
if (intel_cpu_transcoders_need_modeset(state, trans))
|
||||
intel_crtc_flag_modeset(new_crtc_state);
|
||||
}
|
||||
|
||||
if (new_crtc_state->bigjoiner_pipes) {
|
||||
if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
|
||||
new_crtc_state->uapi.mode_changed = true;
|
||||
new_crtc_state->update_pipe = false;
|
||||
new_crtc_state->update_m_n = false;
|
||||
new_crtc_state->update_lrr = false;
|
||||
}
|
||||
if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
|
||||
intel_crtc_flag_modeset(new_crtc_state);
|
||||
}
|
||||
}
|
||||
|
||||
@ -6505,10 +6393,6 @@ int intel_atomic_check(struct drm_device *dev,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = drm_dp_mst_atomic_check(&state->base);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
ret = intel_atomic_check_planes(state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
@ -6744,8 +6628,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
|
||||
intel_crtc_enable_pipe_crc(crtc);
|
||||
}
|
||||
|
||||
static void intel_update_crtc(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
static void intel_pre_update_crtc(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
const struct intel_crtc_state *old_crtc_state =
|
||||
@ -6787,6 +6671,15 @@ static void intel_update_crtc(struct intel_atomic_state *state,
|
||||
intel_color_commit_noarm(new_crtc_state);
|
||||
|
||||
intel_crtc_planes_update_noarm(state, crtc);
|
||||
}
|
||||
|
||||
static void intel_update_crtc(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
const struct intel_crtc_state *old_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, crtc);
|
||||
struct intel_crtc_state *new_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
|
||||
/* Perform vblank evasion around commit operation */
|
||||
intel_pipe_update_start(state, crtc);
|
||||
@ -6815,7 +6708,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
|
||||
* valid pipe configuration from the BIOS we need to take care
|
||||
* of enabling them on the CRTC's first fastset.
|
||||
*/
|
||||
if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
|
||||
if (intel_crtc_needs_fastset(new_crtc_state) &&
|
||||
old_crtc_state->inherited)
|
||||
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
|
||||
}
|
||||
@ -6853,10 +6746,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
|
||||
if (!intel_crtc_needs_modeset(new_crtc_state))
|
||||
continue;
|
||||
|
||||
intel_pre_plane_update(state, crtc);
|
||||
|
||||
if (!old_crtc_state->hw.active)
|
||||
continue;
|
||||
|
||||
intel_pre_plane_update(state, crtc);
|
||||
intel_crtc_disable_planes(state, crtc);
|
||||
}
|
||||
|
||||
@ -6910,6 +6804,13 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
|
||||
continue;
|
||||
|
||||
intel_enable_crtc(state, crtc);
|
||||
intel_pre_update_crtc(state, crtc);
|
||||
}
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
if (!new_crtc_state->hw.active)
|
||||
continue;
|
||||
|
||||
intel_update_crtc(state, crtc);
|
||||
}
|
||||
}
|
||||
@ -6947,6 +6848,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
|
||||
* So first lets enable all pipes that do not need a fullmodeset as
|
||||
* those don't have any external dependency.
|
||||
*/
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
if ((update_pipes & BIT(pipe)) == 0)
|
||||
continue;
|
||||
|
||||
intel_pre_update_crtc(state, crtc);
|
||||
}
|
||||
|
||||
while (update_pipes) {
|
||||
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
@ -7017,6 +6927,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
|
||||
/*
|
||||
* Finally we do the plane updates/etc. for all pipes that got enabled.
|
||||
*/
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
if ((update_pipes & BIT(pipe)) == 0)
|
||||
continue;
|
||||
|
||||
intel_pre_update_crtc(state, crtc);
|
||||
}
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
@ -7056,29 +6975,22 @@ void intel_atomic_helper_free_state_worker(struct work_struct *work)
|
||||
|
||||
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
|
||||
{
|
||||
struct wait_queue_entry wait_fence, wait_reset;
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
|
||||
struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
|
||||
struct drm_plane *plane;
|
||||
struct drm_plane_state *new_plane_state;
|
||||
int ret, i;
|
||||
|
||||
init_wait_entry(&wait_fence, 0);
|
||||
init_wait_entry(&wait_reset, 0);
|
||||
for (;;) {
|
||||
prepare_to_wait(&intel_state->commit_ready.wait,
|
||||
&wait_fence, TASK_UNINTERRUPTIBLE);
|
||||
prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
|
||||
I915_RESET_MODESET),
|
||||
&wait_reset, TASK_UNINTERRUPTIBLE);
|
||||
for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
|
||||
if (new_plane_state->fence) {
|
||||
ret = dma_fence_wait_timeout(new_plane_state->fence, false,
|
||||
i915_fence_timeout(i915));
|
||||
if (ret <= 0)
|
||||
break;
|
||||
|
||||
|
||||
if (i915_sw_fence_done(&intel_state->commit_ready) ||
|
||||
test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
|
||||
break;
|
||||
|
||||
schedule();
|
||||
dma_fence_put(new_plane_state->fence);
|
||||
new_plane_state->fence = NULL;
|
||||
}
|
||||
}
|
||||
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
|
||||
finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
|
||||
I915_RESET_MODESET),
|
||||
&wait_reset);
|
||||
}
|
||||
|
||||
static void intel_atomic_cleanup_work(struct work_struct *work)
|
||||
@ -7370,32 +7282,6 @@ static void intel_atomic_commit_work(struct work_struct *work)
|
||||
intel_atomic_commit_tail(state);
|
||||
}
|
||||
|
||||
static int
|
||||
intel_atomic_commit_ready(struct i915_sw_fence *fence,
|
||||
enum i915_sw_fence_notify notify)
|
||||
{
|
||||
struct intel_atomic_state *state =
|
||||
container_of(fence, struct intel_atomic_state, commit_ready);
|
||||
|
||||
switch (notify) {
|
||||
case FENCE_COMPLETE:
|
||||
/* we do blocking waits in the worker, nothing to do here */
|
||||
break;
|
||||
case FENCE_FREE:
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
struct intel_atomic_helper *helper =
|
||||
&i915->display.atomic_helper;
|
||||
|
||||
if (llist_add(&state->freed, &helper->free_list))
|
||||
queue_work(i915->unordered_wq, &helper->free_work);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
|
||||
{
|
||||
struct intel_plane_state *old_plane_state, *new_plane_state;
|
||||
@ -7418,10 +7304,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
|
||||
|
||||
state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
|
||||
|
||||
drm_atomic_state_get(&state->base);
|
||||
i915_sw_fence_init(&state->commit_ready,
|
||||
intel_atomic_commit_ready);
|
||||
|
||||
/*
|
||||
* The intel_legacy_cursor_update() fast path takes care
|
||||
* of avoiding the vblank waits for simple cursor
|
||||
@ -7454,7 +7336,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
|
||||
if (ret) {
|
||||
drm_dbg_atomic(&dev_priv->drm,
|
||||
"Preparing state failed with %i\n", ret);
|
||||
i915_sw_fence_commit(&state->commit_ready);
|
||||
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
|
||||
return ret;
|
||||
}
|
||||
@ -7470,8 +7351,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
|
||||
struct intel_crtc *crtc;
|
||||
int i;
|
||||
|
||||
i915_sw_fence_commit(&state->commit_ready);
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
|
||||
intel_color_cleanup_commit(new_crtc_state);
|
||||
|
||||
@ -7485,7 +7364,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
|
||||
drm_atomic_state_get(&state->base);
|
||||
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
|
||||
|
||||
i915_sw_fence_commit(&state->commit_ready);
|
||||
if (nonblock && state->modeset) {
|
||||
queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
|
||||
} else if (nonblock) {
|
||||
|
@ -105,7 +105,6 @@ enum i9xx_plane_id {
|
||||
};
|
||||
|
||||
#define plane_name(p) ((p) + 'A')
|
||||
#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
|
||||
|
||||
#define for_each_plane_id_on_crtc(__crtc, __p) \
|
||||
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
|
||||
@ -395,8 +394,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
|
||||
u8 active_pipes);
|
||||
void intel_link_compute_m_n(u16 bpp, int nlanes,
|
||||
int pixel_clock, int link_clock,
|
||||
struct intel_link_m_n *m_n,
|
||||
bool fec_enable);
|
||||
int bw_overhead,
|
||||
struct intel_link_m_n *m_n);
|
||||
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
|
||||
u32 pixel_format, u64 modifier);
|
||||
enum drm_mode_status
|
||||
@ -482,8 +481,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
|
||||
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
|
||||
enum transcoder cpu_transcoder,
|
||||
struct intel_link_m_n *m_n);
|
||||
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config);
|
||||
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
|
||||
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
|
||||
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
|
||||
@ -552,7 +549,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
|
||||
struct drm_device *drm = &(__i915)->drm; \
|
||||
int __ret_warn_on = !!(condition); \
|
||||
if (unlikely(__ret_warn_on)) \
|
||||
if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \
|
||||
if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \
|
||||
drm_err(drm, format); \
|
||||
unlikely(__ret_warn_on); \
|
||||
})
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include "intel_cdclk.h"
|
||||
#include "intel_display_device.h"
|
||||
#include "intel_display_limits.h"
|
||||
#include "intel_display_params.h"
|
||||
#include "intel_display_power.h"
|
||||
#include "intel_dpll_mgr.h"
|
||||
#include "intel_fbc.h"
|
||||
@ -347,15 +348,6 @@ struct intel_display {
|
||||
struct intel_global_obj obj;
|
||||
} dbuf;
|
||||
|
||||
struct {
|
||||
wait_queue_head_t waitqueue;
|
||||
|
||||
/* mutex to protect pmdemand programming sequence */
|
||||
struct mutex lock;
|
||||
|
||||
struct intel_global_obj obj;
|
||||
} pmdemand;
|
||||
|
||||
struct {
|
||||
/*
|
||||
* dkl.phy_lock protects against concurrent access of the
|
||||
@ -443,6 +435,15 @@ struct intel_display {
|
||||
bool false_color;
|
||||
} ips;
|
||||
|
||||
struct {
|
||||
wait_queue_head_t waitqueue;
|
||||
|
||||
/* mutex to protect pmdemand programming sequence */
|
||||
struct mutex lock;
|
||||
|
||||
struct intel_global_obj obj;
|
||||
} pmdemand;
|
||||
|
||||
struct {
|
||||
struct i915_power_domains domains;
|
||||
|
||||
@ -520,6 +521,7 @@ struct intel_display {
|
||||
struct intel_hotplug hotplug;
|
||||
struct intel_opregion opregion;
|
||||
struct intel_overlay *overlay;
|
||||
struct intel_display_params params;
|
||||
struct intel_vbt_data vbt;
|
||||
struct intel_wm wm;
|
||||
};
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "intel_de.h"
|
||||
#include "intel_crtc_state_dump.h"
|
||||
#include "intel_display_debugfs.h"
|
||||
#include "intel_display_debugfs_params.h"
|
||||
#include "intel_display_power.h"
|
||||
#include "intel_display_power_well.h"
|
||||
#include "intel_display_types.h"
|
||||
@ -641,6 +642,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_display_capabilities(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_i915_private *i915 = node_to_i915(m->private);
|
||||
struct drm_printer p = drm_seq_file_printer(m);
|
||||
|
||||
intel_display_device_info_print(DISPLAY_INFO(i915),
|
||||
DISPLAY_RUNTIME_INFO(i915), &p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
@ -1059,6 +1071,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
|
||||
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
|
||||
{"i915_power_domain_info", i915_power_domain_info, 0},
|
||||
{"i915_display_info", i915_display_info, 0},
|
||||
{"i915_display_capabilities", i915_display_capabilities, 0},
|
||||
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
|
||||
{"i915_dp_mst_info", i915_dp_mst_info, 0},
|
||||
{"i915_ddb_info", i915_ddb_info, 0},
|
||||
@ -1098,6 +1111,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
|
||||
intel_hpd_debugfs_register(i915);
|
||||
intel_psr_debugfs_register(i915);
|
||||
intel_wm_debugfs_register(i915);
|
||||
intel_display_debugfs_params(i915);
|
||||
}
|
||||
|
||||
static int i915_panel_show(struct seq_file *m, void *data)
|
||||
@ -1242,6 +1256,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
|
||||
DP_DSC_YCbCr420_Native)),
|
||||
str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd,
|
||||
DP_DSC_YCbCr444)));
|
||||
seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
|
||||
drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
|
||||
seq_printf(m, "Force_DSC_Enable: %s\n",
|
||||
str_yes_no(intel_dp->force_dsc_en));
|
||||
if (!intel_dp_is_edp(intel_dp))
|
||||
@ -1434,6 +1450,85 @@ static const struct file_operations i915_dsc_output_format_fops = {
|
||||
.write = i915_dsc_output_format_write
|
||||
};
|
||||
|
||||
static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_connector *connector = m->private;
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_crtc *crtc;
|
||||
struct intel_dp *intel_dp;
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
|
||||
int ret;
|
||||
|
||||
if (!encoder)
|
||||
return -ENODEV;
|
||||
|
||||
ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
crtc = connector->state->crtc;
|
||||
if (connector->status != connector_status_connected || !crtc) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
intel_dp = intel_attached_dp(intel_connector);
|
||||
seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n",
|
||||
str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
|
||||
|
||||
out:
|
||||
drm_modeset_unlock(&dev->mode_config.connection_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
|
||||
const char __user *ubuf,
|
||||
size_t len, loff_t *offp)
|
||||
{
|
||||
struct drm_connector *connector =
|
||||
((struct seq_file *)file->private_data)->private;
|
||||
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
bool dsc_fractional_bpp_enable = false;
|
||||
int ret;
|
||||
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
drm_dbg(&i915->drm,
|
||||
"Copied %zu bytes from user to force fractional bpp for DSC\n", len);
|
||||
|
||||
ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
|
||||
(dsc_fractional_bpp_enable) ? "true" : "false");
|
||||
intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
|
||||
|
||||
*offp += len;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int i915_dsc_fractional_bpp_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations i915_dsc_fractional_bpp_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = i915_dsc_fractional_bpp_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = i915_dsc_fractional_bpp_write
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns the Current CRTC's bpc.
|
||||
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
|
||||
@ -1511,6 +1606,9 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
|
||||
|
||||
debugfs_create_file("i915_dsc_output_format", 0644, root,
|
||||
connector, &i915_dsc_output_format_fops);
|
||||
|
||||
debugfs_create_file("i915_dsc_fractional_bpp", 0644, root,
|
||||
connector, &i915_dsc_fractional_bpp_fops);
|
||||
}
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
|
||||
|
drivers/gpu/drm/i915/display/intel_display_debugfs_params.c (new file, 176 lines)
@ -0,0 +1,176 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2023 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
|
||||
#include "intel_display_debugfs_params.h"
|
||||
#include "i915_drv.h"
|
||||
#include "intel_display_params.h"
|
||||
|
||||
/* int param */
|
||||
static int intel_display_param_int_show(struct seq_file *m, void *data)
|
||||
{
|
||||
int *value = m->private;
|
||||
|
||||
seq_printf(m, "%d\n", *value);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_display_param_int_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, intel_display_param_int_show, inode->i_private);
|
||||
}
|
||||
|
||||
static ssize_t intel_display_param_int_write(struct file *file,
|
||||
const char __user *ubuf, size_t len,
|
||||
loff_t *offp)
|
||||
{
|
||||
struct seq_file *m = file->private_data;
|
||||
int *value = m->private;
|
||||
int ret;
|
||||
|
||||
ret = kstrtoint_from_user(ubuf, len, 0, value);
|
||||
if (ret) {
|
||||
/* support boolean values too */
|
||||
bool b;
|
||||
|
||||
ret = kstrtobool_from_user(ubuf, len, &b);
|
||||
if (!ret)
|
||||
*value = b;
|
||||
}
|
||||
|
||||
return ret ?: len;
|
||||
}
|
||||
|
||||
static const struct file_operations intel_display_param_int_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = intel_display_param_int_open,
|
||||
.read = seq_read,
|
||||
.write = intel_display_param_int_write,
|
||||
.llseek = default_llseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static const struct file_operations intel_display_param_int_fops_ro = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = intel_display_param_int_open,
|
||||
.read = seq_read,
|
||||
.llseek = default_llseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
/* unsigned int param */
|
||||
static int intel_display_param_uint_show(struct seq_file *m, void *data)
|
||||
{
|
||||
unsigned int *value = m->private;
|
||||
|
||||
seq_printf(m, "%u\n", *value);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_display_param_uint_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, intel_display_param_uint_show, inode->i_private);
|
||||
}
|
||||
|
||||
static ssize_t intel_display_param_uint_write(struct file *file,
|
||||
const char __user *ubuf, size_t len,
|
||||
loff_t *offp)
|
||||
{
|
||||
struct seq_file *m = file->private_data;
|
||||
unsigned int *value = m->private;
|
||||
int ret;
|
||||
|
||||
ret = kstrtouint_from_user(ubuf, len, 0, value);
|
||||
if (ret) {
|
||||
/* support boolean values too */
|
||||
bool b;
|
||||
|
||||
ret = kstrtobool_from_user(ubuf, len, &b);
|
||||
if (!ret)
|
||||
*value = b;
|
||||
}
|
||||
|
||||
return ret ?: len;
|
||||
}
|
||||
|
||||
static const struct file_operations intel_display_param_uint_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = intel_display_param_uint_open,
|
||||
.read = seq_read,
|
||||
.write = intel_display_param_uint_write,
|
||||
.llseek = default_llseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static const struct file_operations intel_display_param_uint_fops_ro = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = intel_display_param_uint_open,
|
||||
.read = seq_read,
|
||||
.llseek = default_llseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
#define RO(mode) (((mode) & 0222) == 0)
|
||||
|
||||
__maybe_unused static struct dentry *
|
||||
intel_display_debugfs_create_int(const char *name, umode_t mode,
|
||||
struct dentry *parent, int *value)
|
||||
{
|
||||
return debugfs_create_file_unsafe(name, mode, parent, value,
|
||||
RO(mode) ? &intel_display_param_int_fops_ro :
|
||||
&intel_display_param_int_fops);
|
||||
}
|
||||
|
||||
__maybe_unused static struct dentry *
|
||||
intel_display_debugfs_create_uint(const char *name, umode_t mode,
|
||||
struct dentry *parent, unsigned int *value)
|
||||
{
|
||||
return debugfs_create_file_unsafe(name, mode, parent, value,
|
||||
RO(mode) ? &intel_display_param_uint_fops_ro :
|
||||
&intel_display_param_uint_fops);
|
||||
}
|
||||
|
||||
#define _intel_display_param_create_file(parent, name, mode, valp) \
        do { \
                if (mode) \
                        _Generic(valp, \
                                 bool * : debugfs_create_bool, \
                                 int * : intel_display_debugfs_create_int, \
                                 unsigned int * : intel_display_debugfs_create_uint, \
                                 unsigned long * : debugfs_create_ulong, \
                                 char ** : debugfs_create_str) \
                                (name, mode, parent, valp); \
        } while (0)
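The macro above leans on C11 _Generic selection to pick the right debugfs helper from the pointer type of the parameter. A tiny standalone illustration of the same dispatch pattern (helper names here are placeholders, not kernel APIs):

#include <stdio.h>

static void create_bool_file(const char *name, _Bool *val)
{ printf("%s -> bool file (%d)\n", name, *val); }

static void create_int_file(const char *name, int *val)
{ printf("%s -> int file (%d)\n", name, *val); }

/* the pointed-to type of valp selects which helper is called */
#define create_param_file(name, valp) \
        _Generic((valp), \
                 _Bool * : create_bool_file, \
                 int *   : create_int_file)(name, valp)

int main(void)
{
        _Bool enable = 1;
        int level = -1;

        create_param_file("enable", &enable);   /* picks create_bool_file */
        create_param_file("level", &level);     /* picks create_int_file */
        return 0;
}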
|
||||
|
||||
/* add a subdirectory with files for each intel display param */
|
||||
void intel_display_debugfs_params(struct drm_i915_private *i915)
|
||||
{
|
||||
struct drm_minor *minor = i915->drm.primary;
|
||||
struct dentry *dir;
|
||||
char dirname[16];
|
||||
|
||||
snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
|
||||
dir = debugfs_lookup(dirname, minor->debugfs_root);
|
||||
if (!dir)
|
||||
dir = debugfs_create_dir(dirname, minor->debugfs_root);
|
||||
if (IS_ERR(dir))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Note: We could create files for params needing special handling
|
||||
* here. Set mode in params to 0 to skip the generic create file, or
|
||||
* just let the generic create file fail silently with -EEXIST.
|
||||
*/
|
||||
|
||||
#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
|
||||
dir, #x, mode, &i915->display.params.x);
|
||||
INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
|
||||
#undef REGISTER
|
||||
}
|
drivers/gpu/drm/i915/display/intel_display_debugfs_params.h (new file, 13 lines)
@ -0,0 +1,13 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright © 2023 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
|
||||
#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
|
||||
|
||||
struct drm_i915_private;
|
||||
|
||||
void intel_display_debugfs_params(struct drm_i915_private *i915);
|
||||
|
||||
#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
|
@ -12,6 +12,7 @@
|
||||
#include "intel_de.h"
|
||||
#include "intel_display.h"
|
||||
#include "intel_display_device.h"
|
||||
#include "intel_display_params.h"
|
||||
#include "intel_display_power.h"
|
||||
#include "intel_display_reg_defs.h"
|
||||
#include "intel_fbc.h"
|
||||
@ -937,6 +938,13 @@ void intel_display_device_probe(struct drm_i915_private *i915)
|
||||
DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
|
||||
DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
|
||||
}
|
||||
|
||||
intel_display_params_copy(&i915->display.params);
|
||||
}
|
||||
|
||||
void intel_display_device_remove(struct drm_i915_private *i915)
|
||||
{
|
||||
intel_display_params_free(&i915->display.params);
|
||||
}
|
||||
|
||||
static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
|
||||
@ -1105,7 +1113,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
|
||||
}
|
||||
|
||||
/* Disable nuclear pageflip by default on pre-g4x */
|
||||
if (!i915->params.nuclear_pageflip &&
|
||||
if (!i915->display.params.nuclear_pageflip &&
|
||||
DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
|
||||
i915->drm.driver_features &= ~DRIVER_ATOMIC;
|
||||
}
|
||||
@ -1145,5 +1153,6 @@ bool intel_display_device_enabled(struct drm_i915_private *i915)
|
||||
/* Only valid when HAS_DISPLAY() is true */
|
||||
drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
|
||||
|
||||
return !i915->params.disable_display && !intel_opregion_headless_sku(i915);
|
||||
return !i915->display.params.disable_display &&
|
||||
!intel_opregion_headless_sku(i915);
|
||||
}
|
||||
|
@ -161,6 +161,7 @@ struct intel_display_device_info {
|
||||
|
||||
bool intel_display_device_enabled(struct drm_i915_private *i915);
|
||||
void intel_display_device_probe(struct drm_i915_private *i915);
|
||||
void intel_display_device_remove(struct drm_i915_private *i915);
|
||||
void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
|
||||
|
||||
void intel_display_device_info_print(const struct intel_display_device_info *info,
|
||||
|
@ -181,6 +181,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
|
||||
if (!HAS_DISPLAY(i915))
|
||||
return;
|
||||
|
||||
spin_lock_init(&i915->display.fb_tracking.lock);
|
||||
mutex_init(&i915->display.backlight.lock);
|
||||
mutex_init(&i915->display.audio.mutex);
|
||||
mutex_init(&i915->display.wm.wm_mutex);
|
||||
mutex_init(&i915->display.pps.mutex);
|
||||
mutex_init(&i915->display.hdcp.hdcp_mutex);
|
||||
|
||||
intel_display_irq_init(i915);
|
||||
intel_dkl_phy_init(i915);
|
||||
intel_color_init_hooks(i915);
|
||||
|
drivers/gpu/drm/i915/display/intel_display_params.c (new file, 217 lines)
@ -0,0 +1,217 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2023 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "intel_display_params.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define intel_display_param_named(name, T, perm, desc) \
|
||||
module_param_named(name, intel_display_modparams.name, T, perm); \
|
||||
MODULE_PARM_DESC(name, desc)
|
||||
#define intel_display_param_named_unsafe(name, T, perm, desc) \
|
||||
module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \
|
||||
MODULE_PARM_DESC(name, desc)
|
||||
|
||||
static struct intel_display_params intel_display_modparams __read_mostly = {
|
||||
#define MEMBER(T, member, value, ...) .member = (value),
|
||||
INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER)
|
||||
#undef MEMBER
|
||||
};
|
||||
/*
|
||||
* Note: As a rule, keep module parameter sysfs permissions read-only
|
||||
* 0400. Runtime changes are only supported through i915 debugfs.
|
||||
*
|
||||
* For any exceptions requiring write access and runtime changes through module
|
||||
* parameter sysfs, prevent debugfs file creation by setting the parameter's
|
||||
* debugfs mode to 0.
|
||||
*/
|
||||
|
||||
intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
|
||||
"Load VBT from specified file under /lib/firmware");
|
||||
|
||||
intel_display_param_named_unsafe(lvds_channel_mode, int, 0400,
|
||||
"Specify LVDS channel mode "
|
||||
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
|
||||
|
||||
intel_display_param_named_unsafe(panel_use_ssc, int, 0400,
|
||||
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
|
||||
"(default: auto from VBT)");
|
||||
|
||||
intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
|
||||
"Override/Ignore selection of SDVO panel mode in the VBT "
|
||||
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
|
||||
|
||||
intel_display_param_named_unsafe(enable_dc, int, 0400,
|
||||
"Enable power-saving display C-states. "
|
||||
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
|
||||
"3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
|
||||
|
||||
intel_display_param_named_unsafe(enable_dpt, bool, 0400,
|
||||
"Enable display page table (DPT) (default: true)");
|
||||
|
||||
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
|
||||
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
|
||||
|
||||
intel_display_param_named_unsafe(disable_power_well, int, 0400,
|
||||
"Disable display power wells when possible "
|
||||
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
|
||||
|
||||
intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)");
|
||||
|
||||
intel_display_param_named_unsafe(invert_brightness, int, 0400,
|
||||
"Invert backlight brightness "
|
||||
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
|
||||
"report PCI device ID, subsystem vendor and subsystem device ID "
|
||||
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
|
||||
"It will then be included in an upcoming module version.");
|
||||
|
||||
/* WA to get away with the default setting in VBT for early platforms.Will be removed */
|
||||
intel_display_param_named_unsafe(edp_vswing, int, 0400,
|
||||
"Ignore/Override vswing pre-emph table selection from VBT "
|
||||
"(0=use value from vbt [default], 1=low power swing(200mV),"
|
||||
"2=default swing(400mV))");
|
||||
|
||||
intel_display_param_named(enable_dpcd_backlight, int, 0400,
|
||||
"Enable support for DPCD backlight control"
|
||||
"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
|
||||
|
||||
intel_display_param_named_unsafe(load_detect_test, bool, 0400,
|
||||
"Force-enable the VGA load detect code for testing (default:false). "
|
||||
"For developers only.");
|
||||
|
||||
intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400,
|
||||
"Force a modeset during gpu reset for testing (default:false). "
|
||||
"For developers only.");
|
||||
|
||||
intel_display_param_named(disable_display, bool, 0400,
|
||||
"Disable display (default: false)");
|
||||
|
||||
intel_display_param_named(verbose_state_checks, bool, 0400,
|
||||
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
|
||||
|
||||
intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400,
|
||||
"Force enable atomic functionality on platforms that don't have full support yet.");
|
||||
|
||||
intel_display_param_named_unsafe(enable_dp_mst, bool, 0400,
|
||||
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
|
||||
|
||||
intel_display_param_named_unsafe(enable_fbc, int, 0400,
|
||||
"Enable frame buffer compression for power savings "
|
||||
"(default: -1 (use per-chip default))");
|
||||
|
||||
intel_display_param_named_unsafe(enable_psr, int, 0400,
|
||||
"Enable PSR "
|
||||
"(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
|
||||
"Default: -1 (use per-chip default)");
|
||||
|
||||
intel_display_param_named(psr_safest_params, bool, 0400,
|
||||
"Replace PSR VBT parameters by the safest and not optimal ones. This "
|
||||
"is helpful to detect if PSR issues are related to bad values set in "
|
||||
" VBT. (0=use VBT parameters, 1=use safest parameters)"
|
||||
"Default: 0");
|
||||
|
||||
intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
|
||||
"Enable PSR2 selective fetch "
|
||||
"(0=disabled, 1=enabled) "
|
||||
"Default: 1");
|
||||
|
||||
__maybe_unused
|
||||
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
|
||||
const char *name, bool val)
|
||||
{
|
||||
drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val));
|
||||
}
|
||||
|
||||
__maybe_unused
|
||||
static void _param_print_int(struct drm_printer *p, const char *driver_name,
|
||||
const char *name, int val)
|
||||
{
|
||||
drm_printf(p, "%s.%s=%d\n", driver_name, name, val);
|
||||
}
|
||||
|
||||
__maybe_unused
|
||||
static void _param_print_uint(struct drm_printer *p, const char *driver_name,
|
||||
const char *name, unsigned int val)
|
||||
{
|
||||
drm_printf(p, "%s.%s=%u\n", driver_name, name, val);
|
||||
}
|
||||
|
||||
__maybe_unused
|
||||
static void _param_print_ulong(struct drm_printer *p, const char *driver_name,
|
||||
const char *name, unsigned long val)
|
||||
{
|
||||
drm_printf(p, "%s.%s=%lu\n", driver_name, name, val);
|
||||
}
|
||||
|
||||
__maybe_unused
|
||||
static void _param_print_charp(struct drm_printer *p, const char *driver_name,
|
||||
const char *name, const char *val)
|
||||
{
|
||||
drm_printf(p, "%s.%s=%s\n", driver_name, name, val);
|
||||
}
|
||||
|
||||
#define _param_print(p, driver_name, name, val) \
|
||||
_Generic(val, \
|
||||
bool : _param_print_bool, \
|
||||
int : _param_print_int, \
|
||||
unsigned int : _param_print_uint, \
|
||||
unsigned long : _param_print_ulong, \
|
||||
char * : _param_print_charp)(p, driver_name, name, val)
|
||||
|
||||
/**
|
||||
* intel_display_params_dump - dump intel display modparams
|
||||
* @i915: i915 device
|
||||
* @p: the &drm_printer
|
||||
*
|
||||
* Pretty printer for i915 modparams.
|
||||
*/
|
||||
void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
|
||||
{
|
||||
#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
|
||||
INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
|
||||
#undef PRINT
|
||||
}
|
||||
|
||||
__maybe_unused static void _param_dup_charp(char **valp)
|
||||
{
|
||||
*valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC);
|
||||
}
|
||||
|
||||
__maybe_unused static void _param_nop(void *valp)
|
||||
{
|
||||
}
|
||||
|
||||
#define _param_dup(valp) \
|
||||
_Generic(valp, \
|
||||
char ** : _param_dup_charp, \
|
||||
default : _param_nop) \
|
||||
(valp)
|
||||
|
||||
void intel_display_params_copy(struct intel_display_params *dest)
|
||||
{
|
||||
*dest = intel_display_modparams;
|
||||
#define DUP(T, x, ...) _param_dup(&dest->x);
|
||||
INTEL_DISPLAY_PARAMS_FOR_EACH(DUP);
|
||||
#undef DUP
|
||||
}
|
||||
|
||||
__maybe_unused static void _param_free_charp(char **valp)
|
||||
{
|
||||
kfree(*valp);
|
||||
*valp = NULL;
|
||||
}
|
||||
|
||||
#define _param_free(valp) \
|
||||
_Generic(valp, \
|
||||
char ** : _param_free_charp, \
|
||||
default : _param_nop) \
|
||||
(valp)
|
||||
|
||||
/* free the allocated members, *not* the passed in params itself */
|
||||
void intel_display_params_free(struct intel_display_params *params)
|
||||
{
|
||||
#define FREE(T, x, ...) _param_free(¶ms->x);
|
||||
INTEL_DISPLAY_PARAMS_FOR_EACH(FREE);
|
||||
#undef FREE
|
||||
}
|
drivers/gpu/drm/i915/display/intel_display_params.h (new file, 61 lines)
@ -0,0 +1,61 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2023 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _INTEL_DISPLAY_PARAMS_H_
|
||||
#define _INTEL_DISPLAY_PARAMS_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct drm_printer;
|
||||
struct drm_i915_private;
|
||||
|
||||
/*
|
||||
* Invoke param, a function-like macro, for each intel display param, with
|
||||
* arguments:
|
||||
*
|
||||
* param(type, name, value, mode)
|
||||
*
|
||||
* type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
|
||||
* name: name of the parameter
|
||||
* value: initial/default value of the parameter
|
||||
* mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
|
||||
* debugfs file
|
||||
*/
|
||||
#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
|
||||
param(char *, vbt_firmware, NULL, 0400) \
|
||||
param(int, lvds_channel_mode, 0, 0400) \
|
||||
param(int, panel_use_ssc, -1, 0600) \
|
||||
param(int, vbt_sdvo_panel_type, -1, 0400) \
|
||||
param(int, enable_dc, -1, 0400) \
|
||||
param(bool, enable_dpt, true, 0400) \
|
||||
param(bool, enable_sagv, true, 0600) \
|
||||
param(int, disable_power_well, -1, 0400) \
|
||||
param(bool, enable_ips, true, 0600) \
|
||||
param(int, invert_brightness, 0, 0600) \
|
||||
param(int, edp_vswing, 0, 0400) \
|
||||
param(int, enable_dpcd_backlight, -1, 0600) \
|
||||
param(bool, load_detect_test, false, 0600) \
|
||||
param(bool, force_reset_modeset_test, false, 0600) \
|
||||
param(bool, disable_display, false, 0400) \
|
||||
param(bool, verbose_state_checks, true, 0400) \
|
||||
param(bool, nuclear_pageflip, false, 0400) \
|
||||
param(bool, enable_dp_mst, true, 0600) \
|
||||
param(int, enable_fbc, -1, 0600) \
|
||||
param(int, enable_psr, -1, 0600) \
|
||||
param(bool, psr_safest_params, false, 0400) \
|
||||
param(bool, enable_psr2_sel_fetch, true, 0400) \
|
||||
|
||||
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
        INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER);
};
#undef MEMBER
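The same X-macro table can expand into more than the struct. A small self-contained illustration of the pattern, with placeholder parameters, generating both a struct definition and a default initializer from one list:

#include <stdio.h>

#define DEMO_PARAMS_FOR_EACH(param) \
        param(int, enable_fbc, -1) \
        param(_Bool, enable_sagv, 1)

/* expand the table into struct members */
#define MEMBER(T, name, value) T name;
struct demo_params { DEMO_PARAMS_FOR_EACH(MEMBER) };
#undef MEMBER

/* expand the same table into a designated initializer with the defaults */
#define INIT(T, name, value) .name = (value),
static const struct demo_params demo_defaults = { DEMO_PARAMS_FOR_EACH(INIT) };
#undef INIT

int main(void)
{
        printf("enable_fbc=%d enable_sagv=%d\n",
               demo_defaults.enable_fbc, demo_defaults.enable_sagv);
        return 0;
}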
|
||||
|
||||
void intel_display_params_dump(struct drm_i915_private *i915,
|
||||
struct drm_printer *p);
|
||||
void intel_display_params_copy(struct intel_display_params *dest);
|
||||
void intel_display_params_free(struct intel_display_params *params);
|
||||
|
||||
#endif
|
@ -967,7 +967,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
|
||||
DISPLAY_VER(dev_priv) >= 11 ?
|
||||
DC_STATE_EN_DC9 : 0;
|
||||
|
||||
if (!dev_priv->params.disable_power_well)
|
||||
if (!dev_priv->display.params.disable_power_well)
|
||||
max_dc = 0;
|
||||
|
||||
if (enable_dc >= 0 && enable_dc <= max_dc) {
|
||||
@ -1016,11 +1016,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
|
||||
|
||||
dev_priv->params.disable_power_well =
|
||||
dev_priv->display.params.disable_power_well =
|
||||
sanitize_disable_power_well_option(dev_priv,
|
||||
dev_priv->params.disable_power_well);
|
||||
dev_priv->display.params.disable_power_well);
|
||||
power_domains->allowed_dc_mask =
|
||||
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
|
||||
get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
|
||||
|
||||
power_domains->target_dc_state =
|
||||
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
|
||||
@ -1950,7 +1950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
|
||||
intel_display_power_get(i915, POWER_DOMAIN_INIT);
|
||||
|
||||
/* Disable power support if the user asked so. */
|
||||
if (!i915->params.disable_power_well) {
|
||||
if (!i915->display.params.disable_power_well) {
|
||||
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
|
||||
i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
|
||||
POWER_DOMAIN_INIT);
|
||||
@ -1977,7 +1977,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
|
||||
fetch_and_zero(&i915->display.power.domains.init_wakeref);
|
||||
|
||||
/* Remove the refcount we took to keep power well support disabled. */
|
||||
if (!i915->params.disable_power_well)
|
||||
if (!i915->display.params.disable_power_well)
|
||||
intel_display_power_put(i915, POWER_DOMAIN_INIT,
|
||||
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
|
||||
|
||||
@ -2096,7 +2096,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
|
||||
* Even if power well support was disabled we still want to disable
|
||||
* power wells if power domains must be deinitialized for suspend.
|
||||
*/
|
||||
if (!i915->params.disable_power_well)
|
||||
if (!i915->display.params.disable_power_well)
|
||||
intel_display_power_put(i915, POWER_DOMAIN_INIT,
|
||||
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
|
||||
|
||||
|
@ -1400,20 +1400,16 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
|
||||
enum dpio_phy phy;
|
||||
enum pipe pipe;
|
||||
u32 tmp;
|
||||
|
||||
drm_WARN_ON_ONCE(&dev_priv->drm,
|
||||
id != VLV_DISP_PW_DPIO_CMN_BC &&
|
||||
id != CHV_DISP_PW_DPIO_CMN_D);
|
||||
|
||||
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
|
||||
pipe = PIPE_A;
|
||||
if (id == VLV_DISP_PW_DPIO_CMN_BC)
|
||||
phy = DPIO_PHY0;
|
||||
} else {
|
||||
pipe = PIPE_C;
|
||||
else
|
||||
phy = DPIO_PHY1;
|
||||
}
|
||||
|
||||
/* since ref/cri clock was enabled */
|
||||
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
|
||||
@ -1428,24 +1424,24 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Enable dynamic power down */
|
||||
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
|
||||
tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
|
||||
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
|
||||
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
|
||||
|
||||
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
|
||||
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
|
||||
tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
|
||||
tmp |= DPIO_DYNPWRDOWNEN_CH1;
|
||||
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
|
||||
vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
|
||||
} else {
|
||||
/*
|
||||
* Force the non-existing CL2 off. BXT does this
|
||||
* too, so maybe it saves some power even though
|
||||
* CL2 doesn't exist?
|
||||
*/
|
||||
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
|
||||
tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
|
||||
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
|
||||
}
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
@ -1499,7 +1495,6 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
|
||||
enum dpio_channel ch, bool override, unsigned int mask)
|
||||
{
|
||||
enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
|
||||
u32 reg, val, expected, actual;
|
||||
|
||||
/*
|
||||
@ -1518,7 +1513,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
|
||||
reg = _CHV_CMN_DW6_CH1;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
val = vlv_dpio_read(dev_priv, pipe, reg);
|
||||
val = vlv_dpio_read(dev_priv, phy, reg);
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
/*
|
||||
|
@ -29,7 +29,7 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
|
||||
return;
|
||||
|
||||
/* reset doesn't touch the display */
|
||||
if (!dev_priv->params.force_reset_modeset_test &&
|
||||
if (!dev_priv->display.params.force_reset_modeset_test &&
|
||||
!gpu_reset_clobbers_display(dev_priv))
|
||||
return;
|
||||
|
||||
|
@ -198,6 +198,12 @@ struct intel_encoder {
|
||||
struct intel_encoder *,
|
||||
const struct intel_crtc_state *,
|
||||
const struct drm_connector_state *);
|
||||
void (*audio_enable)(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state);
|
||||
void (*audio_disable)(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state);
|
||||
/* Read out the current hw state of this connector, returning true if
|
||||
* the encoder is active. If the encoder is enabled it also set the pipe
|
||||
* it is connected to in the pipe parameter. */
|
||||
@ -624,6 +630,9 @@ struct intel_connector {
|
||||
struct drm_dp_aux *dsc_decompression_aux;
|
||||
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
|
||||
u8 fec_capability;
|
||||
|
||||
u8 dsc_hblank_expansion_quirk:1;
|
||||
u8 dsc_decompression_enabled:1;
|
||||
} dp;
|
||||
|
||||
/* Work struct to schedule a uevent on link train failure */
|
||||
@ -676,8 +685,6 @@ struct intel_atomic_state {
|
||||
|
||||
bool rps_interactive;
|
||||
|
||||
struct i915_sw_fence commit_ready;
|
||||
|
||||
struct llist_node freed;
|
||||
};
|
||||
|
||||
@ -1210,6 +1217,7 @@ struct intel_crtc_state {
|
||||
bool has_psr2;
|
||||
bool enable_psr2_sel_fetch;
|
||||
bool req_psr2_sdp_prior_scanline;
|
||||
bool has_panel_replay;
|
||||
bool wm_level_disabled;
|
||||
u32 dc3co_exitline;
|
||||
u16 su_y_granularity;
|
||||
@ -1361,7 +1369,8 @@ struct intel_crtc_state {
|
||||
struct {
|
||||
bool compression_enable;
|
||||
bool dsc_split;
|
||||
u16 compressed_bpp;
|
||||
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
|
||||
u16 compressed_bpp_x16;
|
||||
u8 slice_count;
|
||||
struct drm_dsc_config config;
|
||||
} dsc;
|
||||
@ -1707,9 +1716,13 @@ struct intel_psr {
|
||||
bool irq_aux_error;
|
||||
u16 su_w_granularity;
|
||||
u16 su_y_granularity;
|
||||
bool source_panel_replay_support;
|
||||
bool sink_panel_replay_support;
|
||||
bool panel_replay_enabled;
|
||||
u32 dc3co_exitline;
|
||||
u32 dc3co_exit_delay;
|
||||
struct delayed_work dc3co_work;
|
||||
u8 entry_setup_frames;
|
||||
};
|
||||
|
||||
struct intel_dp {
|
||||
@ -1808,6 +1821,7 @@ struct intel_dp {
|
||||
/* Display stream compression testing */
|
||||
bool force_dsc_en;
|
||||
int force_dsc_output_format;
|
||||
bool force_dsc_fractional_bpp_en;
|
||||
int force_dsc_bpc;
|
||||
|
||||
bool hobl_failed;
|
||||
@ -1992,17 +2006,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
|
||||
|
||||
#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
|
||||
|
||||
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
|
||||
(intel_dp)->psr.source_support)
|
||||
|
||||
static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
|
||||
{
|
||||
if (!intel_encoder_is_dp(encoder))
|
||||
return false;
|
||||
|
||||
return CAN_PSR(enc_to_intel_dp(encoder));
|
||||
}
|
||||
|
||||
static inline struct intel_digital_port *
|
||||
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
|
||||
{
|
||||
|
@ -85,8 +85,8 @@
|
||||
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
|
||||
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
|
||||
|
||||
/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
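The constant changes from the raw factor to the overhead expressed in ppm. A quick arithmetic check of the new value:

#include <stdio.h>

int main(void)
{
        double efficiency = 0.972261;   /* FEC data efficiency */
        long ppm = (long)(1.0 / efficiency * 1000000.0 + 0.5);

        printf("overhead factor = %ld ppm\n", ppm);     /* 1028530 */
        return 0;
}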
|
||||
|
||||
/* Compliance test status bits */
|
||||
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
|
||||
@@ -124,7 +124,31 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->port_clock >= 1000000;
return drm_dp_is_uhbr_rate(crtc_state->port_clock);
}

/**
* intel_dp_link_symbol_size - get the link symbol size for a given link rate
* @rate: link rate in 10kbit/s units
*
* Returns the link symbol size in bits/symbol units depending on the link
* rate -> channel coding.
*/
int intel_dp_link_symbol_size(int rate)
{
return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
}

/**
* intel_dp_link_symbol_clock - convert link rate to link symbol clock
* @rate: link rate in 10kbit/s units
*
* Returns the link symbol clock frequency in kHz units depending on the
* link rate and channel coding.
*/
int intel_dp_link_symbol_clock(int rate)
{
return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
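Worked example (illustrative): rate is in 10 kbit/s units, so HBR2 (rate = 540000) uses 10-bit 8b/10b symbols and yields 540000 * 10 / 10 = 540000 kHz symbol clock, while UHBR10 (rate = 1000000) uses 32-bit 128b/132b symbols and yields 1000000 * 10 / 32 = 312500 kHz.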
|
||||
|
||||
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
|
||||
@ -331,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
|
||||
/*
|
||||
* The required data bandwidth for a mode with given pixel clock and bpp. This
|
||||
* is the required net bandwidth independent of the data bandwidth efficiency.
|
||||
*
|
||||
* TODO: check if callers of this functions should use
|
||||
* intel_dp_effective_data_rate() instead.
|
||||
*/
|
||||
int
|
||||
intel_dp_link_required(int pixel_clock, int bpp)
|
||||
@ -339,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp)
|
||||
return DIV_ROUND_UP(pixel_clock * bpp, 8);
|
||||
}
|
||||
|
||||
/**
* intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
* @pixel_clock: pixel clock in kHz
* @bpp_x16: bits per pixel .4 fixed point format
* @bw_overhead: BW allocation overhead in 1ppm units
*
* Return the effective pixel data rate in kB/sec units taking into account
* the provided SSC, FEC, DSC BW allocation overhead.
*/
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead)
{
return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
1000000 * 16 * 8);
}
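Worked example (illustrative): pixel_clock = 100000 kHz at bpp_x16 = 256 (16 bpp) with bw_overhead = 1000000 ppm (no overhead) gives 100000 * 256 * 1000000 / (1000000 * 16 * 8) = 200000 kB/s, i.e. pixel_clock * bpp / 8; with a 2.853 % overhead (bw_overhead = 1028530) the same mode needs 205706 kB/s.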
|
||||
|
||||
/*
|
||||
* Given a link rate and lanes, get the data bandwidth.
|
||||
*
|
||||
@@ -362,29 +405,27 @@ intel_dp_link_required(int pixel_clock, int bpp)
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
if (max_link_rate >= 1000000) {
/*
* UHBR rates always use 128b/132b channel encoding, and have
* 97.71% data bandwidth efficiency. Consider max_link_rate the
* link bit rate in units of 10000 bps.
*/
int max_link_rate_kbps = max_link_rate * 10;

max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
max_link_rate = max_link_rate_kbps / 8;
}
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
int max_link_rate_kbps = max_link_rate * 10;

/*
* UHBR rates always use 128b/132b channel encoding, and have
* 97.71% data bandwidth efficiency. Consider max_link_rate the
* link bit rate in units of 10000 bps.
*/
/*
* Lower than UHBR rates always use 8b/10b channel encoding, and have
* 80% data bandwidth efficiency for SST non-FEC. However, this turns
* out to be a nop by coincidence, and can be skipped:
* out to be a nop by coincidence:
*
* int max_link_rate_kbps = max_link_rate * 10;
* max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate = max_link_rate_kbps / 8;
*/

return max_link_rate * max_lanes;
return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
ch_coding_efficiency),
1000000 * 8);
}
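Worked example (illustrative, assuming drm_dp_bw_channel_coding_efficiency() reports 800000 ppm for 8b/10b and 967100 ppm for 128b/132b): HBR2 x4 (max_link_rate = 540000) gives 5400000 kbps * 4 * 0.8 / 8 = 2160000 kB/s, the same value the old max_link_rate * max_lanes shortcut produced; UHBR10 x4 gives 10000000 kbps * 4 * 0.9671 / 8 = 4835500 kB/s.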
|
||||
|
||||
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
|
||||
@ -680,8 +721,22 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
|
||||
|
||||
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
return div_u64(mul_u32_u32(mode_clock, 1000000U),
DP_DSC_FEC_OVERHEAD_FACTOR);
return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
1000000U);
}
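Note (illustrative): with DP_DSC_FEC_OVERHEAD_FACTOR now expressed as the overhead in ppm, the multiply/divide order flips but the result stays the same to within a rounding unit; e.g. mode_clock = 594000 kHz gives 594000 * 1028530 / 1000000 = 610946 kHz versus 610947 kHz with the old 1000000 / 972261 form.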
|
||||
|
||||
int intel_dp_bw_fec_overhead(bool fec_enabled)
|
||||
{
|
||||
/*
|
||||
* TODO: Calculate the actual overhead for a given mode.
|
||||
* The hard-coded 1/0.972261=2.853% overhead factor
|
||||
* corresponds (for instance) to the 8b/10b DP FEC 2.4% +
|
||||
* 0.453% DSC overhead. This is enough for a 3840 width mode,
|
||||
* which has a DSC overhead of up to ~0.2%, but may not be
|
||||
* enough for a 1024 width mode where this is ~0.8% (on a 4
|
||||
* lane DP link, with 2 DSC slices and 8 bpp color depth).
|
||||
*/
|
||||
return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1369,9 +1424,9 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
|
||||
const struct intel_connector *connector,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
bool intel_dp_supports_fec(struct intel_dp *intel_dp,
|
||||
const struct intel_connector *connector,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
|
||||
drm_dp_sink_supports_fec(connector->dp.fec_capability);
|
||||
@ -1384,6 +1439,7 @@ static bool intel_dp_supports_dsc(const struct intel_connector *connector,
|
||||
return false;
|
||||
|
||||
return intel_dsc_source_support(crtc_state) &&
|
||||
connector->dp.dsc_decompression_aux &&
|
||||
drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
|
||||
}
|
||||
|
||||
@ -1717,15 +1773,15 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
|
||||
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
|
||||
}
|
||||
|
||||
static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
{
u32 available_bw, required_bw;

available_bw = (link_clock * lane_count * timeslots) / 8;
required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock));
available_bw = (link_clock * lane_count * timeslots * 16) / 8;
required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));

return available_bw > required_bw;
}
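Note: both sides of the check are now in 1/16 bpp units: required_bw uses compressed_bppx16 directly and available_bw picks up the matching factor of 16, so the comparison is equivalent to the previous integer-bpp check but can resolve fractional compressed bpp (e.g. compressed_bppx16 = 168 for 10.5 bpp).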
|
||||
@ -1733,7 +1789,7 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
|
||||
static int dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
struct link_config_limits *limits,
|
||||
u16 compressed_bpp,
|
||||
u16 compressed_bppx16,
|
||||
int timeslots)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
|
||||
@ -1748,8 +1804,8 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
for (lane_count = limits->min_lane_count;
|
||||
lane_count <= limits->max_lane_count;
|
||||
lane_count <<= 1) {
|
||||
if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count,
|
||||
adjusted_mode->clock,
|
||||
if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
|
||||
lane_count, adjusted_mode->clock,
|
||||
pipe_config->output_format,
|
||||
timeslots))
|
||||
continue;
|
||||
@ -1791,7 +1847,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
|
||||
int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
|
||||
switch (pipe_config->output_format) {
|
||||
@ -1808,9 +1864,9 @@ static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
int bpc)
|
||||
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
int bpc)
|
||||
{
|
||||
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
|
||||
pipe_config, bpc) >> 4;
|
||||
@ -1862,10 +1918,11 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
ret = dsc_compute_link_config(intel_dp,
|
||||
pipe_config,
|
||||
limits,
|
||||
valid_dsc_bpp[i],
|
||||
valid_dsc_bpp[i] << 4,
|
||||
timeslots);
|
||||
if (ret == 0) {
|
||||
pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i];
|
||||
pipe_config->dsc.compressed_bpp_x16 =
|
||||
to_bpp_x16(valid_dsc_bpp[i]);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -1881,6 +1938,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
*/
|
||||
static int
|
||||
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
const struct intel_connector *connector,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
struct link_config_limits *limits,
|
||||
int dsc_max_bpp,
|
||||
@ -1888,22 +1946,38 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
|
||||
int pipe_bpp,
|
||||
int timeslots)
|
||||
{
|
||||
u16 compressed_bpp;
|
||||
u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
u16 compressed_bppx16;
|
||||
u8 bppx16_step;
|
||||
int ret;
|
||||
|
||||
/* Compressed BPP should be less than the Input DSC bpp */
|
||||
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
|
||||
if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
|
||||
bppx16_step = 16;
|
||||
else
|
||||
bppx16_step = 16 / bppx16_incr;
|
||||
|
||||
for (compressed_bpp = dsc_max_bpp;
|
||||
compressed_bpp >= dsc_min_bpp;
|
||||
compressed_bpp--) {
|
||||
/* Compressed BPP should be less than the Input DSC bpp */
|
||||
dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
|
||||
dsc_min_bpp = dsc_min_bpp << 4;
|
||||
|
||||
for (compressed_bppx16 = dsc_max_bpp;
|
||||
compressed_bppx16 >= dsc_min_bpp;
|
||||
compressed_bppx16 -= bppx16_step) {
|
||||
if (intel_dp->force_dsc_fractional_bpp_en &&
|
||||
!to_bpp_frac(compressed_bppx16))
|
||||
continue;
|
||||
ret = dsc_compute_link_config(intel_dp,
|
||||
pipe_config,
|
||||
limits,
|
||||
compressed_bpp,
|
||||
compressed_bppx16,
|
||||
timeslots);
|
||||
if (ret == 0) {
|
||||
pipe_config->dsc.compressed_bpp = compressed_bpp;
|
||||
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
|
||||
if (intel_dp->force_dsc_fractional_bpp_en &&
|
||||
to_bpp_frac(compressed_bppx16))
|
||||
drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
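Note (illustrative): bppx16_step sets the search granularity. On display version 14+ with a sink reporting bppx16_incr = 4 (1/4 bpp increments), the step becomes 16 / 4 = 4, so the loop walks compressed_bppx16 down in 0.25 bpp steps; otherwise it keeps the previous whole-bpp behaviour with a step of 16.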
|
||||
@ -1924,12 +1998,14 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
|
||||
int dsc_joiner_max_bpp;
|
||||
|
||||
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
|
||||
dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
|
||||
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
|
||||
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
|
||||
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
|
||||
|
||||
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
|
||||
dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
|
||||
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
|
||||
pipe_config,
|
||||
pipe_bpp / 3);
|
||||
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
|
||||
|
||||
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
|
||||
@ -1939,7 +2015,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
|
||||
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
|
||||
|
||||
if (DISPLAY_VER(i915) >= 13)
|
||||
return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits,
|
||||
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
|
||||
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
|
||||
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
|
||||
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
|
||||
@ -2084,19 +2160,22 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
|
||||
pipe_config->lane_count = limits->max_lane_count;
|
||||
|
||||
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
|
||||
dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
|
||||
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
|
||||
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
|
||||
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
|
||||
|
||||
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
|
||||
dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
|
||||
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
|
||||
pipe_config,
|
||||
pipe_bpp / 3);
|
||||
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
|
||||
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
|
||||
|
||||
/* Compressed BPP should be less than the Input DSC bpp */
|
||||
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
|
||||
|
||||
pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp);
|
||||
pipe_config->dsc.compressed_bpp_x16 =
|
||||
to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
|
||||
|
||||
pipe_config->pipe_bpp = pipe_bpp;
|
||||
|
||||
@ -2118,8 +2197,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
int ret;
|
||||
|
||||
pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
|
||||
intel_dp_supports_fec(intel_dp, connector, pipe_config);
|
||||
pipe_config->fec_enable = pipe_config->fec_enable ||
|
||||
(!intel_dp_is_edp(intel_dp) &&
|
||||
intel_dp_supports_fec(intel_dp, connector, pipe_config));
|
||||
|
||||
if (!intel_dp_supports_dsc(connector, pipe_config))
|
||||
return -EINVAL;
|
||||
@ -2184,18 +2264,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
|
||||
ret = intel_dp_dsc_compute_params(connector, pipe_config);
|
||||
if (ret < 0) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Cannot compute valid DSC parameters for Input Bpp = %d "
|
||||
"Compressed BPP = %d\n",
|
||||
"Cannot compute valid DSC parameters for Input Bpp = %d"
|
||||
"Compressed BPP = " BPP_X16_FMT "\n",
|
||||
pipe_config->pipe_bpp,
|
||||
pipe_config->dsc.compressed_bpp);
|
||||
BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
|
||||
return ret;
|
||||
}
|
||||
|
||||
pipe_config->dsc.compression_enable = true;
|
||||
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
|
||||
"Compressed Bpp = %d Slice Count = %d\n",
|
||||
"Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
|
||||
pipe_config->pipe_bpp,
|
||||
pipe_config->dsc.compressed_bpp,
|
||||
BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
|
||||
pipe_config->dsc.slice_count);
|
||||
|
||||
return 0;
|
||||
@ -2307,6 +2387,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
const struct intel_connector *connector =
|
||||
to_intel_connector(conn_state->connector);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
@ -2315,6 +2397,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
bool dsc_needed;
|
||||
int ret = 0;
|
||||
|
||||
if (pipe_config->fec_enable &&
|
||||
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
|
||||
return -EINVAL;
|
||||
|
||||
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
|
||||
adjusted_mode->crtc_clock))
|
||||
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
|
||||
@ -2362,15 +2448,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
|
||||
if (pipe_config->dsc.compression_enable) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
|
||||
"DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
|
||||
pipe_config->lane_count, pipe_config->port_clock,
|
||||
pipe_config->pipe_bpp,
|
||||
pipe_config->dsc.compressed_bpp);
|
||||
BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"DP link rate required %i available %i\n",
|
||||
intel_dp_link_required(adjusted_mode->crtc_clock,
|
||||
pipe_config->dsc.compressed_bpp),
|
||||
to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
|
||||
intel_dp_max_data_rate(pipe_config->port_clock,
|
||||
pipe_config->lane_count));
|
||||
} else {
|
||||
@ -2439,12 +2525,22 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
|
||||
/*
|
||||
* Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
|
||||
* VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
|
||||
* Colorimetry Format indication.
|
||||
*/
|
||||
vsc->revision = 0x5;
|
||||
if (crtc_state->has_panel_replay) {
|
||||
/*
|
||||
* Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
|
||||
* VSC SDP supporting 3D stereo, Panel Replay, and Pixel
|
||||
* Encoding/Colorimetry Format indication.
|
||||
*/
|
||||
vsc->revision = 0x7;
|
||||
} else {
|
||||
/*
|
||||
* Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
|
||||
* VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
|
||||
* Colorimetry Format indication.
|
||||
*/
|
||||
vsc->revision = 0x5;
|
||||
}
|
||||
|
||||
vsc->length = 0x13;
|
||||
|
||||
/* DP 1.4a spec, Table 2-120 */
|
||||
@ -2553,6 +2649,21 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
|
||||
vsc->revision = 0x4;
|
||||
vsc->length = 0xe;
|
||||
}
|
||||
} else if (crtc_state->has_panel_replay) {
|
||||
if (intel_dp->psr.colorimetry_support &&
|
||||
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
|
||||
/* [Panel Replay with colorimetry info] */
|
||||
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
|
||||
vsc);
|
||||
} else {
|
||||
/*
|
||||
* [Panel Replay without colorimetry info]
|
||||
* Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
|
||||
* VSC SDP supporting 3D stereo + Panel Replay.
|
||||
*/
|
||||
vsc->revision = 0x6;
|
||||
vsc->length = 0x10;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* [PSR1]
|
||||
@ -2629,7 +2740,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
|
||||
static void
|
||||
intel_dp_drrs_compute_config(struct intel_connector *connector,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
int link_bpp)
|
||||
int link_bpp_x16)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
const struct drm_display_mode *downclock_mode =
|
||||
@ -2654,9 +2765,10 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
|
||||
if (pipe_config->splitter.enable)
|
||||
pixel_clock /= pipe_config->splitter.link_count;
|
||||
|
||||
intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock,
|
||||
pipe_config->port_clock, &pipe_config->dp_m2_n2,
|
||||
pipe_config->fec_enable);
|
||||
intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
|
||||
pipe_config->port_clock,
|
||||
intel_dp_bw_fec_overhead(pipe_config->fec_enable),
|
||||
&pipe_config->dp_m2_n2);
|
||||
|
||||
/* FIXME: abstract this better */
|
||||
if (pipe_config->splitter.enable)
|
||||
@ -2757,7 +2869,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
const struct drm_display_mode *fixed_mode;
|
||||
struct intel_connector *connector = intel_dp->attached_connector;
|
||||
int ret = 0, link_bpp;
|
||||
int ret = 0, link_bpp_x16;
|
||||
|
||||
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
|
||||
pipe_config->has_pch_encoder = true;
|
||||
@ -2806,10 +2918,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
drm_dp_enhanced_frame_cap(intel_dp->dpcd);
|
||||
|
||||
if (pipe_config->dsc.compression_enable)
|
||||
link_bpp = pipe_config->dsc.compressed_bpp;
|
||||
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
|
||||
else
|
||||
link_bpp = intel_dp_output_bpp(pipe_config->output_format,
|
||||
pipe_config->pipe_bpp);
|
||||
link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
|
||||
pipe_config->pipe_bpp));
|
||||
|
||||
if (intel_dp->mso_link_count) {
|
||||
int n = intel_dp->mso_link_count;
|
||||
@ -2833,12 +2945,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
|
||||
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
|
||||
|
||||
intel_link_compute_m_n(link_bpp,
|
||||
intel_link_compute_m_n(link_bpp_x16,
|
||||
pipe_config->lane_count,
|
||||
adjusted_mode->crtc_clock,
|
||||
pipe_config->port_clock,
|
||||
&pipe_config->dp_m_n,
|
||||
pipe_config->fec_enable);
|
||||
intel_dp_bw_fec_overhead(pipe_config->fec_enable),
|
||||
&pipe_config->dp_m_n);
|
||||
|
||||
/* FIXME: abstract this better */
|
||||
if (pipe_config->splitter.enable)
|
||||
@ -2849,7 +2961,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
|
||||
intel_vrr_compute_config(pipe_config, conn_state);
|
||||
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
|
||||
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp);
|
||||
intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
|
||||
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
|
||||
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
|
||||
|
||||
@ -2917,24 +3029,179 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
|
||||
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
|
||||
}
|
||||
|
||||
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
bool enable)
|
||||
static int
|
||||
write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
int ret;
|
||||
int err;
|
||||
u8 val;
|
||||
|
||||
if (!crtc_state->dsc.compression_enable)
|
||||
return;
|
||||
err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
|
||||
enable ? DP_DECOMPRESSION_EN : 0);
|
||||
if (ret < 0)
|
||||
if (set)
|
||||
val |= flag;
|
||||
else
|
||||
val &= ~flag;
|
||||
|
||||
return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
|
||||
if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
|
||||
DP_DECOMPRESSION_EN, enable) < 0)
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Failed to %s sink decompression state\n",
|
||||
str_enable_disable(enable));
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
struct drm_dp_aux *aux = connector->port ?
|
||||
connector->port->passthrough_aux : NULL;
|
||||
|
||||
if (!aux)
|
||||
return;
|
||||
|
||||
if (write_dsc_decompression_flag(aux,
|
||||
DP_DSC_PASSTHROUGH_EN, enable) < 0)
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Failed to %s sink compression passthrough state\n",
|
||||
str_enable_disable(enable));
|
||||
}
|
||||
|
||||
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
|
||||
const struct intel_connector *connector,
|
||||
bool for_get_ref)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
struct drm_connector *_connector_iter;
|
||||
struct drm_connector_state *old_conn_state;
|
||||
struct drm_connector_state *new_conn_state;
|
||||
int ref_count = 0;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* On SST the decompression AUX device won't be shared, each connector
|
||||
* uses for this its own AUX targeting the sink device.
|
||||
*/
|
||||
if (!connector->mst_port)
|
||||
return connector->dp.dsc_decompression_enabled ? 1 : 0;
|
||||
|
||||
for_each_oldnew_connector_in_state(&state->base, _connector_iter,
|
||||
old_conn_state, new_conn_state, i) {
|
||||
const struct intel_connector *
|
||||
connector_iter = to_intel_connector(_connector_iter);
|
||||
|
||||
if (connector_iter->mst_port != connector->mst_port)
|
||||
continue;
|
||||
|
||||
if (!connector_iter->dp.dsc_decompression_enabled)
|
||||
continue;
|
||||
|
||||
drm_WARN_ON(&i915->drm,
|
||||
(for_get_ref && !new_conn_state->crtc) ||
|
||||
(!for_get_ref && !old_conn_state->crtc));
|
||||
|
||||
if (connector_iter->dp.dsc_decompression_aux ==
|
||||
connector->dp.dsc_decompression_aux)
|
||||
ref_count++;
|
||||
}
|
||||
|
||||
return ref_count;
|
||||
}
|
||||
|
||||
static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector)
|
||||
{
|
||||
bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
|
||||
|
||||
connector->dp.dsc_decompression_enabled = true;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector)
|
||||
{
|
||||
connector->dp.dsc_decompression_enabled = false;
|
||||
|
||||
return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
|
||||
* @state: atomic state
|
||||
* @connector: connector to enable the decompression for
|
||||
* @new_crtc_state: new state for the CRTC driving @connector
|
||||
*
|
||||
* Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
|
||||
* register of the appropriate sink/branch device. On SST this is always the
|
||||
* sink device, whereas on MST based on each device's DSC capabilities it's
|
||||
* either the last branch device (enabling decompression in it) or both the
|
||||
* last branch device (enabling passthrough in it) and the sink device
|
||||
* (enabling decompression in it).
|
||||
*/
|
||||
void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
|
||||
if (!new_crtc_state->dsc.compression_enable)
|
||||
return;
|
||||
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
!connector->dp.dsc_decompression_aux ||
|
||||
connector->dp.dsc_decompression_enabled))
|
||||
return;
|
||||
|
||||
if (!intel_dp_dsc_aux_get_ref(state, connector))
|
||||
return;
|
||||
|
||||
intel_dp_sink_set_dsc_passthrough(connector, true);
|
||||
intel_dp_sink_set_dsc_decompression(connector, true);
|
||||
}
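Note: on MST several connectors can share the same dsc_decompression_aux (the last common branch device). The get_ref/put_ref helpers above make sure DP_DSC_ENABLE is only written for the first stream that starts using that AUX and for the last one that stops, with passthrough enabled before decompression here and disabled after it in the disable path.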
|
||||
|
||||
/**
|
||||
* intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
|
||||
* @state: atomic state
|
||||
* @connector: connector to disable the decompression for
|
||||
* @old_crtc_state: old state for the CRTC driving @connector
|
||||
*
|
||||
* Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
|
||||
* register of the appropriate sink/branch device, corresponding to the
|
||||
* sequence in intel_dp_sink_enable_decompression().
|
||||
*/
|
||||
void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector,
|
||||
const struct intel_crtc_state *old_crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
|
||||
if (!old_crtc_state->dsc.compression_enable)
|
||||
return;
|
||||
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
!connector->dp.dsc_decompression_aux ||
|
||||
!connector->dp.dsc_decompression_enabled))
|
||||
return;
|
||||
|
||||
if (!intel_dp_dsc_aux_put_ref(state, connector))
|
||||
return;
|
||||
|
||||
intel_dp_sink_set_dsc_decompression(connector, false);
|
||||
intel_dp_sink_set_dsc_passthrough(connector, false);
|
||||
}
|
||||
|
||||
static void
|
||||
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
|
||||
{
|
||||
@ -3771,7 +4038,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
|
||||
return i915->params.enable_dp_mst &&
|
||||
return i915->display.params.enable_dp_mst &&
|
||||
intel_dp_mst_source_support(intel_dp) &&
|
||||
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
|
||||
}
|
||||
@ -3789,13 +4056,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
str_yes_no(intel_dp_mst_source_support(intel_dp)),
|
||||
str_yes_no(sink_can_mst),
|
||||
str_yes_no(i915->params.enable_dp_mst));
|
||||
str_yes_no(i915->display.params.enable_dp_mst));
|
||||
|
||||
if (!intel_dp_mst_source_support(intel_dp))
|
||||
return;
|
||||
|
||||
intel_dp->is_mst = sink_can_mst &&
|
||||
i915->params.enable_dp_mst;
|
||||
i915->display.params.enable_dp_mst;
|
||||
|
||||
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
|
||||
intel_dp->is_mst);
|
||||
@ -3865,11 +4132,16 @@ static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
|
||||
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
|
||||
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
|
||||
|
||||
if (vsc->revision == 0x6) {
|
||||
sdp->db[0] = 1;
|
||||
sdp->db[3] = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
|
||||
* per DP 1.4a spec.
|
||||
* Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
|
||||
* Format as per DP 1.4a spec and DP 2.0 respectively.
|
||||
*/
|
||||
if (vsc->revision != 0x5)
|
||||
if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
|
||||
goto out;
|
||||
|
||||
/* VSC SDP Payload for DB16 through DB18 */
|
||||
@ -4049,7 +4321,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
|
||||
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
|
||||
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
|
||||
|
||||
/* TODO: Add DSC case (DIP_ENABLE_PPS) */
|
||||
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
|
||||
if (!enable && HAS_DSC(dev_priv))
|
||||
val &= ~VDIP_ENABLE_PPS;
|
||||
|
||||
/* When PSR is enabled, this routine doesn't disable VSC DIP */
|
||||
if (!crtc_state->has_psr)
|
||||
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
|
||||
@ -5409,6 +5684,7 @@ intel_dp_detect(struct drm_connector *connector,
|
||||
if (status == connector_status_disconnected) {
|
||||
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
|
||||
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
|
||||
intel_dp->psr.sink_panel_replay_support = false;
|
||||
|
||||
if (intel_dp->is_mst) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
@ -6037,8 +6313,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
* (eg. Acer Chromebook C710), so we'll check it only if multiple
|
||||
* ports are attempting to use the same AUX CH, according to VBT.
|
||||
*/
|
||||
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
|
||||
!intel_digital_port_connected(encoder)) {
|
||||
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
|
||||
/*
|
||||
* If this fails, presume the DPCD answer came
|
||||
* from some other port using the same AUX CH.
|
||||
@ -6046,10 +6321,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
* FIXME maybe cleaner to check this before the
|
||||
* DPCD read? Would need sort out the VDD handling...
|
||||
*/
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
if (!intel_digital_port_connected(encoder)) {
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unfortunately even the HPD based detection fails on
|
||||
* eg. Asus B360M-A (CFL+CNP), so as a last resort fall
|
||||
* back to checking for a VGA branch device. Only do this
|
||||
* on known affected platforms to minimize false positives.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
|
||||
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
|
||||
DP_DWN_STRM_PORT_TYPE_ANALOG) {
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->drm.mode_config.mutex);
|
||||
@ -6238,16 +6530,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
|
||||
"HDCP init failed, skipping.\n");
|
||||
}
|
||||
|
||||
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
|
||||
* 0xd. Failure to do so will result in spurious interrupts being
|
||||
* generated on the port when a cable is not attached.
|
||||
*/
|
||||
if (IS_G45(dev_priv)) {
|
||||
u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
|
||||
intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
|
||||
(temp & ~0xf) | 0xd);
|
||||
}
|
||||
|
||||
intel_dp->frl.is_trained = false;
|
||||
intel_dp->frl.trained_rate_gbps = 0;
|
||||
|
||||
|
@ -57,9 +57,12 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
|
||||
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
|
||||
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
bool enable);
|
||||
void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector,
|
||||
const struct intel_crtc_state *new_crtc_state);
|
||||
void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
|
||||
struct intel_connector *connector,
|
||||
const struct intel_crtc_state *old_crtc_state);
|
||||
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
|
||||
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
|
||||
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
|
||||
@ -78,6 +81,8 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
|
||||
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
|
||||
bool intel_dp_is_edp(struct intel_dp *intel_dp);
|
||||
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
|
||||
int intel_dp_link_symbol_size(int rate);
|
||||
int intel_dp_link_symbol_clock(int rate);
|
||||
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
|
||||
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
|
||||
bool long_hpd);
|
||||
@ -98,6 +103,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
|
||||
|
||||
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
|
||||
int intel_dp_link_required(int pixel_clock, int bpp);
|
||||
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
|
||||
int bw_overhead);
|
||||
int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
|
||||
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
|
||||
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
|
||||
@ -125,6 +132,10 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
|
||||
enum intel_output_format output_format,
|
||||
u32 pipe_bpp,
|
||||
u32 timeslots);
|
||||
int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
|
||||
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
int bpc);
|
||||
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
|
||||
int mode_clock, int mode_hdisplay,
|
||||
bool bigjoiner);
|
||||
@ -136,7 +147,16 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
|
||||
return ~((1 << lane_count) - 1) & 0xf;
|
||||
}
|
||||
|
||||
bool intel_dp_supports_fec(struct intel_dp *intel_dp,
|
||||
const struct intel_connector *connector,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
|
||||
int intel_dp_bw_fec_overhead(bool fec_enabled);
|
||||
|
||||
bool intel_dp_supports_fec(struct intel_dp *intel_dp,
|
||||
const struct intel_connector *connector,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
|
||||
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
|
||||
|
||||
void intel_ddi_update_pipe(struct intel_atomic_state *state,
|
||||
|
@ -74,7 +74,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
|
||||
|
||||
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
|
||||
if (index)
|
||||
return 0;
|
||||
@ -83,12 +83,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
* The clock divider is based off the hrawclk, and would like to run at
|
||||
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
|
||||
*/
|
||||
return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
|
||||
return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
|
||||
}
|
||||
|
||||
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
u32 freq;
|
||||
|
||||
@ -101,18 +101,18 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
* divide by 2000 and use that
|
||||
*/
|
||||
if (dig_port->aux_ch == AUX_CH_A)
|
||||
freq = dev_priv->display.cdclk.hw.cdclk;
|
||||
freq = i915->display.cdclk.hw.cdclk;
|
||||
else
|
||||
freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
|
||||
freq = RUNTIME_INFO(i915)->rawclk_freq;
|
||||
return DIV_ROUND_CLOSEST(freq, 2000);
|
||||
}
|
||||
|
||||
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
|
||||
if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
|
||||
if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
|
||||
/* Workaround for non-ULT HSW */
|
||||
switch (index) {
|
||||
case 0: return 63;
|
||||
@ -165,12 +165,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
|
||||
u32 aux_clock_divider)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *dev_priv =
|
||||
to_i915(dig_port->base.base.dev);
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
u32 timeout;
|
||||
|
||||
/* Max timeout value on G4x-BDW: 1.6ms */
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
if (IS_BROADWELL(i915))
|
||||
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
|
||||
else
|
||||
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
|
||||
@ -229,8 +228,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
|
||||
u32 aux_send_ctl_flags)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *i915 =
|
||||
to_i915(dig_port->base.base.dev);
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
|
||||
bool is_tc_port = intel_phy_is_tc(i915, phy);
|
||||
i915_reg_t ch_ctl, ch_data[5];
|
||||
@ -531,9 +529,40 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
switch (aux_ch) {
|
||||
case AUX_CH_B:
|
||||
case AUX_CH_C:
|
||||
case AUX_CH_D:
|
||||
return VLV_DP_AUX_CH_CTL(aux_ch);
|
||||
default:
|
||||
MISSING_CASE(aux_ch);
|
||||
return VLV_DP_AUX_CH_CTL(AUX_CH_B);
|
||||
}
|
||||
}
|
||||
|
||||
static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
switch (aux_ch) {
|
||||
case AUX_CH_B:
|
||||
case AUX_CH_C:
|
||||
case AUX_CH_D:
|
||||
return VLV_DP_AUX_CH_DATA(aux_ch, index);
|
||||
default:
|
||||
MISSING_CASE(aux_ch);
|
||||
return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
|
||||
}
|
||||
}
|
||||
|
||||
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -550,7 +579,6 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
|
||||
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -567,7 +595,6 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
|
||||
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -586,7 +613,6 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
|
||||
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -605,7 +631,6 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
|
||||
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -625,7 +650,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
|
||||
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -645,7 +669,6 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
|
||||
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -668,7 +691,6 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
|
||||
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -691,7 +713,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
|
||||
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -702,16 +724,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
|
||||
case AUX_CH_USBC2:
|
||||
case AUX_CH_USBC3:
|
||||
case AUX_CH_USBC4:
|
||||
return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch);
|
||||
return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
|
||||
default:
|
||||
MISSING_CASE(aux_ch);
|
||||
return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A);
|
||||
return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
|
||||
}
|
||||
}
|
||||
|
||||
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
|
||||
@ -722,10 +744,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
|
||||
case AUX_CH_USBC2:
|
||||
case AUX_CH_USBC3:
|
||||
case AUX_CH_USBC4:
|
||||
return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index);
|
||||
return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
|
||||
default:
|
||||
MISSING_CASE(aux_ch);
|
||||
return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index);
|
||||
return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
|
||||
}
|
||||
}
|
||||
|
||||
@ -739,49 +761,52 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
|
||||
|
||||
void intel_dp_aux_init(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct intel_encoder *encoder = &dig_port->base;
|
||||
enum aux_ch aux_ch = dig_port->aux_ch;
|
||||
char buf[AUX_CH_NAME_BUFSIZE];
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14) {
|
||||
if (DISPLAY_VER(i915) >= 14) {
|
||||
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
} else if (DISPLAY_VER(i915) >= 12) {
|
||||
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 9) {
|
||||
} else if (DISPLAY_VER(i915) >= 9) {
|
||||
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
|
||||
} else if (HAS_PCH_SPLIT(dev_priv)) {
|
||||
} else if (HAS_PCH_SPLIT(i915)) {
|
||||
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
|
||||
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
|
||||
intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
|
||||
} else {
|
||||
intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
|
||||
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
if (DISPLAY_VER(i915) >= 9)
|
||||
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
|
||||
else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
|
||||
else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
|
||||
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
else if (HAS_PCH_SPLIT(i915))
|
||||
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
|
||||
else
|
||||
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
if (DISPLAY_VER(i915) >= 9)
|
||||
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
|
||||
else
|
||||
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
|
||||
|
||||
intel_dp->aux.drm_dev = &dev_priv->drm;
|
||||
intel_dp->aux.drm_dev = &i915->drm;
|
||||
drm_dp_aux_init(&intel_dp->aux);
|
||||
|
||||
/* Failure to allocate our preferred name is not critical */
|
||||
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
|
||||
aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch),
|
||||
aux_ch_name(i915, buf, sizeof(buf), aux_ch),
|
||||
encoder->base.name);
|
||||
|
||||
intel_dp->aux.transfer = intel_dp_aux_transfer;
|
||||
|
@ -146,7 +146,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
|
||||
* HDR static metadata we need to start maintaining table of
|
||||
* ranges for such panels.
|
||||
*/
|
||||
if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
|
||||
if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
|
||||
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
|
||||
BIT(HDMI_STATIC_METADATA_TYPE1))) {
|
||||
drm_info(&i915->drm,
|
||||
@ -489,7 +489,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
|
||||
/* Check the VBT and user's module parameters to figure out which
|
||||
* interfaces to probe
|
||||
*/
|
||||
switch (i915->params.enable_dpcd_backlight) {
|
||||
switch (i915->display.params.enable_dpcd_backlight) {
|
||||
case INTEL_DP_AUX_BACKLIGHT_OFF:
|
||||
return -ENODEV;
|
||||
case INTEL_DP_AUX_BACKLIGHT_AUTO:
|
||||
|
@ -21,13 +21,14 @@
|
||||
#define __xe2lpd_aux_ch_idx(aux_ch) \
|
||||
(aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A)
|
||||
|
||||
/* TODO: Remove implicit dev_priv */
|
||||
#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
|
||||
#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
|
||||
#define _DPA_AUX_CH_CTL 0x64010
|
||||
#define _DPB_AUX_CH_CTL 0x64110
|
||||
#define _XELPDP_USBC1_AUX_CH_CTL 0x16f210
|
||||
#define _XELPDP_USBC2_AUX_CH_CTL 0x16f410
|
||||
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \
|
||||
_DPB_AUX_CH_CTL)
|
||||
#define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \
|
||||
_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL))
|
||||
#define _XELPDP_DP_AUX_CH_CTL(aux_ch) \
|
||||
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
|
||||
_DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \
|
||||
@ -69,13 +70,14 @@
|
||||
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */
|
||||
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1)
|
||||
|
||||
/* TODO: Remove implicit dev_priv */
|
||||
#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
|
||||
#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
|
||||
#define _DPA_AUX_CH_DATA1 0x64014
|
||||
#define _DPB_AUX_CH_DATA1 0x64114
|
||||
#define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214
|
||||
#define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414
|
||||
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \
|
||||
_DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
|
||||
#define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \
|
||||
_DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
|
||||
#define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \
|
||||
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
|
||||
_DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
@ -43,6 +44,9 @@
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_hdcp.h"
|
||||
#include "intel_hotplug.h"
|
||||
#include "intel_link_bw.h"
|
||||
#include "intel_psr.h"
|
||||
#include "intel_vdsc.h"
|
||||
#include "skl_scaler.h"
|
||||
|
||||
static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
|
||||
@ -66,6 +70,73 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_connector *connector,
|
||||
bool ssc, bool dsc, int bpp_x16)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
|
||||
int dsc_slice_count = 0;
|
||||
int overhead;
|
||||
|
||||
flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
|
||||
flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
|
||||
flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
|
||||
|
||||
if (dsc) {
|
||||
flags |= DRM_DP_BW_OVERHEAD_DSC;
|
||||
/* TODO: add support for bigjoiner */
|
||||
dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
|
||||
adjusted_mode->clock,
|
||||
adjusted_mode->hdisplay,
|
||||
false);
|
||||
}
|
||||
|
||||
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
|
||||
adjusted_mode->hdisplay,
|
||||
dsc_slice_count,
|
||||
bpp_x16,
|
||||
flags);
|
||||
|
||||
/*
|
||||
* TODO: clarify whether a minimum required by the fixed FEC overhead
|
||||
* in the bspec audio programming sequence is required here.
|
||||
*/
|
||||
return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
|
||||
}
|
||||
|
||||
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_connector *connector,
|
||||
int overhead,
|
||||
int bpp_x16,
|
||||
struct intel_link_m_n *m_n)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
|
||||
/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
|
||||
intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
|
||||
adjusted_mode->crtc_clock,
|
||||
crtc_state->port_clock,
|
||||
overhead,
|
||||
m_n);
|
||||
|
||||
m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
|
||||
}
|
||||
|
||||
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
int effective_data_rate =
intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

/*
* TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
* to calculate PBN with the BW overhead passed to it.
*/
return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
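Worked example (illustrative): an effective data rate of 205706 kB/s maps to DIV_ROUND_UP(205706 * 64, 54 * 1000) = 244 PBN, one PBN unit being 54/64 MB/s.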

static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
@ -94,20 +165,67 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;

if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
return -EINVAL;

crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}

mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
crtc_state->port_clock,
crtc_state->lane_count);

drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);

for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
int remote_bw_overhead;
int link_bpp_x16;
int remote_tu;

drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);

ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
if (ret)
continue;

crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
dsc ? bpp << 4 : bpp,
dsc);
link_bpp_x16 = to_bpp_x16(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));

local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
false, dsc, link_bpp_x16);
remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
true, dsc, link_bpp_x16);

intel_dp_mst_compute_m_n(crtc_state, connector,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);

/*
* The TU size programmed to the HW determines which slots in
* an MTP frame are used for this stream, which needs to match
* the payload size programmed to the first downstream branch
* device's payload table.
*
* Note that atm the payload's PBN value DRM core sends via
* the ALLOCATE_PAYLOAD side-band message matches the payload
* size (which it calculates from the PBN value) it programs
* to the first branch device's payload table. The allocation
* in the payload table could be reduced though (to
* crtc_state->dp_m_n.tu), provided that the driver doesn't
* enable SSC on the corresponding link.
*/
crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
link_bpp_x16,
remote_bw_overhead);

remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);

drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
crtc_state->dp_m_n.tu = remote_tu;

slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
@ -116,13 +234,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
return slots;

if (slots >= 0) {
ret = drm_dp_mst_atomic_check(state);
/*
* If we got slots >= 0 and we can fit those based on check
* then we can exit the loop. Otherwise keep trying.
*/
if (!ret)
break;
drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);

break;
}
}

@ -137,7 +251,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
crtc_state->dsc.compressed_bpp = bpp;
crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}

@ -149,10 +263,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
int link_bpp;

/*
* FIXME: allocate the BW according to link_bpp, which in the case of
@ -167,16 +278,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
if (slots < 0)
return slots;

link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp);

intel_link_compute_m_n(link_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;

return 0;
}

@ -188,15 +289,12 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
u8 dsc_max_bpc;
bool need_timeslot_recalc = false;
u32 last_compressed_bpp;
int min_compressed_bpp, max_compressed_bpp;

/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (DISPLAY_VER(i915) >= 12)
@ -232,46 +330,32 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
if (max_bpp > sink_max_bpp)
max_bpp = sink_max_bpp;

min_bpp = max(min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16));
max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
crtc_state,
max_bpp / 3);
max_compressed_bpp = min(max_compressed_bpp,
to_bpp_int(limits->link.max_bpp_x16));

slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
min_bpp, limits,
conn_state, 2 * 3, true);
min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
min_compressed_bpp = max(min_compressed_bpp,
to_bpp_int_roundup(limits->link.min_bpp_x16));

drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);

/* Align compressed bpps according to our own constraints */
max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
crtc_state->pipe_bpp);
min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
crtc_state->pipe_bpp);

slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
min_compressed_bpp, limits,
conn_state, 1, true);

if (slots < 0)
return slots;

last_compressed_bpp = crtc_state->dsc.compressed_bpp;

crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
last_compressed_bpp,
crtc_state->pipe_bpp);

if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
need_timeslot_recalc = true;

/*
* Apparently some MST hubs dislike if vcpi slots are not matching precisely
* the actual compressed bpp we use.
*/
if (need_timeslot_recalc) {
slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
crtc_state->dsc.compressed_bpp,
crtc_state->dsc.compressed_bpp,
limits, conn_state, 2 * 3, true);
if (slots < 0)
return slots;
}

intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;

return 0;
}
static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
@ -297,8 +381,103 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
return 0;
}

static bool
intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

/*
* FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe /
* transcoder underruns, re-enable DSC after fixing this issue.
*/
return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state);
}

static int mode_hblank_period_ns(const struct drm_display_mode *mode)
{
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
NSEC_PER_SEC / 1000),
mode->crtc_clock);
}
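
As a quick sanity check on the helper above (illustrative numbers only): a mode with htotal - hdisplay = 560 and crtc_clock = 594000 kHz gives

(htotal - hdisplay) * 10^6 / crtc_clock = 560 * 10^6 / 594000 ~= 943 ns

of horizontal blanking, comfortably above the 300 ns threshold used by the quirk check below; only modes with a much narrower blanking window end up forcing DSC through this quirk.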

static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;

if (!connector->dp.dsc_hblank_expansion_quirk)
return false;

if (mode_hblank_period_ns(adjusted_mode) > 300)
return false;

return true;
}

static bool
adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
struct link_config_limits *limits,
bool dsc)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;

if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
return true;

if (!dsc) {
if (intel_dp_mst_dsc_source_support(crtc_state)) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
return false;
}

drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);

if (limits->link.max_bpp_x16 < to_bpp_x16(24))
return false;

limits->link.min_bpp_x16 = to_bpp_x16(24);

return true;
}

drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);

if (limits->max_rate < 540000)
min_bpp_x16 = to_bpp_x16(13);
else if (limits->max_rate < 810000)
min_bpp_x16 = to_bpp_x16(10);

if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;

drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
BPP_X16_ARGS(min_bpp_x16));

if (limits->link.max_bpp_x16 < min_bpp_x16)
return false;

limits->link.min_bpp_x16 = min_bpp_x16;

return true;
}

static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
@ -326,10 +505,16 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,

intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

return intel_dp_compute_config_link_bpp_limits(intel_dp,
crtc_state,
dsc,
limits);
if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
crtc_state,
dsc,
limits))
return false;

return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
crtc_state,
limits,
dsc);
}

static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
@ -339,12 +524,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed;
int ret = 0;

if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;

if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;

@ -354,6 +545,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,

dsc_needed = intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
false,
&limits);
@ -375,7 +567,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
str_yes_no(ret),
str_yes_no(intel_dp->force_dsc_en));

if (!intel_dp_mst_dsc_source_support(pipe_config))
return -EINVAL;

if (!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
true,
&limits))
@ -420,6 +616,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,

intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);

intel_psr_compute_config(intel_dp, pipe_config, conn_state);

return 0;
}

@ -459,6 +657,130 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
return transcoders;
}

static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct drm_dp_mst_port *parent_port)
{
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 mask = 0;
int i;

for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->base.crtc)
continue;

if (&connector->mst_port->mst_mgr != mst_mgr)
continue;

if (connector->port != parent_port &&
!drm_dp_mst_port_downstream_of_parent(mst_mgr,
connector->port,
parent_port))
continue;

mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
}

return mask;
}

static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
u8 fec_pipe_mask = 0;
int ret;

mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);

/* Atomic connector check should've added all the MST CRTCs. */
if (drm_WARN_ON(&i915->drm, !crtc_state))
return -EINVAL;

if (crtc_state->fec_enable)
fec_pipe_mask |= BIT(crtc->pipe);
}

if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
return 0;

limits->force_fec_pipes |= mst_pipe_mask;

ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
mst_pipe_mask);

return ret ? : -EAGAIN;
}

static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct drm_dp_mst_topology_state *mst_state,
struct intel_link_bw_limits *limits)
{
struct drm_dp_mst_port *mst_port;
u8 mst_port_pipes;
int ret;

ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
if (ret != -ENOSPC)
return ret;

mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);

ret = intel_link_bw_reduce_bpp(state, limits,
mst_port_pipes, "MST link BW");

return ret ? : -EAGAIN;
}

/**
* intel_dp_mst_atomic_check_link - check all modeset MST link configuration
* @state: intel atomic state
* @limits: link BW limits
*
* Check the link configuration for all modeset MST outputs. If the
* configuration is invalid @limits will be updated if possible to
* reduce the total BW, after which the configuration for all CRTCs in
* @state must be recomputed with the updated @limits.
*
* Returns:
* - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
* - Other negative error, if the configuration is invalid without a
* fallback possibility, or the check failed for another reason
*/
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
int ret;
int i;

for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
ret = intel_dp_mst_check_fec_change(state, mgr, limits);
if (ret)
return ret;

ret = intel_dp_mst_check_bw(state, mgr, mst_state,
limits);
if (ret)
return ret;
}

return 0;
}

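The -EAGAIN contract documented above means the caller is expected to keep recomputing the CRTC states with the tightened limits until the check passes. A minimal, hedged sketch of that retry shape (recompute_all_crtc_states() is a hypothetical stand-in for the caller's recompute step, not an i915 function):

	for (;;) {
		ret = intel_dp_mst_atomic_check_link(state, &limits);
		if (ret != -EAGAIN)
			break;
		/* @limits was reduced: recompute every CRTC state, then re-check */
		ret = recompute_all_crtc_states(state, &limits); /* hypothetical */
		if (ret)
			break;
	}
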
static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
@ -479,19 +801,23 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
|
||||
* that shares the same MST stream as mode changed,
|
||||
* intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
|
||||
* a fastset when possible.
|
||||
*
|
||||
* On TGL+ this is required since each stream goes through a master transcoder,
|
||||
* so if the master transcoder needs modeset, all other streams in the
|
||||
* topology need a modeset. All platforms need to add the atomic state
|
||||
* for all streams in the topology, since a modeset on one may require
|
||||
* changing the MST link BW usage of the others, which in turn needs a
|
||||
* recomputation of the corresponding CRTC states.
|
||||
*/
|
||||
static int
|
||||
intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
|
||||
struct intel_atomic_state *state)
|
||||
intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
|
||||
struct intel_atomic_state *state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct drm_connector_list_iter connector_list_iter;
|
||||
struct intel_connector *connector_iter;
|
||||
int ret = 0;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) < 12)
|
||||
return 0;
|
||||
|
||||
if (!intel_connector_needs_modeset(state, &connector->base))
|
||||
return 0;
|
||||
|
||||
@ -545,7 +871,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
|
||||
ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -587,10 +913,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(old_conn_state->connector);
|
||||
struct drm_dp_mst_topology_state *new_mst_state =
|
||||
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
|
||||
struct drm_dp_mst_atomic_payload *new_payload =
|
||||
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
|
||||
drm_dbg_kms(&i915->drm, "active links %d\n",
|
||||
@ -598,9 +920,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_hdcp_disable(intel_mst->connector);
|
||||
|
||||
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
|
||||
|
||||
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
|
||||
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
|
||||
}
|
||||
|
||||
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
@ -634,6 +954,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_disable_transcoder(old_crtc_state);
|
||||
|
||||
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
|
||||
|
||||
clear_act_sent(encoder, old_crtc_state);
|
||||
|
||||
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
|
||||
@ -646,6 +968,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_ddi_disable_transcoder_func(old_crtc_state);
|
||||
|
||||
intel_dsc_disable(old_crtc_state);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9)
|
||||
skl_scaler_disable(old_crtc_state);
|
||||
else
|
||||
@ -662,9 +986,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
|
||||
* BSpec 4287: disable DIP after the transcoder is disabled and before
|
||||
* the transcoder clock select is set to none.
|
||||
*/
|
||||
if (last_mst_stream)
|
||||
intel_dp_set_infoframes(&dig_port->base, false,
|
||||
old_crtc_state, NULL);
|
||||
intel_dp_set_infoframes(&dig_port->base, false,
|
||||
old_crtc_state, NULL);
|
||||
/*
|
||||
* From TGL spec: "If multi-stream slave transcoder: Configure
|
||||
* Transcoder Clock Select to direct no clock to the transcoder"
|
||||
@ -754,6 +1077,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
|
||||
|
||||
intel_dp_sink_enable_decompression(state, connector, pipe_config);
|
||||
|
||||
if (first_mst_stream)
|
||||
dig_port->base.pre_enable(state, &dig_port->base,
|
||||
pipe_config, NULL);
|
||||
@ -776,6 +1101,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
|
||||
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
|
||||
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
|
||||
|
||||
intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
|
||||
intel_ddi_set_dp_msa(pipe_config, conn_state);
|
||||
}
|
||||
|
||||
@ -792,11 +1118,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
struct drm_dp_mst_topology_state *mst_state =
|
||||
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
|
||||
enum transcoder trans = pipe_config->cpu_transcoder;
|
||||
bool first_mst_stream = intel_dp->active_mst_links == 1;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
|
||||
|
||||
clear_act_sent(encoder, pipe_config);
|
||||
|
||||
if (intel_dp_is_uhbr(pipe_config)) {
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&pipe_config->hw.adjusted_mode;
|
||||
@ -810,6 +1135,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_ddi_enable_transcoder_func(encoder, pipe_config);
|
||||
|
||||
clear_act_sent(encoder, pipe_config);
|
||||
|
||||
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
|
||||
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
|
||||
|
||||
@ -818,15 +1145,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
wait_for_act_sent(encoder, pipe_config);
|
||||
|
||||
if (first_mst_stream)
|
||||
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
|
||||
|
||||
drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
|
||||
drm_atomic_get_mst_payload_state(mst_state, connector->port));
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
|
||||
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
|
||||
FECSTALL_DIS_DPTSTREAM_DPTTG);
|
||||
else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
|
||||
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
|
||||
FECSTALL_DIS_DPTSTREAM_DPTTG);
|
||||
if (DISPLAY_VER(dev_priv) >= 12)
|
||||
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
|
||||
FECSTALL_DIS_DPTSTREAM_DPTTG,
|
||||
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
|
||||
|
||||
intel_audio_sdp_split_update(pipe_config);
|
||||
|
||||
@ -834,12 +1162,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
|
||||
|
||||
intel_crtc_vblank_on(pipe_config);
|
||||
|
||||
intel_audio_codec_enable(encoder, pipe_config, conn_state);
|
||||
|
||||
/* Enable hdcp if it's desired */
|
||||
if (conn_state->content_protection ==
|
||||
DRM_MODE_CONTENT_PROTECTION_DESIRED)
|
||||
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
|
||||
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
|
||||
@ -974,8 +1297,20 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
* - Also check if compression would allow for the mode
|
||||
* - Calculate the overhead using drm_dp_bw_overhead() /
|
||||
* drm_dp_bw_channel_coding_efficiency(), similarly to the
|
||||
* compute config code, as drm_dp_calc_pbn_mode() doesn't
|
||||
* account with all the overheads.
|
||||
* - Check here and during compute config the BW reported by
|
||||
* DFP_Link_Available_Payload_Bandwidth_Number (or the
|
||||
* corresponding link capabilities of the sink) in case the
|
||||
* stream is uncompressed for it by the last branch device.
|
||||
*/
|
||||
if (mode_rate > max_rate || mode->clock > max_dotclk ||
|
||||
drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
|
||||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
|
||||
*status = MODE_CLOCK_HIGH;
|
||||
return 0;
|
||||
}
|
||||
@ -1139,6 +1474,36 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
|
||||
intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
|
||||
}
|
||||
|
||||
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
struct drm_dp_desc desc;
|
||||
u8 dpcd[DP_RECEIVER_CAP_SIZE];
|
||||
|
||||
if (!connector->dp.dsc_decompression_aux)
|
||||
return false;
|
||||
|
||||
if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
|
||||
&desc, true) < 0)
|
||||
return false;
|
||||
|
||||
if (!drm_dp_has_quirk(&desc,
|
||||
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
|
||||
return false;
|
||||
|
||||
if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
|
||||
return false;
|
||||
|
||||
if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
|
||||
return false;
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
|
||||
connector->base.base.id, connector->base.name);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port,
|
||||
const char *pathprop)
|
||||
@ -1161,6 +1526,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
|
||||
intel_connector->port = port;
|
||||
drm_dp_mst_get_port_malloc(port);
|
||||
|
||||
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
|
||||
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
|
||||
intel_connector->dp.dsc_hblank_expansion_quirk =
|
||||
detect_dsc_hblank_expansion_quirk(intel_connector);
|
||||
|
||||
connector = &intel_connector->base;
|
||||
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_DisplayPort);
|
||||
@ -1172,14 +1542,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
|
||||
|
||||
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
|
||||
|
||||
/*
|
||||
* TODO: set the AUX for the actual MST port decompressing the stream.
|
||||
* At the moment the driver only supports enabling this globally in the
|
||||
* first downstream MST branch, via intel_dp's (root port) AUX.
|
||||
*/
|
||||
intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
|
||||
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
struct drm_encoder *enc =
|
||||
&intel_dp->mst_encoders[pipe]->base.base;
|
||||
@ -1260,6 +1622,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
|
||||
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
|
||||
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
|
||||
intel_encoder->enable = intel_mst_enable_dp;
|
||||
intel_encoder->audio_enable = intel_audio_codec_enable;
|
||||
intel_encoder->audio_disable = intel_audio_codec_disable;
|
||||
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
|
||||
intel_encoder->get_config = intel_dp_mst_enc_get_config;
|
||||
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
|
||||
@ -1407,3 +1771,91 @@ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct intel_connector *
|
||||
get_connector_in_state_for_crtc(struct intel_atomic_state *state,
|
||||
const struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_connector_state *old_conn_state;
|
||||
struct drm_connector_state *new_conn_state;
|
||||
struct drm_connector *_connector;
|
||||
int i;
|
||||
|
||||
for_each_oldnew_connector_in_state(&state->base, _connector,
|
||||
old_conn_state, new_conn_state, i) {
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(_connector);
|
||||
|
||||
if (old_conn_state->crtc == &crtc->base ||
|
||||
new_conn_state->crtc == &crtc->base)
|
||||
return connector;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
|
||||
* @state: atomic state
|
||||
* @crtc: CRTC for which to check the modeset requirement
|
||||
*
|
||||
* Check if any change in a MST topology requires a forced modeset on @crtc in
|
||||
* this topology. One such change is enabling/disabling the DSC decompression
|
||||
* state in the first branch device's UFP DPCD as required by one CRTC, while
|
||||
* the other @crtc in the same topology is still active, requiring a full modeset
|
||||
* on @crtc.
|
||||
*/
|
||||
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
const struct intel_connector *crtc_connector;
|
||||
const struct drm_connector_state *conn_state;
|
||||
const struct drm_connector *_connector;
|
||||
int i;
|
||||
|
||||
if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
|
||||
INTEL_OUTPUT_DP_MST))
|
||||
return false;
|
||||
|
||||
crtc_connector = get_connector_in_state_for_crtc(state, crtc);
|
||||
|
||||
if (!crtc_connector)
|
||||
/* None of the connectors in the topology needs modeset */
|
||||
return false;
|
||||
|
||||
for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
|
||||
const struct intel_connector *connector =
|
||||
to_intel_connector(_connector);
|
||||
const struct intel_crtc_state *new_crtc_state;
|
||||
const struct intel_crtc_state *old_crtc_state;
|
||||
struct intel_crtc *crtc_iter;
|
||||
|
||||
if (connector->mst_port != crtc_connector->mst_port ||
|
||||
!conn_state->crtc)
|
||||
continue;
|
||||
|
||||
crtc_iter = to_intel_crtc(conn_state->crtc);
|
||||
|
||||
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
|
||||
old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
|
||||
|
||||
if (!intel_crtc_needs_modeset(new_crtc_state))
|
||||
continue;
|
||||
|
||||
if (old_crtc_state->dsc.compression_enable ==
|
||||
new_crtc_state->dsc.compression_enable)
|
||||
continue;
|
||||
/*
|
||||
* Toggling the decompression flag because of this stream in
|
||||
* the first downstream branch device's UFP DPCD may reset the
|
||||
* whole branch device. To avoid the reset while other streams
|
||||
* are also active modeset the whole MST topology in this
|
||||
* case.
|
||||
*/
|
||||
if (connector->dp.dsc_decompression_aux ==
|
||||
&connector->mst_port->aux)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||

@ -13,6 +13,7 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_link_bw_limits;

int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
@ -22,5 +23,9 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
struct intel_crtc *crtc);

#endif /* __INTEL_DP_MST_H__ */

@ -666,6 +666,20 @@ enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
}
}

enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
{
switch (pipe) {
default:
MISSING_CASE(pipe);
fallthrough;
case PIPE_A:
case PIPE_B:
return DPIO_PHY0;
case PIPE_C:
return DPIO_PHY1;
}
}
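
The rest of this file follows the pattern established by the new helper above: DPIO register accesses that used to be keyed on the pipe are rekeyed on the PHY that pipe maps to (pipes A/B on PHY0, pipe C on PHY1). A minimal sketch of the call-site conversion, using identifiers exactly as they appear in the surrounding hunks:

	/* before: DPIO access keyed on the pipe */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));

	/* after: resolve the PHY once, then key every access on it */
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
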
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
|
||||
{
|
||||
switch (pipe) {
|
||||
@ -689,50 +703,50 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
u32 val;
|
||||
int i;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Clear calc init */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
|
||||
}
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
|
||||
}
|
||||
|
||||
/* Program swing deemph */
|
||||
for (i = 0; i < crtc_state->lane_count; i++) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
|
||||
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
|
||||
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
|
||||
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
|
||||
}
|
||||
|
||||
/* Program swing margin */
|
||||
for (i = 0; i < crtc_state->lane_count; i++) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
|
||||
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
|
||||
|
||||
val &= ~DPIO_SWING_MARGIN000_MASK;
|
||||
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
|
||||
@ -745,7 +759,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
|
||||
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
|
||||
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -755,23 +769,23 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
|
||||
* 27 for ch0 and ch1.
|
||||
*/
|
||||
for (i = 0; i < crtc_state->lane_count; i++) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
|
||||
val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
|
||||
if (uniq_trans_scale)
|
||||
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
|
||||
else
|
||||
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
|
||||
}
|
||||
|
||||
/* Start swing calculation */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
|
||||
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
|
||||
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
|
||||
}
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
@ -782,43 +796,43 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
|
||||
bool reset)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
u32 val;
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
|
||||
if (reset)
|
||||
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
|
||||
else
|
||||
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
|
||||
if (reset)
|
||||
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
|
||||
else
|
||||
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
|
||||
}
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
|
||||
val |= CHV_PCS_REQ_SOFTRESET_EN;
|
||||
if (reset)
|
||||
val &= ~DPIO_PCS_CLK_SOFT_RESET;
|
||||
else
|
||||
val |= DPIO_PCS_CLK_SOFT_RESET;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
|
||||
val |= CHV_PCS_REQ_SOFTRESET_EN;
|
||||
if (reset)
|
||||
val &= ~DPIO_PCS_CLK_SOFT_RESET;
|
||||
else
|
||||
val |= DPIO_PCS_CLK_SOFT_RESET;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
|
||||
}
|
||||
}
|
||||
|
||||
@ -829,6 +843,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
unsigned int lane_mask =
|
||||
intel_dp_unused_lane_mask(crtc_state->lane_count);
|
||||
@ -851,40 +866,40 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
|
||||
|
||||
/* program left/right clock distribution */
|
||||
if (pipe != PIPE_B) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
|
||||
val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
|
||||
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
|
||||
if (ch == DPIO_CH0)
|
||||
val |= CHV_BUFLEFTENA1_FORCE;
|
||||
if (ch == DPIO_CH1)
|
||||
val |= CHV_BUFRIGHTENA1_FORCE;
|
||||
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
|
||||
vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
|
||||
} else {
|
||||
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
|
||||
val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
|
||||
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
|
||||
if (ch == DPIO_CH0)
|
||||
val |= CHV_BUFLEFTENA2_FORCE;
|
||||
if (ch == DPIO_CH1)
|
||||
val |= CHV_BUFRIGHTENA2_FORCE;
|
||||
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
|
||||
vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
|
||||
}
|
||||
|
||||
/* program clock channel usage */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
|
||||
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
|
||||
if (pipe != PIPE_B)
|
||||
val &= ~CHV_PCS_USEDCLKCHANNEL;
|
||||
else
|
||||
val |= CHV_PCS_USEDCLKCHANNEL;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
|
||||
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
|
||||
if (pipe != PIPE_B)
|
||||
val &= ~CHV_PCS_USEDCLKCHANNEL;
|
||||
else
|
||||
val |= CHV_PCS_USEDCLKCHANNEL;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -892,12 +907,12 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
|
||||
* matches the pipe, but here we need to
|
||||
* pick the CL based on the port.
|
||||
*/
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
|
||||
if (pipe != PIPE_B)
|
||||
val &= ~CHV_CMN_USEDCLKCHANNEL;
|
||||
else
|
||||
val |= CHV_CMN_USEDCLKCHANNEL;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
@ -910,21 +925,21 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
int data, i, stagger;
|
||||
u32 val;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* allow hardware to manage TX FIFO reset source */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
|
||||
}
|
||||
|
||||
/* Program Tx lane latency optimal setting*/
|
||||
@ -934,7 +949,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
|
||||
data = 0x0;
|
||||
else
|
||||
data = (i == 1) ? 0x0 : 0x1;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
|
||||
vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
|
||||
data << DPIO_UPAR_SHIFT);
|
||||
}
|
||||
|
||||
@ -950,17 +965,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
|
||||
else
|
||||
stagger = 0x2;
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
|
||||
val |= DPIO_TX2_STAGGER_MASK(0x1f);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
|
||||
}
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
@ -968,7 +983,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
|
||||
DPIO_TX2_STAGGER_MULT(0));
|
||||
|
||||
if (crtc_state->lane_count > 2) {
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
|
||||
DPIO_LANESTAGGER_STRAP(stagger) |
|
||||
DPIO_LANESTAGGER_STRAP_OVRD |
|
||||
DPIO_TX1_STAGGER_MASK(0x1f) |
|
||||
@ -998,19 +1013,20 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
|
||||
u32 val;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* disable left/right clock distribution */
|
||||
if (pipe != PIPE_B) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
|
||||
val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
|
||||
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
|
||||
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
|
||||
vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
|
||||
} else {
|
||||
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
|
||||
val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
|
||||
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
|
||||
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
|
||||
vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
|
||||
}
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
@ -1036,22 +1052,22 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
|
||||
uniqtranscale_reg_value);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
|
||||
|
||||
if (tx3_demph)
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
@ -1063,24 +1079,24 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
|
||||
/* Program Tx lane resets to default */
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
|
||||
DPIO_PCS_TX_LANE2_RESET |
|
||||
DPIO_PCS_TX_LANE1_RESET);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
|
||||
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
|
||||
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
|
||||
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
|
||||
DPIO_PCS_CLK_SOFT_RESET);
|
||||
|
||||
/* Fix up inter-pair skew failure */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
@ -1094,23 +1110,24 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
|
||||
u32 val;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Enable clock channels for this port */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
|
||||
val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
|
||||
val = 0;
|
||||
if (pipe)
|
||||
val |= (1<<21);
|
||||
else
|
||||
val &= ~(1<<21);
|
||||
val |= 0x001000c4;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
|
||||
|
||||
/* Program lane clock */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
@ -1122,10 +1139,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
|
||||
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||

@ -44,6 +44,7 @@ u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
enum dpio_phy vlv_pipe_to_phy(enum pipe pipe);
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
@ -116,6 +117,10 @@ static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_p
{
return DPIO_PHY0;
}
static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
{
return DPIO_PHY0;
}
static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
return DPIO_CH0;
@ -16,6 +16,7 @@
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_lvds.h"
|
||||
#include "intel_lvds_regs.h"
|
||||
#include "intel_panel.h"
|
||||
#include "intel_pps.h"
|
||||
#include "intel_snps_phy.h"
|
||||
@ -311,7 +312,7 @@ static const struct intel_limit intel_limits_bxt = {
|
||||
* divided-down version of it.
|
||||
*/
|
||||
/* m1 is reserved as 0 in Pineview, n is a ring counter */
|
||||
int pnv_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
{
|
||||
clock->m = clock->m2 + 2;
|
||||
clock->p = clock->p1 * clock->p2;
|
||||
@ -342,7 +343,7 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
return clock->dot;
|
||||
}
|
||||
|
||||
int vlv_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
{
|
||||
clock->m = clock->m1 * clock->m2;
|
||||
clock->p = clock->p1 * clock->p2 * 5;
|
||||
@ -368,6 +369,176 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
|
||||
return clock->dot;
|
||||
}
|
||||
|
||||
static int i9xx_pll_refclk(struct drm_device *dev,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 dpll = pipe_config->dpll_hw_state.dpll;
|
||||
|
||||
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
|
||||
return dev_priv->display.vbt.lvds_ssc_freq;
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
return 120000;
|
||||
else if (DISPLAY_VER(dev_priv) != 2)
|
||||
return 96000;
|
||||
else
|
||||
return 48000;
|
||||
}
|
||||
|
||||
/* Returns the clock of the currently programmed mode of the given pipe. */
|
||||
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 dpll = pipe_config->dpll_hw_state.dpll;
|
||||
u32 fp;
|
||||
struct dpll clock;
|
||||
int port_clock;
|
||||
int refclk = i9xx_pll_refclk(dev, pipe_config);
|
||||
|
||||
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
|
||||
fp = pipe_config->dpll_hw_state.fp0;
|
||||
else
|
||||
fp = pipe_config->dpll_hw_state.fp1;
|
||||
|
||||
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
|
||||
if (IS_PINEVIEW(dev_priv)) {
|
||||
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
|
||||
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
|
||||
} else {
|
||||
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
|
||||
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
|
||||
}
|
||||
|
||||
if (DISPLAY_VER(dev_priv) != 2) {
|
||||
if (IS_PINEVIEW(dev_priv))
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
|
||||
else
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT);
|
||||
|
||||
switch (dpll & DPLL_MODE_MASK) {
|
||||
case DPLLB_MODE_DAC_SERIAL:
|
||||
clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
|
||||
5 : 10;
|
||||
break;
|
||||
case DPLLB_MODE_LVDS:
|
||||
clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
|
||||
7 : 14;
|
||||
break;
|
||||
default:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Unknown DPLL mode %08x in programmed "
|
||||
"mode\n", (int)(dpll & DPLL_MODE_MASK));
|
||||
return;
|
||||
}
|
||||
|
||||
if (IS_PINEVIEW(dev_priv))
|
||||
port_clock = pnv_calc_dpll_params(refclk, &clock);
|
||||
else
|
||||
port_clock = i9xx_calc_dpll_params(refclk, &clock);
|
||||
} else {
|
||||
enum pipe lvds_pipe;
|
||||
|
||||
if (IS_I85X(dev_priv) &&
|
||||
intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
|
||||
lvds_pipe == crtc->pipe) {
|
||||
u32 lvds = intel_de_read(dev_priv, LVDS);
|
||||
|
||||
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT);
|
||||
|
||||
if (lvds & LVDS_CLKB_POWER_UP)
|
||||
clock.p2 = 7;
|
||||
else
|
||||
clock.p2 = 14;
|
||||
} else {
|
||||
if (dpll & PLL_P1_DIVIDE_BY_TWO)
|
||||
clock.p1 = 2;
|
||||
else {
|
||||
clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
|
||||
DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
|
||||
}
|
||||
if (dpll & PLL_P2_DIVIDE_BY_4)
|
||||
clock.p2 = 4;
|
||||
else
|
||||
clock.p2 = 2;
|
||||
}
|
||||
|
||||
port_clock = i9xx_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
/*
|
||||
* This value includes pixel_multiplier. We will use
|
||||
* port_clock to compute adjusted_mode.crtc_clock in the
|
||||
* encoder's get_config() function.
|
||||
*/
|
||||
pipe_config->port_clock = port_clock;
|
||||
}
|
||||
|
||||
void vlv_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
struct dpll clock;
|
||||
u32 mdiv;
|
||||
int refclk = 100000;
|
||||
|
||||
/* In case of DSI, DPLL will not be used */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
|
||||
clock.m2 = mdiv & DPIO_M2DIV_MASK;
|
||||
clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
|
||||
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
|
||||
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
|
||||
|
||||
pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
void chv_crtc_clock_get(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
struct dpll clock;
|
||||
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
|
||||
int refclk = 100000;
|
||||
|
||||
/* In case of DSI, DPLL will not be used */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
|
||||
pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
|
||||
pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
|
||||
pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
|
||||
pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
|
||||
clock.m2 = (pll_dw0 & 0xff) << 22;
|
||||
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
|
||||
clock.m2 |= pll_dw2 & 0x3fffff;
|
||||
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
|
||||
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
|
||||
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
|
||||
|
||||
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns whether the given set of divisors are valid for a given refclk with
|
||||
* the given connectors.
|
||||
@ -1003,12 +1174,10 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
enum phy phy = intel_port_to_phy(i915, encoder->port);
int ret;

ret = intel_cx0pll_calc_state(crtc_state, encoder);

@ -1016,10 +1185,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;

/* TODO: Do the readback via intel_compute_shared_dplls() */
if (intel_is_c10phy(i915, phy))
crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
else
crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);

crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@ -1645,7 +1811,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
|
||||
}
|
||||
|
||||
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
enum dpio_phy phy)
|
||||
{
|
||||
u32 reg_val;
|
||||
|
||||
@ -1653,30 +1819,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
|
||||
* PLLB opamp always calibrates to max value of 0x3f, force enable it
|
||||
* and set it to a reasonable value instead.
|
||||
*/
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
|
||||
reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
|
||||
reg_val &= 0xffffff00;
|
||||
reg_val |= 0x00000030;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
|
||||
reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
|
||||
reg_val &= 0x00ffffff;
|
||||
reg_val |= 0x8c000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
|
||||
reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
|
||||
reg_val &= 0xffffff00;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
|
||||
reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
|
||||
reg_val &= 0x00ffffff;
|
||||
reg_val |= 0xb0000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
|
||||
}
|
||||
|
||||
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
u32 mdiv;
|
||||
u32 bestn, bestm1, bestm2, bestp1, bestp2;
|
||||
@ -1694,18 +1861,18 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
|
||||
/* PLL B needs special handling */
|
||||
if (pipe == PIPE_B)
|
||||
vlv_pllb_recal_opamp(dev_priv, pipe);
|
||||
vlv_pllb_recal_opamp(dev_priv, phy);
|
||||
|
||||
/* Set up Tx target for periodic Rcomp update */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
|
||||
|
||||
/* Disable target IRef on PLL */
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
|
||||
reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
|
||||
reg_val &= 0x00ffffff;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
|
||||
|
||||
/* Disable fast lock */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
|
||||
|
||||
/* Set idtafcrecal before PLL is enabled */
|
||||
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
|
||||
@ -1719,46 +1886,46 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
* Note: don't use the DAC post divider as it seems unstable.
|
||||
*/
|
||||
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
|
||||
|
||||
mdiv |= DPIO_ENABLE_CALIBRATION;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
|
||||
|
||||
/* Set HBR and RBR LPF coefficients */
|
||||
if (crtc_state->port_clock == 162000 ||
|
||||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
|
||||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
|
||||
0x009f0003);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
|
||||
0x00d0000f);
|
||||
|
||||
if (intel_crtc_has_dp_encoder(crtc_state)) {
|
||||
/* Use SSC source */
|
||||
if (pipe == PIPE_A)
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
|
||||
0x0df40000);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
|
||||
0x0df70000);
|
||||
} else { /* HDMI or VGA */
|
||||
/* Use bend source */
|
||||
if (pipe == PIPE_A)
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
|
||||
0x0df70000);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
|
||||
0x0df40000);
|
||||
}
|
||||
|
||||
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
|
||||
coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
|
||||
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
|
||||
if (intel_crtc_has_dp_encoder(crtc_state))
|
||||
coreclk |= 0x01000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
|
||||
vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
@ -1809,6 +1976,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
u32 loopfilter, tribuf_calcntr;
|
||||
u32 bestm2, bestp1, bestp2, bestm2_frac;
|
||||
u32 dpio_val;
|
||||
@ -1825,39 +1993,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* p1 and p2 divider */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
|
||||
5 << DPIO_CHV_S1_DIV_SHIFT |
|
||||
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
|
||||
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
|
||||
1 << DPIO_CHV_K_DIV_SHIFT);
|
||||
|
||||
/* Feedback post-divider - m2 */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
|
||||
|
||||
/* Feedback refclk divider - n and m1 */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
|
||||
DPIO_CHV_M1_DIV_BY_2 |
|
||||
1 << DPIO_CHV_N_DIV_SHIFT);
|
||||
|
||||
/* M2 fraction division */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
|
||||
|
||||
/* M2 fraction division enable */
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
|
||||
dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
|
||||
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
|
||||
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
|
||||
if (bestm2_frac)
|
||||
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
|
||||
|
||||
/* Program digital lock detect threshold */
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
|
||||
dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
|
||||
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
|
||||
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
|
||||
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
|
||||
if (!bestm2_frac)
|
||||
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
|
||||
|
||||
/* Loop filter */
|
||||
if (vco == 5400000) {
|
||||
@ -1882,16 +2050,16 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
|
||||
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
|
||||
tribuf_calcntr = 0;
|
||||
}
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
|
||||
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
|
||||
dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
|
||||
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
|
||||
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
|
||||
|
||||
/* AFC Recal */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
|
||||
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
|
||||
vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
|
||||
DPIO_AFC_RECAL);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
@ -1903,14 +2071,15 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
|
||||
u32 tmp;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Enable back the 10bit clock to display controller */
|
||||
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
|
||||
tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
|
||||
tmp |= DPIO_DCLKP_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
@ -2031,6 +2200,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
|
||||
u32 val;
|
||||
|
||||
/* Make sure the pipe isn't still relying on us */
|
||||
@ -2047,9 +2217,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Disable 10bit clock to display controller */
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
|
||||
val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
|
||||
val &= ~DPIO_DCLKP_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
|
||||
vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
|
@ -20,8 +20,6 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);

@ -41,6 +39,13 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);

void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);

void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
@ -219,6 +219,26 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
return MG_PLL_ENABLE(tc_port);
}

static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
struct intel_shared_dpll *pll)
{
if (pll->info->power_domain)
pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

pll->info->funcs->enable(i915, pll);
pll->on = true;
}

static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
struct intel_shared_dpll *pll)
{
pll->info->funcs->disable(i915, pll);
pll->on = false;

if (pll->info->power_domain)
intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
}

/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL

@ -258,8 +278,8 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&i915->drm, pll->on);

drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
pll->info->funcs->enable(i915, pll);
pll->on = true;

_intel_enable_shared_dpll(i915, pll);

out:
mutex_unlock(&i915->display.dpll.lock);

@ -304,8 +324,8 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
goto out;

drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
pll->info->funcs->disable(i915, pll);
pll->on = false;

_intel_disable_shared_dpll(i915, pll);

out:
mutex_unlock(&i915->display.dpll.lock);
@ -631,9 +651,9 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info pch_plls[] = {
|
||||
{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
|
||||
{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
|
||||
{ },
|
||||
{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
|
||||
{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr pch_pll_mgr = {
|
||||
@ -1239,13 +1259,16 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info hsw_plls[] = {
|
||||
{ "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
|
||||
{ "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
|
||||
{ "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
|
||||
{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
|
||||
{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
|
||||
{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
|
||||
{ },
|
||||
{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
|
||||
{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
|
||||
{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
|
||||
{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
|
||||
.flags = INTEL_DPLL_ALWAYS_ON, },
|
||||
{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
|
||||
.flags = INTEL_DPLL_ALWAYS_ON, },
|
||||
{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
|
||||
.flags = INTEL_DPLL_ALWAYS_ON, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr hsw_pll_mgr = {
|
||||
@ -1921,11 +1944,12 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info skl_plls[] = {
|
||||
{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
|
||||
{ "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
|
||||
{ "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
|
||||
{ "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
|
||||
.flags = INTEL_DPLL_ALWAYS_ON, },
|
||||
{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
|
||||
{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
|
||||
{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr skl_pll_mgr = {
|
||||
@ -2376,10 +2400,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info bxt_plls[] = {
|
||||
{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
|
||||
{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
|
||||
{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
|
||||
{ },
|
||||
{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
|
||||
{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
|
||||
{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr bxt_pll_mgr = {
|
||||
@ -3834,18 +3858,6 @@ static void combo_pll_enable(struct drm_i915_private *i915,
|
||||
{
|
||||
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
|
||||
|
||||
if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
|
||||
pll->info->id == DPLL_ID_EHL_DPLL4) {
|
||||
|
||||
/*
|
||||
* We need to disable DC states when this DPLL is enabled.
|
||||
* This can be done by taking a reference on DPLL4 power
|
||||
* domain.
|
||||
*/
|
||||
pll->wakeref = intel_display_power_get(i915,
|
||||
POWER_DOMAIN_DC_OFF);
|
||||
}
|
||||
|
||||
icl_pll_power_enable(i915, pll, enable_reg);
|
||||
|
||||
icl_dpll_write(i915, pll);
|
||||
@ -3941,11 +3953,6 @@ static void combo_pll_disable(struct drm_i915_private *i915,
|
||||
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
|
||||
|
||||
icl_pll_disable(i915, pll, enable_reg);
|
||||
|
||||
if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
|
||||
pll->info->id == DPLL_ID_EHL_DPLL4)
|
||||
intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
|
||||
pll->wakeref);
|
||||
}
|
||||
|
||||
static void tbt_pll_disable(struct drm_i915_private *i915,
|
||||
@ -4014,14 +4021,14 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info icl_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
|
||||
{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
|
||||
{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
|
||||
{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
|
||||
{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
|
||||
{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
|
||||
{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
|
||||
{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
|
||||
{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr icl_pll_mgr = {
|
||||
@ -4035,10 +4042,11 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
|
||||
};
|
||||
|
||||
static const struct dpll_info ehl_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
|
||||
.power_domain = POWER_DOMAIN_DC_OFF, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr ehl_pll_mgr = {
|
||||
@ -4058,16 +4066,16 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
|
||||
};
|
||||
|
||||
static const struct dpll_info tgl_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
|
||||
{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
|
||||
{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
|
||||
{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
|
||||
{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
|
||||
{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
|
||||
{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
|
||||
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
|
||||
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
|
||||
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
|
||||
{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
|
||||
{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
|
||||
{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr tgl_pll_mgr = {
|
||||
@ -4081,10 +4089,10 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
|
||||
};
|
||||
|
||||
static const struct dpll_info rkl_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr rkl_pll_mgr = {
|
||||
@ -4097,11 +4105,11 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
|
||||
};
|
||||
|
||||
static const struct dpll_info dg1_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
|
||||
{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
|
||||
{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
|
||||
{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
|
||||
{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr dg1_pll_mgr = {
|
||||
@ -4114,11 +4122,11 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
|
||||
};
|
||||
|
||||
static const struct dpll_info adls_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
|
||||
{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
|
||||
{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr adls_pll_mgr = {
|
||||
@ -4131,14 +4139,14 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
|
||||
};
|
||||
|
||||
static const struct dpll_info adlp_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
|
||||
{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
|
||||
{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
|
||||
{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
|
||||
{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
|
||||
{ },
|
||||
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
|
||||
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
|
||||
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
|
||||
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
|
||||
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
|
||||
{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
|
||||
{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr adlp_pll_mgr = {
|
||||
@ -4365,12 +4373,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,

pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
POWER_DOMAIN_DC_OFF);
}
if (pll->on && pll->info->power_domain)
pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {

@ -4417,8 +4421,7 @@ static void sanitize_dpll_state(struct drm_i915_private *i915,
"%s enabled but not in use, disabling\n",
pll->info->name);

pll->info->funcs->disable(i915, pll);
pll->on = false;
_intel_disable_shared_dpll(i915, pll);
}

void intel_dpll_sanitize_state(struct drm_i915_private *i915)
@ -27,6 +27,7 @@

#include <linux/types.h>

#include "intel_display_power.h"
#include "intel_wakeref.h"

#define for_each_shared_dpll(__i915, __pll, __i) \

@ -270,6 +271,11 @@ struct dpll_info {
*/
enum intel_dpll_id id;

/**
* @power_domain: extra power domain required by the DPLL
*/
enum intel_display_power_domain power_domain;

#define INTEL_DPLL_ALWAYS_ON (1 << 0)
/**
* @flags:
|
||||
|
@ -9,8 +9,6 @@
|
||||
#include "gt/gen8_ppgtt.h"
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "i915_reg.h"
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dpt.h"
|
||||
#include "intel_fb.h"
|
||||
@ -318,25 +316,3 @@ void intel_dpt_destroy(struct i915_address_space *vm)
|
||||
i915_vm_put(&dpt->vm);
|
||||
}
|
||||
|
||||
void intel_dpt_configure(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
|
||||
if (DISPLAY_VER(i915) == 14) {
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum plane_id plane_id;
|
||||
|
||||
for_each_plane_id_on_crtc(crtc, plane_id) {
|
||||
if (plane_id == PLANE_CURSOR)
|
||||
continue;
|
||||
|
||||
intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
|
||||
PLANE_CHICKEN_DISABLE_DPT,
|
||||
i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
|
||||
}
|
||||
} else if (DISPLAY_VER(i915) == 13) {
|
||||
intel_de_rmw(i915, CHICKEN_MISC_2,
|
||||
CHICKEN_MISC_DISABLE_DPT,
|
||||
i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
|
||||
}
|
||||
}
|
||||
|
@ -10,7 +10,6 @@ struct drm_i915_private;
|
||||
|
||||
struct i915_address_space;
|
||||
struct i915_vma;
|
||||
struct intel_crtc;
|
||||
struct intel_framebuffer;
|
||||
|
||||
void intel_dpt_destroy(struct i915_address_space *vm);
|
||||
@ -20,6 +19,5 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
|
||||
void intel_dpt_resume(struct drm_i915_private *i915);
|
||||
struct i915_address_space *
|
||||
intel_dpt_create(struct intel_framebuffer *fb);
|
||||
void intel_dpt_configure(struct intel_crtc *crtc);
|
||||
|
||||
#endif /* __INTEL_DPT_H__ */
|
||||
|
drivers/gpu/drm/i915/display/intel_dpt_common.c (new file)

@ -0,0 +1,34 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/

#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt_common.h"

void intel_dpt_configure(struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);

if (DISPLAY_VER(i915) == 14) {
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;

for_each_plane_id_on_crtc(crtc, plane_id) {
if (plane_id == PLANE_CURSOR)
continue;

intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
PLANE_CHICKEN_DISABLE_DPT,
i915->display.params.enable_dpt ? 0 :
PLANE_CHICKEN_DISABLE_DPT);
}
} else if (DISPLAY_VER(i915) == 13) {
intel_de_rmw(i915, CHICKEN_MISC_2,
CHICKEN_MISC_DISABLE_DPT,
i915->display.params.enable_dpt ? 0 :
CHICKEN_MISC_DISABLE_DPT);
}
}
drivers/gpu/drm/i915/display/intel_dpt_common.h (new file)

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/

#ifndef __INTEL_DPT_COMMON_H__
#define __INTEL_DPT_COMMON_H__

struct intel_crtc;

void intel_dpt_configure(struct intel_crtc *crtc);

#endif /* __INTEL_DPT_COMMON_H__ */
@ -4,9 +4,6 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "gem/i915_gem_internal.h"
|
||||
#include "gem/i915_gem_lmem.h"
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "i915_irq.h"
|
||||
#include "i915_reg.h"
|
||||
@ -14,12 +11,13 @@
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dsb.h"
|
||||
#include "intel_dsb_buffer.h"
|
||||
#include "intel_dsb_regs.h"
|
||||
#include "intel_vblank.h"
|
||||
#include "intel_vrr.h"
|
||||
#include "skl_watermark.h"
|
||||
|
||||
struct i915_vma;
|
||||
#define CACHELINE_BYTES 64
|
||||
|
||||
enum dsb_id {
|
||||
INVALID_DSB = -1,
|
||||
@ -32,8 +30,7 @@ enum dsb_id {
|
||||
struct intel_dsb {
|
||||
enum dsb_id id;
|
||||
|
||||
u32 *cmd_buf;
|
||||
struct i915_vma *vma;
|
||||
struct intel_dsb_buffer dsb_buf;
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
/*
|
||||
@ -109,15 +106,17 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
|
||||
{
|
||||
struct intel_crtc *crtc = dsb->crtc;
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
const u32 *buf = dsb->cmd_buf;
|
||||
int i;
|
||||
|
||||
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
|
||||
crtc->base.base.id, crtc->base.name, dsb->id);
|
||||
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
|
||||
drm_dbg_kms(&i915->drm,
|
||||
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
|
||||
i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]);
|
||||
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
|
||||
intel_dsb_buffer_read(&dsb->dsb_buf, i),
|
||||
intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
|
||||
intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
|
||||
intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
|
||||
drm_dbg_kms(&i915->drm, "}\n");
|
||||
}
|
||||
|
||||
@ -129,8 +128,6 @@ static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
|
||||
|
||||
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
|
||||
{
|
||||
u32 *buf = dsb->cmd_buf;
|
||||
|
||||
if (!assert_dsb_has_room(dsb))
|
||||
return;
|
||||
|
||||
@ -139,14 +136,13 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
|
||||
|
||||
dsb->ins_start_offset = dsb->free_pos;
|
||||
|
||||
buf[dsb->free_pos++] = ldw;
|
||||
buf[dsb->free_pos++] = udw;
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
|
||||
}
|
||||
|
||||
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
|
||||
u32 opcode, i915_reg_t reg)
|
||||
{
|
||||
const u32 *buf = dsb->cmd_buf;
|
||||
u32 prev_opcode, prev_reg;
|
||||
|
||||
/*
|
||||
@ -157,8 +153,10 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
|
||||
if (dsb->free_pos == 0)
|
||||
return false;
|
||||
|
||||
prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK;
|
||||
prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
|
||||
prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
|
||||
dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
|
||||
prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
|
||||
dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
|
||||
|
||||
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
|
||||
}
|
||||
@ -191,6 +189,8 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
|
||||
void intel_dsb_reg_write(struct intel_dsb *dsb,
|
||||
i915_reg_t reg, u32 val)
|
||||
{
|
||||
u32 old_val;
|
||||
|
||||
/*
|
||||
* For example the buffer will look like below for 3 dwords for auto
|
||||
* increment register:
|
||||
@ -214,31 +214,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
|
||||
(DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
|
||||
i915_mmio_reg_offset(reg));
|
||||
} else {
|
||||
u32 *buf = dsb->cmd_buf;
|
||||
|
||||
if (!assert_dsb_has_room(dsb))
|
||||
return;
|
||||
|
||||
/* convert to indexed write? */
|
||||
if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
|
||||
u32 prev_val = buf[dsb->ins_start_offset + 0];
|
||||
u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
|
||||
dsb->ins_start_offset + 0);
|
||||
|
||||
buf[dsb->ins_start_offset + 0] = 1; /* count */
|
||||
buf[dsb->ins_start_offset + 1] =
|
||||
(DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
|
||||
i915_mmio_reg_offset(reg);
|
||||
buf[dsb->ins_start_offset + 2] = prev_val;
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf,
|
||||
dsb->ins_start_offset + 0, 1); /* count */
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
|
||||
(DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
|
||||
i915_mmio_reg_offset(reg));
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
|
||||
|
||||
dsb->free_pos++;
|
||||
}
|
||||
|
||||
buf[dsb->free_pos++] = val;
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
|
||||
/* Update the count */
|
||||
buf[dsb->ins_start_offset]++;
|
||||
old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
|
||||
|
||||
/* if number of data words is odd, then the last dword should be 0.*/
|
||||
if (dsb->free_pos & 0x1)
|
||||
buf[dsb->free_pos] = 0;
|
||||
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -297,8 +298,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
|
||||
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
|
||||
|
||||
if (aligned_tail > tail)
|
||||
memset(&dsb->cmd_buf[dsb->free_pos], 0,
|
||||
aligned_tail - tail);
|
||||
intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
|
||||
aligned_tail - tail);
|
||||
|
||||
dsb->free_pos = aligned_tail / 4;
|
||||
}
|
||||
@ -317,7 +318,7 @@ void intel_dsb_finish(struct intel_dsb *dsb)
|
||||
|
||||
intel_dsb_align_tail(dsb);
|
||||
|
||||
i915_gem_object_flush_map(dsb->vma->obj);
|
||||
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
|
||||
}
|
||||
|
||||
static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
|
||||
@ -361,7 +362,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
|
||||
ctrl | DSB_ENABLE);
|
||||
|
||||
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
|
||||
i915_ggtt_offset(dsb->vma));
|
||||
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
|
||||
|
||||
if (dewake_scanline >= 0) {
|
||||
int diff, hw_dewake_scanline;
|
||||
@ -383,7 +384,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
|
||||
}
|
||||
|
||||
intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
|
||||
i915_ggtt_offset(dsb->vma) + tail);
|
||||
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -408,7 +409,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
|
||||
u32 offset = i915_ggtt_offset(dsb->vma);
|
||||
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
|
||||
|
||||
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
|
||||
DSB_ENABLE | DSB_HALT);
|
||||
@ -445,12 +446,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
intel_wakeref_t wakeref;
|
||||
struct intel_dsb *dsb;
|
||||
struct i915_vma *vma;
|
||||
unsigned int size;
|
||||
u32 *buf;
|
||||
|
||||
if (!HAS_DSB(i915))
|
||||
return NULL;
|
||||
@ -464,37 +462,13 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
|
||||
/* ~1 qword per instruction, full cachelines */
|
||||
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
|
||||
|
||||
if (HAS_LMEM(i915)) {
|
||||
obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
|
||||
I915_BO_ALLOC_CONTIGUOUS);
|
||||
if (IS_ERR(obj))
|
||||
goto out_put_rpm;
|
||||
} else {
|
||||
obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
|
||||
if (IS_ERR(obj))
|
||||
goto out_put_rpm;
|
||||
|
||||
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
|
||||
}
|
||||
|
||||
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
i915_gem_object_put(obj);
|
||||
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
|
||||
goto out_put_rpm;
|
||||
}
|
||||
|
||||
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
|
||||
if (IS_ERR(buf)) {
|
||||
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
|
||||
goto out_put_rpm;
|
||||
}
|
||||
|
||||
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
||||
|
||||
dsb->id = DSB1;
|
||||
dsb->vma = vma;
|
||||
dsb->crtc = crtc;
|
||||
dsb->cmd_buf = buf;
|
||||
dsb->size = size / 4; /* in dwords */
|
||||
dsb->free_pos = 0;
|
||||
dsb->ins_start_offset = 0;
|
||||
@ -522,6 +496,6 @@ out:
|
||||
*/
|
||||
void intel_dsb_cleanup(struct intel_dsb *dsb)
|
||||
{
|
||||
i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
|
||||
intel_dsb_buffer_cleanup(&dsb->dsb_buf);
|
||||
kfree(dsb);
|
||||
}
|
||||
|
drivers/gpu/drm/i915/display/intel_dsb_buffer.c (new file)

@ -0,0 +1,82 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2023, Intel Corporation.
*/

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dsb_buffer.h"

u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return i915_ggtt_offset(dsb_buf->vma);
}

void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
dsb_buf->cmd_buf[idx] = val;
}

u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
return dsb_buf->cmd_buf[idx];
}

void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));

memset(&dsb_buf->cmd_buf[idx], val, size);
}

bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *buf;

if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj))
return false;
} else {
obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return false;

i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}

vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return false;
}

buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return false;
}

dsb_buf->vma = vma;
dsb_buf->cmd_buf = buf;
dsb_buf->buf_size = size;

return true;
}

void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
}

void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
i915_gem_object_flush_map(dsb_buf->vma->obj);
}
drivers/gpu/drm/i915/display/intel_dsb_buffer.h (new file)

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: MIT
*
* Copyright © 2023 Intel Corporation
*/

#ifndef _INTEL_DSB_BUFFER_H
#define _INTEL_DSB_BUFFER_H

#include <linux/types.h>

struct intel_crtc;
struct i915_vma;

struct intel_dsb_buffer {
u32 *cmd_buf;
struct i915_vma *vma;
size_t buf_size;
};

u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
size_t size);
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);

#endif
@ -55,43 +55,6 @@
|
||||
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
|
||||
#define MIPI_PORT_SHIFT 3
|
||||
|
||||
/* base offsets for gpio pads */
|
||||
#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
|
||||
#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
|
||||
#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
|
||||
#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
|
||||
#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
|
||||
#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
|
||||
#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
|
||||
#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
|
||||
#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
|
||||
#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
|
||||
#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
|
||||
#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
|
||||
|
||||
#define VLV_GPIO_PCONF0(base_offset) (base_offset)
|
||||
#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
|
||||
|
||||
struct gpio_map {
|
||||
u16 base_offset;
|
||||
bool init;
|
||||
};
|
||||
|
||||
static struct gpio_map vlv_gpio_table[] = {
|
||||
{ VLV_GPIO_NC_0_HV_DDI0_HPD },
|
||||
{ VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
|
||||
{ VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
|
||||
{ VLV_GPIO_NC_3_PANEL0_VDDEN },
|
||||
{ VLV_GPIO_NC_4_PANEL0_BKLTEN },
|
||||
{ VLV_GPIO_NC_5_PANEL0_BKLTCTL },
|
||||
{ VLV_GPIO_NC_6_HV_DDI1_HPD },
|
||||
{ VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
|
||||
{ VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
|
||||
{ VLV_GPIO_NC_9_PANEL1_VDDEN },
|
||||
{ VLV_GPIO_NC_10_PANEL1_BKLTEN },
|
||||
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
|
||||
};
|
||||
|
||||
struct i2c_adapter_lookup {
|
||||
u16 slave_addr;
|
||||
struct intel_dsi *intel_dsi;
|
||||
@ -103,19 +66,6 @@ struct i2c_adapter_lookup {
|
||||
#define CHV_GPIO_IDX_START_SW 100
|
||||
#define CHV_GPIO_IDX_START_SE 198
|
||||
|
||||
#define CHV_VBT_MAX_PINS_PER_FMLY 15
|
||||
|
||||
#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
|
||||
#define CHV_GPIO_GPIOEN (1 << 15)
|
||||
#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
|
||||
#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
|
||||
#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
|
||||
#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
|
||||
#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
|
||||
|
||||
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
|
||||
#define CHV_GPIO_CFGLOCK (1 << 31)
|
||||
|
||||
/* ICL DSI Display GPIO Pins */
|
||||
#define ICL_GPIO_DDSP_HPD_A 0
|
||||
#define ICL_GPIO_L_VDDEN_1 1
|
||||
@ -142,7 +92,7 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
|
||||
if (seq_port) {
|
||||
if (intel_dsi->ports & BIT(PORT_B))
|
||||
return PORT_B;
|
||||
else if (intel_dsi->ports & BIT(PORT_C))
|
||||
if (intel_dsi->ports & BIT(PORT_C))
|
||||
return PORT_C;
|
||||
}
|
||||
|
||||
@ -243,75 +193,93 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
|
||||
return data;
|
||||
}
|
||||
|
||||
static void vlv_exec_gpio(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
|
||||
const char *con_id, u8 idx, bool value)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
struct gpio_map *map;
|
||||
u16 pconf0, padval;
|
||||
u32 tmp;
|
||||
u8 port;
|
||||
/* XXX: this table is a quick ugly hack. */
|
||||
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
|
||||
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
|
||||
|
||||
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
|
||||
drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
|
||||
gpio_index);
|
||||
return;
|
||||
}
|
||||
|
||||
map = &vlv_gpio_table[gpio_index];
|
||||
|
||||
if (connector->panel.vbt.dsi.seq_version >= 3) {
|
||||
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
|
||||
port = IOSF_PORT_GPIO_NC;
|
||||
if (gpio_desc) {
|
||||
gpiod_set_value(gpio_desc, value);
|
||||
} else {
|
||||
if (gpio_source == 0) {
|
||||
port = IOSF_PORT_GPIO_NC;
|
||||
} else if (gpio_source == 1) {
|
||||
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
|
||||
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
|
||||
if (IS_ERR(gpio_desc)) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"GPIO index %u request failed (%pe)\n",
|
||||
gpio_index, gpio_desc);
|
||||
return;
|
||||
}
|
||||
|
||||
soc_gpio_table[gpio_index] = gpio_desc;
|
||||
}
|
||||
}
|
||||
|
||||
static void soc_opaque_gpio_set_value(struct intel_connector *connector,
|
||||
u8 gpio_index, const char *chip,
|
||||
const char *con_id, u8 idx, bool value)
|
||||
{
|
||||
struct gpiod_lookup_table *lookup;
|
||||
|
||||
lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
|
||||
if (!lookup)
|
||||
return;
|
||||
|
||||
lookup->dev_id = "0000:00:02.0";
|
||||
lookup->table[0] =
|
||||
GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH);
|
||||
|
||||
gpiod_add_lookup_table(lookup);
|
||||
|
||||
soc_gpio_set_value(connector, gpio_index, con_id, idx, value);
|
||||
|
||||
gpiod_remove_lookup_table(lookup);
|
||||
kfree(lookup);
|
||||
}
|
||||
|
||||
static void vlv_gpio_set_value(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
|
||||
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
|
||||
if (connector->panel.vbt.dsi.seq_version < 3) {
|
||||
if (gpio_source == 1) {
|
||||
drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
|
||||
return;
|
||||
} else {
|
||||
}
|
||||
if (gpio_source > 1) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"unknown gpio source %u\n", gpio_source);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
pconf0 = VLV_GPIO_PCONF0(map->base_offset);
|
||||
padval = VLV_GPIO_PAD_VAL(map->base_offset);
|
||||
|
||||
vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
|
||||
if (!map->init) {
|
||||
/* FIXME: remove constant below */
|
||||
vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
|
||||
map->init = true;
|
||||
}
|
||||
|
||||
tmp = 0x4 | value;
|
||||
vlv_iosf_sb_write(dev_priv, port, padval, tmp);
|
||||
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
|
||||
soc_opaque_gpio_set_value(connector, gpio_index,
|
||||
"INT33FC:01", "Panel N", gpio_index, value);
|
||||
}
|
||||
|
||||
static void chv_exec_gpio(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
static void chv_gpio_set_value(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
u16 cfg0, cfg1;
|
||||
u16 family_num;
|
||||
u8 port;
|
||||
|
||||
if (connector->panel.vbt.dsi.seq_version >= 3) {
|
||||
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
|
||||
/* XXX: it's unclear whether 255->57 is part of SE. */
|
||||
gpio_index -= CHV_GPIO_IDX_START_SE;
|
||||
port = CHV_IOSF_PORT_GPIO_SE;
|
||||
soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE",
|
||||
gpio_index - CHV_GPIO_IDX_START_SE, value);
|
||||
} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
|
||||
gpio_index -= CHV_GPIO_IDX_START_SW;
|
||||
port = CHV_IOSF_PORT_GPIO_SW;
|
||||
soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW",
|
||||
gpio_index - CHV_GPIO_IDX_START_SW, value);
|
||||
} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
|
||||
gpio_index -= CHV_GPIO_IDX_START_E;
|
||||
port = CHV_IOSF_PORT_GPIO_E;
|
||||
soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E",
|
||||
gpio_index - CHV_GPIO_IDX_START_E, value);
|
||||
} else {
|
||||
port = CHV_IOSF_PORT_GPIO_N;
|
||||
soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
|
||||
gpio_index - CHV_GPIO_IDX_START_N, value);
|
||||
}
|
||||
} else {
|
||||
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
|
||||
@ -328,56 +296,15 @@ static void chv_exec_gpio(struct intel_connector *connector,
|
||||
return;
|
||||
}
|
||||
|
||||
port = CHV_IOSF_PORT_GPIO_N;
|
||||
soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
|
||||
gpio_index - CHV_GPIO_IDX_START_N, value);
|
||||
}
|
||||
|
||||
family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
|
||||
gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
|
||||
|
||||
cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
|
||||
cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
|
||||
|
||||
vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
|
||||
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
|
||||
vlv_iosf_sb_write(dev_priv, port, cfg0,
|
||||
CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
|
||||
CHV_GPIO_GPIOTXSTATE(value));
|
||||
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
|
||||
}
|
||||
|
||||
static void bxt_exec_gpio(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
static void bxt_gpio_set_value(struct intel_connector *connector,
|
||||
u8 gpio_index, bool value)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
/* XXX: this table is a quick ugly hack. */
|
||||
static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
|
||||
struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
|
||||
|
||||
if (!gpio_desc) {
|
||||
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
|
||||
NULL, gpio_index,
|
||||
value ? GPIOD_OUT_LOW :
|
||||
GPIOD_OUT_HIGH);
|
||||
|
||||
if (IS_ERR_OR_NULL(gpio_desc)) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"GPIO index %u request failed (%ld)\n",
|
||||
gpio_index, PTR_ERR(gpio_desc));
|
||||
return;
|
||||
}
|
||||
|
||||
bxt_gpio_table[gpio_index] = gpio_desc;
|
||||
}
|
||||
|
||||
gpiod_set_value(gpio_desc, value);
|
||||
}
|
||||
|
||||
static void icl_exec_gpio(struct intel_connector *connector,
|
||||
u8 gpio_source, u8 gpio_index, bool value)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
|
||||
soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value);
|
||||
}
|
||||
|
||||
enum {
|
||||
@ -462,44 +389,45 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
|
||||
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
|
||||
{
|
||||
struct drm_device *dev = intel_dsi->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct intel_connector *connector = intel_dsi->attached_connector;
|
||||
u8 gpio_source, gpio_index = 0, gpio_number;
|
||||
u8 gpio_source = 0, gpio_index = 0, gpio_number;
|
||||
bool value;
|
||||
bool native = DISPLAY_VER(dev_priv) >= 11;
|
||||
int size;
|
||||
bool native = DISPLAY_VER(i915) >= 11;
|
||||
|
||||
if (connector->panel.vbt.dsi.seq_version >= 3)
|
||||
gpio_index = *data++;
|
||||
if (connector->panel.vbt.dsi.seq_version >= 3) {
|
||||
size = 3;
|
||||
|
||||
gpio_number = *data++;
|
||||
gpio_index = data[0];
|
||||
gpio_number = data[1];
|
||||
value = data[2] & BIT(0);
|
||||
|
||||
/* gpio source in sequence v2 only */
|
||||
if (connector->panel.vbt.dsi.seq_version == 2)
|
||||
gpio_source = (*data >> 1) & 3;
|
||||
else
|
||||
gpio_source = 0;
|
||||
if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1))
|
||||
native = false;
|
||||
} else {
|
||||
size = 2;
|
||||
|
||||
if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
|
||||
native = false;
|
||||
gpio_number = data[0];
|
||||
value = data[1] & BIT(0);
|
||||
|
||||
/* pull up/down */
|
||||
value = *data++ & 1;
|
||||
if (connector->panel.vbt.dsi.seq_version == 2)
|
||||
gpio_source = (data[1] >> 1) & 3;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
|
||||
drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
|
||||
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
|
||||
|
||||
if (native)
|
||||
icl_native_gpio_set_value(dev_priv, gpio_number, value);
|
||||
else if (DISPLAY_VER(dev_priv) >= 11)
|
||||
icl_exec_gpio(connector, gpio_source, gpio_index, value);
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
vlv_exec_gpio(connector, gpio_source, gpio_number, value);
|
||||
else if (IS_CHERRYVIEW(dev_priv))
|
||||
chv_exec_gpio(connector, gpio_source, gpio_number, value);
|
||||
else
|
||||
bxt_exec_gpio(connector, gpio_source, gpio_index, value);
|
||||
icl_native_gpio_set_value(i915, gpio_number, value);
|
||||
else if (DISPLAY_VER(i915) >= 9)
|
||||
bxt_gpio_set_value(connector, gpio_index, value);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
|
||||
else if (IS_CHERRYVIEW(i915))
|
||||
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
|
||||
|
||||
return data;
|
||||
return data + size;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
@ -658,6 +586,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
|
||||
*/
|
||||
|
||||
static const char * const seq_name[] = {
|
||||
[MIPI_SEQ_END] = "MIPI_SEQ_END",
|
||||
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
|
||||
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
|
||||
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
|
||||
@ -673,10 +602,10 @@ static const char * const seq_name[] = {
|
||||
|
||||
static const char *sequence_name(enum mipi_seq seq_id)
|
||||
{
|
||||
if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
|
||||
if (seq_id < ARRAY_SIZE(seq_name))
|
||||
return seq_name[seq_id];
|
||||
else
|
||||
return "(unknown)";
|
||||
|
||||
return "(unknown)";
|
||||
}
|
||||
|
||||
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
|
||||
@ -707,13 +636,10 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
|
||||
if (connector->panel.vbt.dsi.seq_version >= 3)
|
||||
data += 4;
|
||||
|
||||
while (1) {
|
||||
while (*data != MIPI_SEQ_ELEM_END) {
|
||||
u8 operation_byte = *data++;
|
||||
u8 operation_size = 0;
|
||||
|
||||
if (operation_byte == MIPI_SEQ_ELEM_END)
|
||||
break;
|
||||
|
||||
if (operation_byte < ARRAY_SIZE(exec_elem))
|
||||
mipi_elem_exec = exec_elem[operation_byte];
|
||||
else
|
||||
@ -873,36 +799,34 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
|
||||
* multiply by 100 to preserve remainder
|
||||
*/
|
||||
if (intel_dsi->video_mode == BURST_MODE) {
|
||||
if (mipi_config->target_burst_mode_freq) {
|
||||
u32 bitrate = intel_dsi_bitrate(intel_dsi);
|
||||
u32 bitrate;
|
||||
|
||||
/*
|
||||
* Sometimes the VBT contains a slightly lower clock,
|
||||
* then the bitrate we have calculated, in this case
|
||||
* just replace it with the calculated bitrate.
|
||||
*/
|
||||
if (mipi_config->target_burst_mode_freq < bitrate &&
|
||||
intel_fuzzy_clock_check(
|
||||
mipi_config->target_burst_mode_freq,
|
||||
bitrate))
|
||||
mipi_config->target_burst_mode_freq = bitrate;
|
||||
|
||||
if (mipi_config->target_burst_mode_freq < bitrate) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Burst mode freq is less than computed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
burst_mode_ratio = DIV_ROUND_UP(
|
||||
mipi_config->target_burst_mode_freq * 100,
|
||||
bitrate);
|
||||
|
||||
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
|
||||
} else {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Burst mode target is not set\n");
|
||||
if (mipi_config->target_burst_mode_freq == 0) {
|
||||
drm_err(&dev_priv->drm, "Burst mode target is not set\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
bitrate = intel_dsi_bitrate(intel_dsi);
|
||||
|
||||
/*
|
||||
* Sometimes the VBT contains a slightly lower clock, then
|
||||
* the bitrate we have calculated, in this case just replace it
|
||||
* with the calculated bitrate.
|
||||
*/
|
||||
if (mipi_config->target_burst_mode_freq < bitrate &&
|
||||
intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq,
|
||||
bitrate))
|
||||
mipi_config->target_burst_mode_freq = bitrate;
|
||||
|
||||
if (mipi_config->target_burst_mode_freq < bitrate) {
|
||||
drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
burst_mode_ratio =
|
||||
DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate);
|
||||
|
||||
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
|
||||
} else
|
||||
burst_mode_ratio = 100;
|
||||
|
||||
@ -964,6 +888,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
|
||||
struct intel_connector *connector = intel_dsi->attached_connector;
|
||||
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
|
||||
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
|
||||
struct gpiod_lookup_table *gpiod_lookup_table = NULL;
|
||||
bool want_backlight_gpio = false;
|
||||
bool want_panel_gpio = false;
|
||||
struct pinctrl *pinctrl;
|
||||
@ -971,12 +896,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
|
||||
|
||||
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
|
||||
mipi_config->pwm_blc == PPS_BLC_PMIC) {
|
||||
gpiod_add_lookup_table(&pmic_panel_gpio_table);
|
||||
gpiod_lookup_table = &pmic_panel_gpio_table;
|
||||
want_panel_gpio = true;
|
||||
}
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
|
||||
gpiod_add_lookup_table(&soc_panel_gpio_table);
|
||||
gpiod_lookup_table = &soc_panel_gpio_table;
|
||||
want_panel_gpio = true;
|
||||
want_backlight_gpio = true;
|
||||
|
||||
@ -993,6 +918,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
|
||||
"Failed to set pinmux to PWM\n");
|
||||
}
|
||||
|
||||
if (gpiod_lookup_table)
|
||||
gpiod_add_lookup_table(gpiod_lookup_table);
|
||||
|
||||
if (want_panel_gpio) {
|
||||
intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
|
||||
if (IS_ERR(intel_dsi->gpio_panel)) {
|
||||
@ -1011,15 +939,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
|
||||
intel_dsi->gpio_backlight = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (gpiod_lookup_table)
|
||||
gpiod_remove_lookup_table(gpiod_lookup_table);
|
||||
}
|
||||
|
||||
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
|
||||
{
|
||||
struct drm_device *dev = intel_dsi->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_connector *connector = intel_dsi->attached_connector;
|
||||
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
|
||||
|
||||
if (intel_dsi->gpio_panel) {
|
||||
gpiod_put(intel_dsi->gpio_panel);
|
||||
intel_dsi->gpio_panel = NULL;
|
||||
@ -1029,13 +955,4 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
|
||||
gpiod_put(intel_dsi->gpio_backlight);
|
||||
intel_dsi->gpio_backlight = NULL;
|
||||
}
|
||||
|
||||
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
|
||||
mipi_config->pwm_blc == PPS_BLC_PMIC)
|
||||
gpiod_remove_lookup_table(&pmic_panel_gpio_table);
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
|
||||
pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
|
||||
gpiod_remove_lookup_table(&soc_panel_gpio_table);
|
||||
}
|
||||
}
|
||||
|
@ -764,7 +764,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
|
||||
|
||||
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
|
||||
{
|
||||
return fb && to_i915(fb->dev)->params.enable_dpt &&
|
||||
return fb && to_i915(fb->dev)->display.params.enable_dpt &&
|
||||
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
|
||||
}
|
||||
|
||||
@ -1930,10 +1930,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
|
||||
if (!atomic_read(&front->bits))
|
||||
return 0;
|
||||
|
||||
if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
|
||||
if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
|
||||
goto flush;
|
||||
|
||||
ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
|
||||
ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
|
||||
&fence);
|
||||
if (ret || !fence)
|
||||
goto flush;
|
||||
@ -2093,7 +2093,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
|
||||
}
|
||||
}
|
||||
|
||||
fb->obj[i] = &obj->base;
|
||||
fb->obj[i] = intel_bo_to_drm_bo(obj);
|
||||
}
|
||||
|
||||
ret = intel_fill_fb_info(dev_priv, intel_fb);
|
||||
|
@ -608,6 +608,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
|
||||
static void ivb_fbc_activate(struct intel_fbc *fbc)
|
||||
{
|
||||
struct drm_i915_private *i915 = fbc->i915;
|
||||
u32 dpfc_ctl;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 10)
|
||||
glk_fbc_program_cfb_stride(fbc);
|
||||
@ -617,8 +618,13 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
|
||||
if (intel_gt_support_legacy_fencing(to_gt(i915)))
|
||||
snb_fbc_program_fence(fbc);
|
||||
|
||||
/* wa_14019417088 Alternative WA*/
|
||||
dpfc_ctl = ivb_dpfc_ctl(fbc);
|
||||
if (DISPLAY_VER(i915) >= 20)
|
||||
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
|
||||
|
||||
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
|
||||
DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
|
||||
DPFC_CTL_EN | dpfc_ctl);
|
||||
}
|
||||
|
||||
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
|
||||
@ -1022,10 +1028,13 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
|
||||
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
|
||||
unsigned int effective_w, effective_h, max_w, max_h;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 10) {
|
||||
if (DISPLAY_VER(i915) >= 11) {
|
||||
max_w = 8192;
|
||||
max_h = 4096;
|
||||
} else if (DISPLAY_VER(i915) >= 10) {
|
||||
max_w = 5120;
|
||||
max_h = 4096;
|
||||
} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
|
||||
} else if (DISPLAY_VER(i915) >= 7) {
|
||||
max_w = 4096;
|
||||
max_h = 4096;
|
||||
} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
|
||||
@ -1044,6 +1053,31 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
|
||||
return effective_w <= max_w && effective_h <= max_h;
|
||||
}
|
||||
|
||||
static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
|
||||
unsigned int w, h, max_w, max_h;
|
||||
|
||||
if (DISPLAY_VER(i915) >= 10) {
|
||||
max_w = 5120;
|
||||
max_h = 4096;
|
||||
} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
|
||||
max_w = 4096;
|
||||
max_h = 4096;
|
||||
} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
|
||||
max_w = 4096;
|
||||
max_h = 2048;
|
||||
} else {
|
||||
max_w = 2048;
|
||||
max_h = 1536;
|
||||
}
|
||||
|
||||
w = drm_rect_width(&plane_state->uapi.src) >> 16;
|
||||
h = drm_rect_height(&plane_state->uapi.src) >> 16;
|
||||
|
||||
return w <= max_w && h <= max_h;
|
||||
}
|
||||
|
||||
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
|
||||
{
|
||||
const struct drm_framebuffer *fb = plane_state->hw.fb;
|
||||
@ -1174,7 +1208,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!i915->params.enable_fbc) {
|
||||
if (!i915->display.params.enable_fbc) {
|
||||
plane_state->no_fbc_reason = "disabled per module param or by default";
|
||||
return 0;
|
||||
}
|
||||
@ -1241,11 +1275,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
|
||||
if (!intel_fbc_plane_size_valid(plane_state)) {
|
||||
plane_state->no_fbc_reason = "plane size too big";
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
|
||||
plane_state->no_fbc_reason = "surface size too big";
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Work around a problem on GEN9+ HW, where enabling FBC on a plane
|
||||
* having a Y offset that isn't divisible by 4 causes FIFO underrun
|
||||
@ -1751,8 +1790,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
|
||||
*/
|
||||
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
|
||||
{
|
||||
if (i915->params.enable_fbc >= 0)
|
||||
return !!i915->params.enable_fbc;
|
||||
if (i915->display.params.enable_fbc >= 0)
|
||||
return !!i915->display.params.enable_fbc;
|
||||
|
||||
if (!HAS_FBC(i915))
|
||||
return 0;
|
||||
@ -1824,9 +1863,9 @@ void intel_fbc_init(struct drm_i915_private *i915)
|
||||
if (need_fbc_vtd_wa(i915))
|
||||
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
|
||||
|
||||
i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
|
||||
i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
|
||||
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
|
||||
i915->params.enable_fbc);
|
||||
i915->display.params.enable_fbc);
|
||||
|
||||
for_each_fbc_id(i915, fbc_id)
|
||||
i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include "intel_crtc.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_de.h"
|
||||
#include "intel_dp.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_fdi.h"
|
||||
#include "intel_fdi_regs.h"
|
||||
@ -338,8 +339,11 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
|
||||
|
||||
pipe_config->fdi_lanes = lane;
|
||||
|
||||
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
|
||||
link_bw, &pipe_config->fdi_m_n, false);
|
||||
intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
|
||||
lane, fdi_dotclock,
|
||||
link_bw,
|
||||
intel_dp_bw_fec_overhead(false),
|
||||
&pipe_config->fdi_m_n);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -265,8 +265,6 @@ static void frontbuffer_release(struct kref *ref)
|
||||
spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock);
|
||||
|
||||
i915_active_fini(&front->write);
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
kfree_rcu(front, rcu);
|
||||
}
|
||||
|
||||
|
@ -923,7 +923,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _intel_hdcp_enable(struct intel_connector *connector)
|
||||
static int intel_hdcp1_enable(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(connector->base.dev);
|
||||
struct intel_hdcp *hdcp = &connector->hdcp;
|
||||
@ -1058,7 +1058,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = _intel_hdcp_enable(connector);
|
||||
ret = intel_hdcp1_enable(connector);
|
||||
if (ret) {
|
||||
drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
|
||||
intel_hdcp_update_value(connector,
|
||||
@ -2324,10 +2324,10 @@ intel_hdcp_set_streams(struct intel_digital_port *dig_port,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
static int _intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
struct intel_connector *connector =
|
||||
@ -2388,7 +2388,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
*/
|
||||
if (ret && intel_hdcp_capable(connector) &&
|
||||
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
|
||||
ret = _intel_hdcp_enable(connector);
|
||||
ret = intel_hdcp1_enable(connector);
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
@ -2404,6 +2404,27 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_connector *connector =
|
||||
to_intel_connector(conn_state->connector);
|
||||
struct intel_hdcp *hdcp = &connector->hdcp;
|
||||
|
||||
/*
|
||||
* Enable hdcp if it's desired or if userspace is enabled and
|
||||
* driver set its state to undesired
|
||||
*/
|
||||
if (conn_state->content_protection ==
|
||||
DRM_MODE_CONTENT_PROTECTION_DESIRED ||
|
||||
(conn_state->content_protection ==
|
||||
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
|
||||
DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
|
||||
_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
|
||||
}
|
||||
|
||||
int intel_hdcp_disable(struct intel_connector *connector)
|
||||
{
|
||||
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
|
||||
@ -2491,7 +2512,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
|
||||
}
|
||||
|
||||
if (desired_and_not_enabled || content_protection_type_changed)
|
||||
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
|
||||
_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
|
||||
}
|
||||
|
||||
void intel_hdcp_component_fini(struct drm_i915_private *i915)
|
||||
|
@ -28,10 +28,10 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
|
||||
int intel_hdcp_init(struct intel_connector *connector,
|
||||
struct intel_digital_port *dig_port,
|
||||
const struct intel_hdcp_shim *hdcp_shim);
|
||||
int intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state);
|
||||
void intel_hdcp_enable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config,
|
||||
const struct drm_connector_state *conn_state);
|
||||
int intel_hdcp_disable(struct intel_connector *connector);
|
||||
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
|
||||
struct intel_encoder *encoder,
|
||||
|
@ -3030,16 +3030,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
|
||||
"HDCP init failed, skipping.\n");
|
||||
}
|
||||
|
||||
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
|
||||
* 0xd. Failure to do so will result in spurious interrupts being
|
||||
* generated on the port when a cable is not attached.
|
||||
*/
|
||||
if (IS_G45(dev_priv)) {
|
||||
u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
|
||||
intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
|
||||
(temp & ~0xf) | 0xd);
|
||||
}
|
||||
|
||||
cec_fill_conn_info_from_drm(&conn_info, connector);
|
||||
|
||||
intel_hdmi->cec_notifier =
|
||||
|
@ -1361,11 +1361,24 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
|
||||
bxt_hpd_detection_setup(dev_priv);
|
||||
}
|
||||
|
||||
static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
|
||||
{
|
||||
/*
|
||||
* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
|
||||
* 0xd. Failure to do so will result in spurious interrupts being
|
||||
* generated on the port when a cable is not attached.
|
||||
*/
|
||||
intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
|
||||
}
|
||||
|
||||
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
|
||||
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
|
||||
|
||||
if (IS_G45(i915))
|
||||
g45_hpd_peg_band_gap_wa(i915);
|
||||
|
||||
/* HPD sense and interrupt enable are one and the same */
|
||||
i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
|
||||
}
|
||||
@ -1389,6 +1402,9 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
|
||||
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
|
||||
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
|
||||
|
||||
if (IS_G45(dev_priv))
|
||||
g45_hpd_peg_band_gap_wa(dev_priv);
|
||||
|
||||
/* Ignore TV since it's buggy */
|
||||
i915_hotplug_interrupt_update_locked(dev_priv,
|
||||
HOTPLUG_INT_EN_MASK |
|
||||
|
@ -7,6 +7,7 @@
|
||||
|
||||
#include "intel_atomic.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dp_mst.h"
|
||||
#include "intel_fdi.h"
|
||||
#include "intel_link_bw.h"
|
||||
|
||||
@ -21,6 +22,7 @@ void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_
|
||||
{
|
||||
enum pipe pipe;
|
||||
|
||||
limits->force_fec_pipes = 0;
|
||||
limits->bpp_limit_reached_pipes = 0;
|
||||
for_each_pipe(i915, pipe)
|
||||
limits->max_bpp_x16[pipe] = INT_MAX;
|
||||
@ -53,11 +55,11 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
enum pipe max_bpp_pipe = INVALID_PIPE;
|
||||
struct intel_crtc *crtc;
|
||||
int max_bpp = 0;
|
||||
int max_bpp_x16 = 0;
|
||||
|
||||
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
|
||||
struct intel_crtc_state *crtc_state;
|
||||
int link_bpp;
|
||||
int link_bpp_x16;
|
||||
|
||||
if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe))
|
||||
continue;
|
||||
@ -68,7 +70,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
|
||||
return PTR_ERR(crtc_state);
|
||||
|
||||
if (crtc_state->dsc.compression_enable)
|
||||
link_bpp = crtc_state->dsc.compressed_bpp;
|
||||
link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
|
||||
else
|
||||
/*
|
||||
* TODO: for YUV420 the actual link bpp is only half
|
||||
@ -76,10 +78,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
|
||||
* is based on the pipe bpp value, set the actual link bpp
|
||||
* limit here once the MST BW allocation is fixed.
|
||||
*/
|
||||
link_bpp = crtc_state->pipe_bpp;
|
||||
link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
|
||||
|
||||
if (link_bpp > max_bpp) {
|
||||
max_bpp = link_bpp;
|
||||
if (link_bpp_x16 > max_bpp_x16) {
|
||||
max_bpp_x16 = link_bpp_x16;
|
||||
max_bpp_pipe = crtc->pipe;
|
||||
}
|
||||
}
|
||||
@ -87,7 +89,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
|
||||
if (max_bpp_pipe == INVALID_PIPE)
|
||||
return -ENOSPC;
|
||||
|
||||
limits->max_bpp_x16[max_bpp_pipe] = to_bpp_x16(max_bpp) - 1;
|
||||
limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1;
|
||||
|
||||
return intel_modeset_pipes_in_mask_early(state, reason,
|
||||
BIT(max_bpp_pipe));
|
||||
@ -143,6 +145,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
|
||||
/* TODO: Check additional shared display link configurations like MST */
|
||||
int ret;
|
||||
|
||||
ret = intel_dp_mst_atomic_check_link(state, limits);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_fdi_atomic_check_link(state, limits);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -158,6 +164,12 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
|
||||
bool bpps_changed = false;
|
||||
enum pipe pipe;
|
||||
|
||||
/* FEC can't be forced off after it was forced on. */
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
(old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
|
||||
old_limits->force_fec_pipes))
|
||||
return false;
|
||||
|
||||
for_each_pipe(i915, pipe) {
|
||||
/* The bpp limit can only decrease. */
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
@ -172,7 +184,9 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
|
||||
|
||||
/* At least one limit must change. */
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
!bpps_changed))
|
||||
!bpps_changed &&
|
||||
new_limits->force_fec_pipes ==
|
||||
old_limits->force_fec_pipes))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -16,6 +16,7 @@ struct intel_atomic_state;
|
||||
struct intel_crtc_state;
|
||||
|
||||
struct intel_link_bw_limits {
|
||||
u8 force_fec_pipes;
|
||||
u8 bpp_limit_reached_pipes;
|
||||
/* in 1/16 bpp units */
|
||||
int max_bpp_x16[I915_MAX_PIPES];
|
||||
|
@ -794,8 +794,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
|
||||
unsigned int val;
|
||||
|
||||
/* use the module option value if specified */
|
||||
if (i915->params.lvds_channel_mode > 0)
|
||||
return i915->params.lvds_channel_mode == 2;
|
||||
if (i915->display.params.lvds_channel_mode > 0)
|
||||
return i915->display.params.lvds_channel_mode == 2;
|
||||
|
||||
/* single channel LVDS is limited to 112 MHz */
|
||||
if (fixed_mode->clock > 112999)
|
||||
|
@ -318,6 +318,12 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
|
||||
const struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
|
||||
if (crtc_state->dsc.compression_enable) {
|
||||
drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
|
||||
connector->dp.dsc_decompression_enabled = true;
|
||||
} else {
|
||||
connector->dp.dsc_decompression_enabled = false;
|
||||
}
|
||||
conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
|
||||
}
|
||||
}
|
||||
|
@ -244,7 +244,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
|
||||
verify_crtc_state(state, crtc);
|
||||
intel_shared_dpll_state_verify(state, crtc);
|
||||
intel_mpllb_state_verify(state, crtc);
|
||||
intel_c10pll_state_verify(state, crtc);
|
||||
intel_cx0pll_state_verify(state, crtc);
|
||||
}
|
||||
|
||||
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
|
||||
|
@ -841,7 +841,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = &dev_priv->display.opregion;
|
||||
const struct firmware *fw = NULL;
|
||||
const char *name = dev_priv->params.vbt_firmware;
|
||||
const char *name = dev_priv->display.params.vbt_firmware;
|
||||
int ret;
|
||||
|
||||
if (!name || !*name)
|
||||
|
@ -46,8 +46,8 @@
|
||||
|
||||
bool intel_panel_use_ssc(struct drm_i915_private *i915)
|
||||
{
|
||||
if (i915->params.panel_use_ssc >= 0)
|
||||
return i915->params.panel_use_ssc != 0;
|
||||
if (i915->display.params.panel_use_ssc >= 0)
|
||||
return i915->display.params.panel_use_ssc != 0;
|
||||
return i915->display.vbt.lvds_use_ssc &&
|
||||
!intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
|
||||
}
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "intel_crt.h"
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_fdi.h"
|
||||
#include "intel_fdi_regs.h"
|
||||
#include "intel_lvds.h"
|
||||
|
@ -90,7 +90,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
enum pipe pipe = intel_dp->pps.pps_pipe;
|
||||
bool pll_enabled, release_cl_override = false;
|
||||
enum dpio_phy phy = DPIO_PHY(pipe);
|
||||
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
|
||||
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
|
||||
u32 DP;
|
||||
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "i915_reg.h"
|
||||
#include "intel_atomic.h"
|
||||
#include "intel_crtc.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dp.h"
|
||||
@ -172,6 +173,15 @@
|
||||
* irrelevant for normal operation.
|
||||
*/
|
||||
|
||||
bool intel_encoder_can_psr(struct intel_encoder *encoder)
|
||||
{
|
||||
if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
return CAN_PSR(enc_to_intel_dp(encoder)) ||
|
||||
CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool psr_global_enabled(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_connector *connector = intel_dp->attached_connector;
|
||||
@ -179,9 +189,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
|
||||
|
||||
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
|
||||
case I915_PSR_DEBUG_DEFAULT:
|
||||
if (i915->params.enable_psr == -1)
|
||||
if (i915->display.params.enable_psr == -1)
|
||||
return connector->panel.vbt.psr.enable;
|
||||
return i915->params.enable_psr;
|
||||
return i915->display.params.enable_psr;
|
||||
case I915_PSR_DEBUG_DISABLE:
|
||||
return false;
|
||||
default:
|
||||
@ -198,7 +208,7 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
|
||||
case I915_PSR_DEBUG_FORCE_PSR1:
|
||||
return false;
|
||||
default:
|
||||
if (i915->params.enable_psr == 1)
|
||||
if (i915->display.params.enable_psr == 1)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
@ -474,27 +484,41 @@ exit:
|
||||
intel_dp->psr.su_y_granularity = y;
|
||||
}
|
||||
|
||||
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
|
||||
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
u8 pr_dpcd = 0;
|
||||
|
||||
intel_dp->psr.sink_panel_replay_support = false;
|
||||
drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
|
||||
|
||||
if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Panel replay is not supported by panel\n");
|
||||
return;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Panel replay is supported by panel\n");
|
||||
intel_dp->psr.sink_panel_replay_support = true;
|
||||
}
|
||||
|
||||
static void _psr_init_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *i915 =
|
||||
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
|
||||
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
|
||||
if (!intel_dp->psr_dpcd[0])
|
||||
return;
|
||||
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
|
||||
drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
|
||||
intel_dp->psr_dpcd[0]);
|
||||
|
||||
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"PSR support not currently available for this panel\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Panel lacks power state control, PSR cannot be enabled\n");
|
||||
return;
|
||||
}
|
||||
@ -503,8 +527,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
|
||||
intel_dp->psr.sink_sync_latency =
|
||||
intel_dp_get_sink_sync_latency(intel_dp);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 9 &&
|
||||
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
|
||||
if (DISPLAY_VER(i915) >= 9 &&
|
||||
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
|
||||
bool y_req = intel_dp->psr_dpcd[1] &
|
||||
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
|
||||
bool alpm = intel_dp_get_alpm_status(intel_dp);
|
||||
@ -521,14 +545,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
|
||||
* GTC first.
|
||||
*/
|
||||
intel_dp->psr.sink_psr2_support = y_req && alpm;
|
||||
drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
|
||||
drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
|
||||
intel_dp->psr.sink_psr2_support ? "" : "not ");
|
||||
}
|
||||
}
|
||||
|
||||
if (intel_dp->psr.sink_psr2_support) {
|
||||
intel_dp->psr.colorimetry_support =
|
||||
intel_dp_get_colorimetry_status(intel_dp);
|
||||
intel_dp_get_su_granularity(intel_dp);
|
||||
}
|
||||
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
_panel_replay_init_dpcd(intel_dp);
|
||||
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
|
||||
if (intel_dp->psr_dpcd[0])
|
||||
_psr_init_dpcd(intel_dp);
|
||||
|
||||
if (intel_dp->psr.sink_psr2_support) {
|
||||
intel_dp->psr.colorimetry_support =
|
||||
intel_dp_get_colorimetry_status(intel_dp);
|
||||
intel_dp_get_su_granularity(intel_dp);
|
||||
}
|
||||
}
|
||||
|
||||
@ -574,8 +609,11 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u8 dpcd_val = DP_PSR_ENABLE;
|
||||
|
||||
/* Enable ALPM at sink for psr2 */
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
return;
|
||||
|
||||
if (intel_dp->psr.psr2_enabled) {
|
||||
/* Enable ALPM at sink for psr2 */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
|
||||
DP_ALPM_ENABLE |
|
||||
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
|
||||
@ -592,6 +630,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
|
||||
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
||||
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
|
||||
|
||||
if (intel_dp->psr.entry_setup_frames > 0)
|
||||
dpcd_val |= DP_PSR_FRAME_CAPTURE;
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
||||
@ -606,7 +647,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
|
||||
if (DISPLAY_VER(dev_priv) >= 11)
|
||||
val |= EDP_PSR_TP4_TIME_0us;
|
||||
|
||||
if (dev_priv->params.psr_safest_params) {
|
||||
if (dev_priv->display.params.psr_safest_params) {
|
||||
val |= EDP_PSR_TP1_TIME_2500us;
|
||||
val |= EDP_PSR_TP2_TP3_TIME_2500us;
|
||||
goto check_tp3_sel;
|
||||
@ -690,6 +731,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
|
||||
if (DISPLAY_VER(dev_priv) >= 8)
|
||||
val |= EDP_PSR_CRC_ENABLE;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 20)
|
||||
val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
|
||||
|
||||
intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
|
||||
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
|
||||
}
|
||||
@ -700,7 +744,7 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u32 val = 0;
|
||||
|
||||
if (dev_priv->params.psr_safest_params)
|
||||
if (dev_priv->display.params.psr_safest_params)
|
||||
return EDP_PSR2_TP2_TIME_2500us;
|
||||
|
||||
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
|
||||
@ -727,11 +771,38 @@ static int psr2_block_count(struct intel_dp *intel_dp)
|
||||
return psr2_block_count_lines(intel_dp) / 4;
|
||||
}
|
||||
|
||||
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
|
||||
{
|
||||
u8 frames_before_su_entry;
|
||||
|
||||
frames_before_su_entry = max_t(u8,
|
||||
intel_dp->psr.sink_sync_latency + 1,
|
||||
2);
|
||||
|
||||
/* Entry setup frames must be at least 1 less than frames before SU entry */
|
||||
if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
|
||||
frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
|
||||
|
||||
return frames_before_su_entry;
|
||||
}
|
||||
|
||||
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
|
||||
intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
|
||||
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
|
||||
|
||||
intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
|
||||
TRANS_DP2_PANEL_REPLAY_ENABLE);
|
||||
}
|
||||
|
||||
static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
|
||||
u32 val = EDP_PSR2_ENABLE;
|
||||
u32 psr_val = 0;
|
||||
|
||||
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
|
||||
|
||||
@ -741,7 +812,8 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
|
||||
val |= EDP_Y_COORDINATE_ENABLE;
|
||||
|
||||
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
|
||||
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
|
||||
|
||||
val |= intel_psr2_get_tp_time(intel_dp);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
@ -785,6 +857,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
||||
val |= EDP_PSR2_SU_SDP_SCANLINE;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 20)
|
||||
psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
|
||||
|
||||
if (intel_dp->psr.psr2_sel_fetch_enabled) {
|
||||
u32 tmp;
|
||||
|
||||
@ -798,7 +873,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
|
||||
* recommending keep this bit unset while PSR2 is enabled.
|
||||
*/
|
||||
intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
|
||||
intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
|
||||
|
||||
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
|
||||
}
|
||||
@ -943,7 +1018,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
|
||||
if (!dev_priv->params.enable_psr2_sel_fetch &&
|
||||
if (!dev_priv->display.params.enable_psr2_sel_fetch &&
|
||||
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 sel fetch not enabled, disabled by parameter\n");
|
||||
@ -1056,7 +1131,7 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
|
||||
fast_wake_lines > max_wake_lines)
|
||||
return false;
|
||||
|
||||
if (i915->params.psr_safest_params)
|
||||
if (i915->display.params.psr_safest_params)
|
||||
io_wake_lines = fast_wake_lines = max_wake_lines;
|
||||
|
||||
/* According to Bspec lower limit should be set as 7 lines. */
|
||||
@ -1066,6 +1141,39 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
|
||||
return true;
|
||||
}
|
||||
|
||||
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
|
||||
int entry_setup_frames = 0;
|
||||
|
||||
if (psr_setup_time < 0) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
|
||||
intel_dp->psr_dpcd[1]);
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
|
||||
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
|
||||
if (DISPLAY_VER(i915) >= 20) {
|
||||
/* setup entry frames can be up to 3 frames */
|
||||
entry_setup_frames = 1;
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"PSR setup entry frames %d\n",
|
||||
entry_setup_frames);
|
||||
} else {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"PSR condition failed: PSR setup time (%d us) too long\n",
|
||||
psr_setup_time);
|
||||
return -ETIME;
|
||||
}
|
||||
}
|
||||
|
||||
return entry_setup_frames;
|
||||
}
|
||||
|
||||
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
@ -1206,24 +1314,42 @@ unsupported:
|
||||
return false;
|
||||
}
|
||||
|
||||
void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
static bool _psr_compute_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
int psr_setup_time;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
|
||||
int entry_setup_frames;
|
||||
|
||||
/*
|
||||
* Current PSR panels don't work reliably with VRR enabled
|
||||
* So if VRR is enabled, do not enable PSR.
|
||||
*/
|
||||
if (crtc_state->vrr.enable)
|
||||
return;
|
||||
return false;
|
||||
|
||||
if (!CAN_PSR(intel_dp))
|
||||
return;
|
||||
return false;
|
||||
|
||||
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
|
||||
|
||||
if (entry_setup_frames >= 0) {
|
||||
intel_dp->psr.entry_setup_frames = entry_setup_frames;
|
||||
} else {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR condition failed: PSR setup timing not met\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
|
||||
|
||||
if (!psr_global_enabled(intel_dp)) {
|
||||
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
|
||||
@ -1242,23 +1368,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
return;
|
||||
}
|
||||
|
||||
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
|
||||
if (psr_setup_time < 0) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
|
||||
intel_dp->psr_dpcd[1]);
|
||||
return;
|
||||
}
|
||||
if (CAN_PANEL_REPLAY(intel_dp))
|
||||
crtc_state->has_panel_replay = true;
|
||||
else
|
||||
crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
|
||||
|
||||
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
|
||||
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR condition failed: PSR setup time (%d us) too long\n",
|
||||
psr_setup_time);
|
||||
if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
|
||||
return;
|
||||
}
|
||||
|
||||
crtc_state->has_psr = true;
|
||||
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
|
||||
|
||||
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
|
||||
@ -1279,18 +1396,23 @@ void intel_psr_get_config(struct intel_encoder *encoder,
|
||||
return;
|
||||
|
||||
intel_dp = &dig_port->dp;
|
||||
if (!CAN_PSR(intel_dp))
|
||||
if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
|
||||
return;
|
||||
|
||||
mutex_lock(&intel_dp->psr.lock);
|
||||
if (!intel_dp->psr.enabled)
|
||||
goto unlock;
|
||||
|
||||
/*
|
||||
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
|
||||
* enabled/disabled because of frontbuffer tracking and others.
|
||||
*/
|
||||
pipe_config->has_psr = true;
|
||||
if (intel_dp->psr.panel_replay_enabled) {
|
||||
pipe_config->has_panel_replay = true;
|
||||
} else {
|
||||
/*
|
||||
* Not possible to read EDP_PSR/PSR2_CTL registers as it is
|
||||
* enabled/disabled because of frontbuffer tracking and others.
|
||||
*/
|
||||
pipe_config->has_psr = true;
|
||||
}
|
||||
|
||||
pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
|
||||
pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
|
||||
|
||||
@ -1327,8 +1449,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
|
||||
|
||||
lockdep_assert_held(&intel_dp->psr.lock);
|
||||
|
||||
/* psr1 and psr2 are mutually exclusive.*/
|
||||
if (intel_dp->psr.psr2_enabled)
|
||||
/* psr1, psr2 and panel-replay are mutually exclusive.*/
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
dg2_activate_panel_replay(intel_dp);
|
||||
else if (intel_dp->psr.psr2_enabled)
|
||||
hsw_activate_psr2(intel_dp);
|
||||
else
|
||||
hsw_activate_psr1(intel_dp);
|
||||
@ -1452,12 +1576,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
|
||||
* All supported adlp panels have 1-based X granularity, this may
|
||||
* cause issues if non-supported panels are used.
|
||||
*/
|
||||
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
|
||||
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
|
||||
ADLP_1_BASED_X_GRANULARITY);
|
||||
else if (IS_ALDERLAKE_P(dev_priv))
|
||||
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
|
||||
ADLP_1_BASED_X_GRANULARITY);
|
||||
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
|
||||
IS_ALDERLAKE_P(dev_priv))
|
||||
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
|
||||
0, ADLP_1_BASED_X_GRANULARITY);
|
||||
|
||||
/* Wa_16012604467:adlp,mtl[a0,b0] */
|
||||
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
|
||||
@ -1508,6 +1630,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
|
||||
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
|
||||
|
||||
intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
|
||||
intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
|
||||
intel_dp->psr.busy_frontbuffer_bits = 0;
|
||||
intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
|
||||
intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
|
||||
@ -1523,8 +1646,12 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
|
||||
if (!psr_interrupt_error_check(intel_dp))
|
||||
return;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
|
||||
intel_dp->psr.psr2_enabled ? "2" : "1");
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
|
||||
else
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
|
||||
intel_dp->psr.psr2_enabled ? "2" : "1");
|
||||
|
||||
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
|
||||
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
|
||||
intel_psr_enable_sink(intel_dp);
|
||||
@ -1553,7 +1680,10 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
|
||||
return;
|
||||
}
|
||||
|
||||
if (intel_dp->psr.psr2_enabled) {
|
||||
if (intel_dp->psr.panel_replay_enabled) {
|
||||
intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
|
||||
TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
|
||||
} else if (intel_dp->psr.psr2_enabled) {
|
||||
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
|
||||
|
||||
val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
|
||||
@ -1602,8 +1732,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
||||
if (!intel_dp->psr.enabled)
|
||||
return;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
|
||||
intel_dp->psr.psr2_enabled ? "2" : "1");
|
||||
if (intel_dp->psr.panel_replay_enabled)
|
||||
drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
|
||||
else
|
||||
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
|
||||
intel_dp->psr.psr2_enabled ? "2" : "1");
|
||||
|
||||
intel_psr_exit(intel_dp);
|
||||
intel_psr_wait_exit_locked(intel_dp);
|
||||
@ -1636,6 +1769,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
|
||||
|
||||
intel_dp->psr.enabled = false;
|
||||
intel_dp->psr.panel_replay_enabled = false;
|
||||
intel_dp->psr.psr2_enabled = false;
|
||||
intel_dp->psr.psr2_sel_fetch_enabled = false;
|
||||
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
|
||||
@ -2207,7 +2341,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (!crtc_state->has_psr)
|
||||
if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
|
||||
return;
|
||||
|
||||
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
|
||||
@ -2693,9 +2827,12 @@ void intel_psr_init(struct intel_dp *intel_dp)
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
|
||||
if (!HAS_PSR(dev_priv))
|
||||
if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
|
||||
return;
|
||||
|
||||
if (!intel_dp_is_edp(intel_dp))
|
||||
intel_psr_init_dpcd(intel_dp);
|
||||
|
||||
/*
|
||||
* HSW spec explicitly says PSR is tied to port A.
|
||||
* BDW+ platforms have a instance of PSR registers per transcoder but
|
||||
@ -2711,7 +2848,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
|
||||
return;
|
||||
}
|
||||
|
||||
intel_dp->psr.source_support = true;
|
||||
if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
|
||||
intel_dp->psr.source_panel_replay_support = true;
|
||||
else
|
||||
intel_dp->psr.source_support = true;
|
||||
|
||||
/* Set link_standby x link_off defaults */
|
||||
if (DISPLAY_VER(dev_priv) < 12)
|
||||
@ -2728,12 +2868,19 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
|
||||
{
|
||||
struct drm_dp_aux *aux = &intel_dp->aux;
|
||||
int ret;
|
||||
unsigned int offset;
|
||||
|
||||
ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
|
||||
offset = intel_dp->psr.panel_replay_enabled ?
|
||||
DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
|
||||
|
||||
ret = drm_dp_dpcd_readb(aux, offset, status);
|
||||
if (ret != 1)
|
||||
return ret;
|
||||
|
||||
ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
|
||||
offset = intel_dp->psr.panel_replay_enabled ?
|
||||
DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
ret = drm_dp_dpcd_readb(aux, offset, error_status);
if (ret != 1)
return ret;

@@ -2954,7 +3101,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
status = live_status[status_val];
}
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)

@@ -2967,18 +3114,22 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val;
seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
seq_printf(m, "Sink support: PSR = %s",
str_yes_no(psr->sink_support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
if (!psr->sink_support)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
if (psr->panel_replay_enabled)
status = "Panel Replay Enabled";
else if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";

@@ -2991,14 +3142,17 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
goto unlock;
}
if (psr->psr2_enabled) {
if (psr->panel_replay_enabled) {
val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
} else if (psr->psr2_enabled) {
val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",

@@ -3136,6 +3290,16 @@ void intel_psr_debugfs_register(struct drm_i915_private *i915)
i915, &i915_edp_psr_status_fops);
}
static const char *psr_mode_str(struct intel_dp *intel_dp)
{
if (intel_dp->psr.panel_replay_enabled)
return "PANEL-REPLAY";
else if (intel_dp->psr.enabled)
return "PSR";
return "unknown";
}
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;

@@ -3150,12 +3314,19 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"reserved",
"sink internal error",
};
static const char * const panel_replay_status[] = {
"Sink device frame is locked to the Source device",
"Sink device is coasting, using the VTotal target",
"Sink device is governing the frame rate (frame rate unlock is granted)",
"Sink device in the process of re-locking with the Source device",
};
const char *str;
int ret;
u8 status, error_status;
u32 idx;
if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
seq_puts(m, "PSR/Panel-Replay Unsupported\n");
return -ENODEV;
}

@@ -3166,15 +3337,20 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
if (ret)
return ret;
status &= DP_PSR_SINK_STATE_MASK;
if (status < ARRAY_SIZE(sink_status))
str = sink_status[status];
else
str = "unknown";
str = "unknown";
if (intel_dp->psr.panel_replay_enabled) {
idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
if (idx < ARRAY_SIZE(panel_replay_status))
str = panel_replay_status[idx];
} else if (intel_dp->psr.enabled) {
idx = status & DP_PSR_SINK_STATE_MASK;
if (idx < ARRAY_SIZE(sink_status))
str = sink_status[idx];
}
seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
seq_printf(m, "Sink PSR error status: 0x%x", error_status);
seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |

@@ -3183,11 +3359,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
else
seq_puts(m, "\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
seq_puts(m, "\tPSR RFB storage error\n");
seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_LINK_CRC_ERROR)
seq_puts(m, "\tPSR Link CRC error\n");
seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
return ret;
}
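For context on the hunk above: with Panel Replay enabled the sink status byte is decoded as a frame-lock state rather than the PSR state-machine value. A minimal standalone sketch of that decode follows; the FRAME_LOCKED mask and shift values used here are placeholders for the example, not the actual DPCD definitions.

#include <stdio.h>
#include <stdint.h>

/* Placeholder field definition: the real DP_SINK_FRAME_LOCKED_MASK/SHIFT come
 * from the DPCD register definitions; the values below are illustrative only.
 */
#define FRAME_LOCKED_SHIFT	0
#define FRAME_LOCKED_MASK	(0x3 << FRAME_LOCKED_SHIFT)

static const char * const panel_replay_status[] = {
	"Sink device frame is locked to the Source device",
	"Sink device is coasting, using the VTotal target",
	"Sink device is governing the frame rate (frame rate unlock is granted)",
	"Sink device in the process of re-locking with the Source device",
};

int main(void)
{
	uint8_t status = 0x02;	/* example raw sink status byte */
	unsigned int idx = (status & FRAME_LOCKED_MASK) >> FRAME_LOCKED_SHIFT;
	const char *str = "unknown";

	/* Bounds-check the decoded index before using it as a table lookup. */
	if (idx < sizeof(panel_replay_status) / sizeof(panel_replay_status[0]))
		str = panel_replay_status[idx];

	printf("Sink PANEL-REPLAY status: 0x%x [%s]\n", status, str);
	return 0;
}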
@@ -3207,13 +3383,16 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
if (!(HAS_DP20(i915) &&
connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
return;
}
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
if (HAS_PSR(i915))
if (HAS_PSR(i915) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}

@@ -21,6 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)
#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
(intel_dp)->psr.source_panel_replay_support)
bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
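For context: the two macros above gate each feature on both ends of the link, so it is only usable when the sink reports support and the source hardware can drive it. A small self-contained sketch of that gating, using an illustrative psr_state struct rather than the driver's intel_psr type:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's per-connector PSR state. */
struct psr_state {
	bool sink_support, source_support;
	bool sink_panel_replay_support, source_panel_replay_support;
};

/* Mirrors the shape of CAN_PSR()/CAN_PANEL_REPLAY(): both ends must agree. */
#define CAN_PSR(p)          ((p)->sink_support && (p)->source_support)
#define CAN_PANEL_REPLAY(p) ((p)->sink_panel_replay_support && (p)->source_panel_replay_support)

int main(void)
{
	struct psr_state psr = {
		.sink_support = true, .source_support = true,
		.sink_panel_replay_support = true, .source_panel_replay_support = false,
	};

	/* Panel Replay needs source-side support too, so only PSR qualifies here. */
	printf("PSR: %d, Panel Replay: %d\n", CAN_PSR(&psr), CAN_PANEL_REPLAY(&psr));
	return 0;
}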
@@ -35,6 +35,8 @@
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3)
#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20)
#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x))
#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16)
#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x))
#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12)
#define EDP_PSR_TP_MASK REG_BIT(11)
#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
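For context: the new LNL_EDP_PSR_ENTRY_SETUP_FRAMES field follows the usual REG_GENMASK()/REG_FIELD_PREP() pattern, where a mask describes the bit range and the value is shifted into it. The sketch below is a simplified numeric model of that arithmetic; the kernel macros additionally perform compile-time range and type checking.

#include <stdint.h>
#include <stdio.h>

/* Simplified model: genmask(h, l) sets bits h..l, field_prep() shifts a
 * value into that field and clips it to the mask.
 */
static uint32_t genmask(unsigned int h, unsigned int l)
{
	return ((~0u) >> (31 - h)) & ~((1u << l) - 1);
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;	/* ctz: lowest set bit */
}

int main(void)
{
	/* e.g. a setup-frames field occupying bits 17:16 of the register */
	uint32_t mask = genmask(17, 16);
	uint32_t reg = field_prep(mask, 2);	/* program 2 setup frames */

	printf("mask=0x%08x val=0x%08x\n", mask, reg);	/* 0x00030000 0x00020000 */
	return 0;
}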
@@ -34,9 +34,6 @@
* These qp tables are as per the C model
* and it has the rows pointing to bpps which increment
* in steps of 0.5
* We do not support fractional bpps as of today,
* hence we would skip the fractional bpps during
* our references for qp calclulations.
*/
static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

@@ -1788,17 +1788,28 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_disable_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
if (!old_crtc_state->has_audio)
return;
intel_sdvo_set_audio_state(intel_sdvo, 0);
}
static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_enable_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
const u8 *eld = crtc_state->eld;
if (!crtc_state->has_audio)
return;
intel_sdvo_set_audio_state(intel_sdvo, 0);
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,

@@ -1819,8 +1830,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
if (old_crtc_state->has_audio)
intel_sdvo_disable_audio(intel_sdvo);
encoder->audio_disable(encoder, old_crtc_state, conn_state);
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)

@@ -1914,8 +1924,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
if (pipe_config->has_audio)
intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status

@@ -3391,6 +3400,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
}
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->audio_enable = intel_sdvo_enable_audio;
intel_encoder->audio_disable = intel_sdvo_disable_audio;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;

@@ -48,6 +48,11 @@
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
{
return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
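For context: sprite_name() now derives the plane letter from the per-pipe sprite count rather than a hard-coded stride. A standalone sketch of the arithmetic, with two sprites per pipe assumed purely for the example:

#include <stdio.h>

/* Minimal model of the sprite naming scheme: sprites are lettered 'A', 'B', ...
 * across pipes, so pipe * sprites-per-pipe + sprite gives the letter offset.
 * The per-pipe sprite count (2 here) is an assumption for this example.
 */
static char sprite_name(int pipe, int num_sprites, int sprite)
{
	return 'A' + pipe * num_sprites + sprite;
}

int main(void)
{
	/* Pipe B (index 1), second sprite (index 1), 2 sprites per pipe -> 'D'. */
	printf("sprite %c\n", sprite_name(1, 2, 1));
	return 0;
}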
@@ -1636,7 +1641,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
"sprite %c", sprite_name(dev_priv, pipe, sprite));
kfree(modifiers);
if (ret)

@@ -77,8 +77,8 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
int bpp = vdsc_cfg->bits_per_pixel >> 4;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;

@@ -148,7 +148,13 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
static const s8 ofs_und8[] = {
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
/*
* For 420 format since bits_per_pixel (bpp) is set to target bpp * 2,
* QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are
* actually for bpp 8 to 8.875 (rounded to 4.0 * 2 i.e 8).
* Similarly values for target bpp 4.5 to 4.8375 (rounded to 4.5)
* are for bpp 9 to 9.875 (rounded to 4.5 * 2 i.e 9), and so on.
*/
bpp_i = bpp - 8;
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;

@@ -178,6 +184,9 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
} else {
/* fractional bpp part * 10000 (for precision up to 4 decimal places) */
int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};

@@ -191,7 +200,14 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
bpp_i = (2 * (bpp - 6));
/*
* QP table rows have values in increment of 0.5.
* So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1,
* and so on.
* 0.5 fractional part with 4 decimal precision becomes 5000
*/
bpp_i = ((bpp - 6) + (fractional_bits < 5000 ? 0 : 1));
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
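For context: compressed bpp is now carried as a .4 binary fixed-point value (x16), and the QP table row is picked from its integer and fractional parts. The sketch below models to_bpp_int()/to_bpp_frac() as assumed helpers that follow the comments in the hunk (integer part is the value shifted right by 4, fractional part scaled to four decimal digits); it is an illustration, not the driver's implementation.

#include <stdio.h>

/* Assumed helpers for the x16 fixed-point bpp used above: each 1/16 step of
 * the fractional part contributes 625/10000.
 */
static int to_bpp_int(int bpp_x16)  { return bpp_x16 >> 4; }
static int to_bpp_frac(int bpp_x16) { return (bpp_x16 & 0xf) * 625; }

int main(void)
{
	int bpp_x16 = (6 << 4) | 8;	/* 6.5 bpp in x16 fixed point */
	int bpp = to_bpp_int(bpp_x16);
	int frac = to_bpp_frac(bpp_x16);

	/* QP table rows step by 0.5 bpp: 6.0..6.4375 -> 0, 6.5..6.9375 -> 1, ... */
	int bpp_i = (bpp - 6) + (frac < 5000 ? 0 : 1);

	printf("bpp=%d frac=%d bpp_i=%d\n", bpp, frac, bpp_i);	/* 6 5000 1 */
	return 0;
}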
@@ -248,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;

@@ -279,8 +295,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
/* Gen 11 only supports integral values of bpp */
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16;
/*
* According to DSC 1.2 specs in Section 4.1 if native_420 is set

@@ -874,7 +889,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
if (vdsc_cfg->native_420)
vdsc_cfg->bits_per_pixel >>= 1;
crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel;
/* PPS 2 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2);

@@ -21,7 +21,6 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "gt/intel_gt.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {

@@ -1007,7 +1006,8 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* The DPT object contains only one vma, so the VMA's offset
* within the DPT is always 0.
*/
drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start);
drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
plane_state->dpt_vma->node.start);
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {

@@ -1855,16 +1855,19 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
static void check_protection(struct intel_plane_state *plane_state)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
return intel_pxp_key_check(i915->pxp, obj, false) == 0;
}
if (DISPLAY_VER(i915) < 11)
return;
static bool pxp_is_borked(struct drm_i915_gem_object *obj)
{
return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
plane_state->force_black = i915_gem_object_is_protected(obj) &&
!plane_state->decrypt;
}
static int skl_plane_check(struct intel_crtc_state *crtc_state,

@@ -1911,10 +1914,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
if (DISPLAY_VER(dev_priv) >= 11) {
plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
}
check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
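For context on the check kept above: the uapi plane alpha is a 16-bit value while the hardware only honours 8 bits of precision, so any alpha below 1/256 of full is effectively invisible and the plane can be disabled. A tiny sketch of that test:

#include <stdint.h>
#include <stdio.h>

/* Model of the "8 bits pixel precision" check: only the top 8 bits of the
 * 16-bit alpha reach the hardware, so alpha >> 8 == 0 means fully transparent.
 */
static int plane_visible(uint16_t alpha)
{
	return (alpha >> 8) != 0;
}

int main(void)
{
	printf("alpha 0x00ff visible: %d\n", plane_visible(0x00ff));	/* 0 */
	printf("alpha 0x0100 visible: %d\n", plane_visible(0x0100));	/* 1 */
	return 0;
}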
@@ -2489,7 +2489,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
if (!dev_priv->params.enable_dpt &&
if (!dev_priv->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
goto error;

@@ -412,7 +412,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!i915->params.enable_sagv)
if (!i915->display.params.enable_sagv)
return false;
if (DISPLAY_VER(i915) >= 12)

@@ -3702,7 +3702,8 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
};
seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv));
seq_printf(m, "SAGV modparam: %s\n",
str_enabled_disabled(i915->display.params.enable_sagv));
seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);

@@ -561,6 +561,12 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
{
return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

@@ -570,7 +576,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),

@@ -589,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(&dev_priv->drm, "DSI LP not going Low\n");

@@ -627,8 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
temp = intel_de_read(dev_priv, port_ctrl);

@@ -664,8 +669,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);

@@ -955,9 +959,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable

@@ -89,6 +89,7 @@ i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
if (!front) {
RCU_INIT_POINTER(obj->frontbuffer, NULL);
drm_gem_object_put(intel_bo_to_drm_bo(obj));
} else if (rcu_access_pointer(obj->frontbuffer)) {
cur = rcu_dereference_protected(obj->frontbuffer, true);
kref_get(&cur->ref);

@@ -10,6 +10,7 @@
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
static inline bool

@@ -167,6 +167,20 @@ void intel_gt_release_all(struct drm_i915_private *i915);
(id__)++) \
for_each_if(((gt__) = (i915__)->gt[(id__)]))
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
for ((id__) = 0; \
(id__) < I915_NUM_ENGINES; \
(id__)++) \
for_each_if ((engine__) = (gt__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
(tmp__) ? \
((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
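For context: for_each_engine_masked() visits only the engines selected by the caller's mask, consuming one set bit per iteration via __mask_next_bit(). Below is a standalone model of that walk; mask_next_bit() here is an assumed stand-in built on the GCC/Clang count-trailing-zeros builtin, not the kernel helper itself.

#include <stdint.h>
#include <stdio.h>

/* Return the index of the lowest set bit and clear it from the mask, so a
 * loop over the mask visits exactly the selected engines once each.
 */
static unsigned int mask_next_bit(uint32_t *mask)
{
	unsigned int bit = __builtin_ctz(*mask);	/* lowest set bit */

	*mask &= *mask - 1;				/* clear it */
	return bit;
}

int main(void)
{
	uint32_t mask = 0x15;	/* engines 0, 2 and 4 */

	while (mask)
		printf("engine %u\n", mask_next_bit(&mask));
	return 0;
}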
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"

@@ -3047,7 +3047,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
u32 per_ctx_start[CACHELINE_DWORDS] = {0};
u32 per_ctx_start[CACHELINE_DWORDS] = {};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)

@@ -56,7 +56,7 @@ static const struct pixel_format bdw_pixel_formats[] = {
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
/* non-supported format has bpp default to 0 */
{0, 0, NULL},
{}
};
static const struct pixel_format skl_pixel_formats[] = {

@@ -76,7 +76,7 @@ static const struct pixel_format skl_pixel_formats[] = {
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
/* non-supported format has bpp default to 0 */
{0, 0, NULL},
{}
};
static int bdw_format_to_drm(int format)

@@ -293,7 +293,7 @@ static const struct cursor_mode_format cursor_pixel_formats[] = {
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
/* non-supported format has bpp default to 0 */
{0, 0, 0, 0, NULL},
{}
};
static int cursor_mode_to_drm(int mode)
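For context: these format tables are sentinel-terminated, and an empty initializer {} produces the same all-zero terminator as the spelled-out {0, 0, NULL}; lookups stop at the first entry whose bpp is 0. A standalone sketch of that pattern:

#include <stdio.h>

/* Model of a sentinel-terminated format table: the last entry is all zeroes
 * (written as {} above), and iteration stops when it reaches bpp == 0.
 */
struct pixel_format {
	int drm_format;
	int bpp;
	const char *desc;
};

static const struct pixel_format formats[] = {
	{ 1, 32, "32-bit RGBX" },
	{ 2, 16, "16-bit RGB"  },
	{}	/* terminator: bpp == 0 */
};

int main(void)
{
	for (const struct pixel_format *f = formats; f->bpp; f++)
		printf("format %d: %s\n", f->drm_format, f->desc);
	return 0;
}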
@@ -538,7 +538,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {0};
struct dpll clock = {};
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */

@@ -2576,7 +2576,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);

@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
#include "display/intel_display_params.h"
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"

@@ -67,13 +69,13 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
intel_display_params_dump(i915, &p);
kernel_param_unlock(THIS_MODULE);
return 0;

@@ -231,16 +231,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
mutex_init(&dev_priv->display.audio.mutex);
mutex_init(&dev_priv->display.wm.wm_mutex);
mutex_init(&dev_priv->display.pps.mutex);
mutex_init(&dev_priv->display.hdcp.hdcp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);

@@ -909,6 +903,8 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
intel_display_device_remove(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)

@@ -396,20 +396,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
for ((id__) = 0; \
(id__) < I915_NUM_ENGINES; \
(id__)++) \
for_each_if ((engine__) = (gt__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
(tmp__) ? \
((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
#define rb_to_uabi_engine(rb) \
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

@@ -418,11 +404,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
#define for_each_uabi_class_engine(engine__, class__, i915__) \
for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)

@@ -575,6 +556,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
#define IS_LUNARLAKE(i915) 0
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)

@@ -1306,8 +1306,6 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)

@@ -660,6 +660,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
i915_params_dump(params, &p);
intel_display_params_dump(m->i915, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,

@@ -1027,6 +1028,7 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
intel_display_params_free(&error->display_params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)

@@ -1988,6 +1990,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->suspend_count = i915->suspend_count;
i915_params_copy(&error->params, &i915->params);
intel_display_params_copy(&error->display_params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));

@@ -2174,7 +2177,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
Some files were not shown because too many files have changed in this diff.