linux/include/drm/display/drm_dp_tunnel.h
Imre Deak 295654f7e5 drm/dp: Add support for DP tunneling
Add support for DisplayPort tunneling. For now this includes support
for Bandwidth Allocation Mode (BWA); Panel Replay support is left for
later.

BWA allows displays that share the same (Thunderbolt) link to be used
at their maximum resolution. At the moment this may not be possible
due to the coarse granularity of partitioning the link BW among the
displays on the link: the BW allocation policy is in a SW/FW/HW
component on the link (on Thunderbolt it's the SW or FW Connection
Manager), independent of the driver. This policy sets the DPRX
maximum rate and lane count DPCD registers the GFX driver will see
(0x00000, 0x00001, 0x02200, 0x02201) based on the available link BW.
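
For reference, this is roughly how a driver reads back the DPRX
capabilities the Connection Manager has programmed, using the
existing helpers from <drm/display/drm_dp_helper.h> (aux and the
max_rate_khz/max_lanes locals are illustrative caller-side names):

	u8 rate_code, lane_count;
	int max_rate_khz, max_lanes;

	if (drm_dp_dpcd_readb(aux, DP_MAX_LINK_RATE, &rate_code) < 0 ||
	    drm_dp_dpcd_readb(aux, DP_MAX_LANE_COUNT, &lane_count) < 0)
		return -EIO;

	max_rate_khz = drm_dp_bw_code_to_link_rate(rate_code);
	max_lanes = lane_count & DP_MAX_LANE_COUNT_MASK;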

The granularity of the current BW allocation policy is coarse, based
on the required link rate in the 1.62Gbps..8.1Gbps range, and it may
prevent using higher resolutions altogether: the display connected
first will get a share of the link BW which corresponds to its full
DPRX capability (regardless of the actual mode it uses), while a
subsequently connected display will only get the remaining BW, which
could be well below its full capability. For instance, a first
display whose DPRX advertises 8.1Gbps may be granted that full rate
even while driving a mode that needs far less, starving a display
connected after it.

BWA solves both the coarse granularity above (reducing it to a
250Mbps..1Gbps range) and the first-come/first-served issue by
letting the driver request the BW for each display on a link based on
the actual modes the displays use, as sketched below.
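
A minimal sketch of the intended flow (mgr, aux and required_bw are
assumed to come from the driver; error unwinding is trimmed):

	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (!err)
		/* Request the BW of the actual mode, not the DPRX maximum. */
		err = drm_dp_tunnel_alloc_bw(tunnel, required_bw);

	if (err)
		drm_dp_tunnel_destroy(tunnel);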

This patch adds the DRM core helper functions, while a follow-up change
in the patchset takes them into use in the i915 driver.

v2:
- Fix prepare_to_wait vs. wake-up cond check order in
  allocate_tunnel_bw(). (Ville)
- Move tunnel==NULL checks from callers in drivers to here. (Ville)
- Avoid var inits in declaration blocks that can fail or have
  side-effects. (Ville)
- Use u8 for driver and group IDs. (Ville)
- Simplify API removing drm_dp_tunnel_get/put_untracked(). (Ville)
- Reuse str_yes_no() instead of a local yes_no_chr(). (Ville)
- s/drm_dp_tunnel_atomic_clear_state()/free_tunnel_state() and unexport
  the function. (Ville)
- s/clear_tunnel_group_state()/free_group_state() and move kfree() to
  this function. (Ville)
- Add separate group_free_bw() helper and describe what the tunnel
  estimated BW includes. (Ville)
- Improve help text for CONFIG_DRM_DISPLAY_DP_TUNNEL. (Ville)
- Add code comment explaining the purpose of DPCD reg read helpers.
  (Ville)
- Add code comment describing the tunnel group name prefix format.
  (Ville)
- Report the allocated BW as undetermined until the first allocation
  request.
- Skip allocation requests matching the previous request.
- Clear any stale BW request status flags before a new request.
- Add missing error return check of drm_dp_tunnel_atomic_get_group_state()
  in drm_dp_tunnel_atomic_set_stream_bw().
- Add drm_dp_tunnel_get_allocated_bw().
- s/drm_dp_tunnel_atomic_get_tunnel_bw/drm_dp_tunnel_atomic_get_required_bw
- Fix return value description in function doc of drm_dp_tunnel_detect().
- Add function documentation to all exported functions.

v3:
- Improve grouping of fields in drm_dp_tunnel_group struct. (Uma)
- Fix validating the BW granularity DPCD reg value. (Uma)
- Document return value of check_and_clear_status_change(). (Uma)
- Fix resetting drm_dp_tunnel_ref::tunnel in drm_dp_tunnel_ref_put().
  (Ville)
- Allow for ALLOCATED_BW to change after a BWA enable/disable sequence.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Uma Shankar <uma.shankar@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240226185246.1276018-2-imre.deak@intel.com

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __DRM_DP_TUNNEL_H__
#define __DRM_DP_TUNNEL_H__

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct drm_dp_aux;

struct drm_device;

struct drm_atomic_state;
struct drm_dp_tunnel_mgr;
struct drm_dp_tunnel_state;

struct ref_tracker;

struct drm_dp_tunnel_ref {
	struct drm_dp_tunnel *tunnel;
	struct ref_tracker *tracker;
};

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL

struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);

void
drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);

static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
					 struct drm_dp_tunnel_ref *tunnel_ref)
{
	tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker);
}

static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref)
{
	drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker);
	tunnel_ref->tunnel = NULL;
}
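
/*
 * Illustrative pairing of the tracked reference helpers above; the
 * tunnel_ref member below is a hypothetical driver-side field:
 *
 *	drm_dp_tunnel_ref_get(tunnel, &enc_priv->tunnel_ref);
 *	...
 *	drm_dp_tunnel_ref_put(&enc_priv->tunnel_ref);
 */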

struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux);
int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel);
bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw);
int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel);

void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
			     struct drm_dp_aux *aux);

int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel);

const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int bw);
int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						    const struct drm_dp_tunnel *tunnel,
						    u32 *stream_mask);

int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
					  u32 *failed_stream_mask);

int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state);
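
/*
 * Illustrative atomic check sequence; stream_id, stream_bw and
 * failed_stream_mask are hypothetical caller-side variables:
 *
 *	err = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel,
 *						 stream_id, stream_bw);
 *	if (!err)
 *		err = drm_dp_tunnel_atomic_check_stream_bws(state,
 *							    &failed_stream_mask);
 */
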
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count);
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr);

#else

static inline struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
{
	return NULL;
}

static inline void
drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {}

static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
					 struct drm_dp_tunnel_ref *tunnel_ref) {}

static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {}

static inline struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int
drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
{
	return false;
}

static inline int
drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
{
	return -1;
}

static inline int
drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {}

static inline int
drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
			 struct drm_dp_aux *aux)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int
drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int
drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
{
	return -1;
}

static inline const char *
drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
{
	return NULL;
}

static inline struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int
drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				   struct drm_dp_tunnel *tunnel,
				   u8 stream_id, int bw)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						const struct drm_dp_tunnel *tunnel,
						u32 *stream_mask)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
				      u32 *failed_stream_mask)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
{
	return 0;
}

static inline struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {}

#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */

#endif /* __DRM_DP_TUNNEL_H__ */