Merge tag 'drm-xe-next-fixes-2024-07-11' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

UAPI Changes:
- Rename xe perf layer as xe observation layer (Ashutosh)

Driver Changes:
- Drop trace_xe_hw_fence_free (Brost)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Zo_3ustogPDVKZwu@intel.com
commit 864204e467
Dave Airlie <airlied@redhat.com>, 2024-07-12 12:52:15 +10:00
61 changed files with 1063 additions and 584 deletions

View File

@ -608,7 +608,6 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
unsigned int fb_width, fb_height;
struct drm_mode_rect *clips;
uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
if (crtc && !fb) {
@ -635,14 +634,12 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
}
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
plane->base.id, plane->name,
&fb->format->format, fb->modifier);
return ret;
return -EINVAL;
}
/* Give drivers some help against integer overflows */

View File

@ -789,12 +789,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
* case.
*/
if (!plane->format_default) {
ret = drm_plane_check_pixel_format(plane,
fb->format->format,
fb->modifier);
if (ret) {
if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) {
drm_dbg_kms(dev, "Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format, fb->modifier);
ret = -EINVAL;
goto out;
}
}

View File

@ -272,8 +272,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
void drm_plane_unregister_all(struct drm_device *dev);
int drm_plane_check_pixel_format(struct drm_plane *plane,
u32 format, u64 modifier);
struct drm_mode_rect *
__drm_plane_get_damage_clips(const struct drm_plane_state *state);

View File

@ -877,8 +877,17 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return 0;
}
int drm_plane_check_pixel_format(struct drm_plane *plane,
u32 format, u64 modifier)
/**
* drm_plane_has_format - Check whether the plane supports this format and modifier combination
* @plane: drm plane
* @format: pixel format (DRM_FORMAT_*)
* @modifier: data layout modifier
*
* Returns:
* Whether the plane supports the specified format and modifier combination.
*/
bool drm_plane_has_format(struct drm_plane *plane,
u32 format, u64 modifier)
{
unsigned int i;
@ -887,25 +896,26 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
break;
}
if (i == plane->format_count)
return -EINVAL;
return false;
if (plane->funcs->format_mod_supported) {
if (!plane->funcs->format_mod_supported(plane, format, modifier))
return -EINVAL;
return false;
} else {
if (!plane->modifier_count)
return 0;
return true;
for (i = 0; i < plane->modifier_count; i++) {
if (modifier == plane->modifiers[i])
break;
}
if (i == plane->modifier_count)
return -EINVAL;
return false;
}
return 0;
return true;
}
EXPORT_SYMBOL(drm_plane_has_format);
static int __setplane_check(struct drm_plane *plane,
struct drm_crtc *crtc,
@ -924,12 +934,10 @@ static int __setplane_check(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) {
DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format, fb->modifier);
return ret;
return -EINVAL;
}
/* Give drivers some help against integer overflows */
@ -964,7 +972,7 @@ bool drm_any_plane_has_format(struct drm_device *dev,
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
if (drm_plane_check_pixel_format(plane, format, modifier) == 0)
if (drm_plane_has_format(plane, format, modifier))
return true;
}
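
For reference, a minimal caller-side sketch of the bool-returning helper introduced above; the function and variable names here are illustrative, not part of the patch. Callers now test the format/modifier combination directly and pick their own error code, as the hunks in drm_atomic.c and drm_crtc.c do.

/* Hypothetical caller: reject a framebuffer the plane cannot scan out. */
static int example_check_plane_fb(struct drm_plane *plane,
				  const struct drm_framebuffer *fb)
{
	/* drm_plane_has_format() returns bool; the caller chooses the errno. */
	if (!drm_plane_has_format(plane, fb->format->format, fb->modifier))
		return -EINVAL;

	return 0;
}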

View File

@ -232,6 +232,28 @@ void drm_vblank_work_flush(struct drm_vblank_work *work)
}
EXPORT_SYMBOL(drm_vblank_work_flush);
/**
* drm_vblank_work_flush_all - flush all currently pending vblank work on crtc.
* @crtc: crtc for which vblank work to flush
*
* Wait until all currently queued vblank work on @crtc
* has finished executing once.
*/
void drm_vblank_work_flush_all(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];
spin_lock_irq(&dev->event_lock);
wait_event_lock_irq(vblank->work_wait_queue,
list_empty(&vblank->pending_work),
dev->event_lock);
spin_unlock_irq(&dev->event_lock);
kthread_flush_worker(vblank->worker);
}
EXPORT_SYMBOL(drm_vblank_work_flush_all);
/**
* drm_vblank_work_init - initialize a vblank work item
* @work: vblank work item
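
drm_vblank_work_flush_all() added above waits for the CRTC's pending_work list to empty and then flushes the kthread worker. A hedged condensation of the call site added later in this series (intel_commit_modeset_disables); the wrapper function name is illustrative only.

/* Flush any queued vblank work (e.g. a deferred cursor unpin)
 * before the pipe is shut down. */
static void example_disable_crtc_planes(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	intel_crtc_disable_planes(state, crtc);
	drm_vblank_work_flush_all(&crtc->base);
}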

View File

@ -225,8 +225,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x, src_y, src_w;
u32 offset;
@ -267,7 +267,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* despite them not using the linear offset anymore.
*/
if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
unsigned int alignment = intel_surf_alignment(fb, 0);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
int cpp = fb->format->cpp[0];
while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
@ -764,6 +764,66 @@ i8xx_plane_max_stride(struct intel_plane *plane,
return 8 * 1024;
}
static unsigned int vlv_primary_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
if (HAS_ASYNC_FLIPS(i915))
return 256 * 1024;
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
if (HAS_ASYNC_FLIPS(i915))
return 256 * 1024;
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 4 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static unsigned int i965_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static unsigned int i9xx_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
return 0;
}
static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@ -869,6 +929,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->max_stride = ilk_primary_max_stride;
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
plane->min_alignment = vlv_primary_min_alignment;
else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
plane->min_alignment = g4x_primary_min_alignment;
else if (DISPLAY_VER(dev_priv) == 4)
plane->min_alignment = i965_plane_min_alignment;
else
plane->min_alignment = i9xx_plane_min_alignment;
if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
plane->update_arm = i830_plane_update_arm;
} else {

View File

@ -135,8 +135,9 @@ static int _lnl_compute_aux_less_wake_time(int port_clock)
t1 + tcds, 1000);
}
static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
@ -168,7 +169,7 @@ static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
}
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int check_entry_lines;
@ -220,7 +221,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
}
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;

View File

@ -15,7 +15,7 @@ struct intel_connector;
void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
const struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);

View File

@ -43,6 +43,7 @@
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@ -801,18 +802,30 @@ void intel_plane_update_noarm(struct intel_plane *plane,
plane->update_noarm(plane, crtc_state, plane_state);
}
void intel_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_plane_async_flip(plane, crtc, async_flip);
plane->async_flip(plane, crtc_state, plane_state, async_flip);
}
void intel_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_plane_update_arm(plane, crtc);
if (crtc_state->do_async_flip && plane->async_flip) {
intel_plane_async_flip(plane, crtc_state, plane_state, true);
return;
}
if (crtc_state->do_async_flip && plane->async_flip)
plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_arm(plane, crtc_state, plane_state);
trace_intel_plane_update_arm(plane, crtc);
plane->update_arm(plane, crtc_state, plane_state);
}
void intel_plane_disable_arm(struct intel_plane *plane,
@ -1189,7 +1202,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
intel_display_rps_mark_interactive(dev_priv, state, false);
/* Should only be called after a successful intel_prepare_plane_fb()! */
intel_plane_unpin_fb(old_plane_state);
}
@ -1202,3 +1214,14 @@ void intel_plane_helper_add(struct intel_plane *plane)
{
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
if (!old_plane_state->ggtt_vma ||
old_plane_state->ggtt_vma == new_plane_state->ggtt_vma)
return;
drm_vblank_work_init(&old_plane_state->unpin_work, old_plane_state->uapi.crtc,
intel_cursor_unpin_work);
}

View File

@ -32,6 +32,10 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
struct intel_crtc *crtc);
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state);
void intel_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip);
void intel_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
@ -67,5 +71,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
void intel_plane_helper_add(struct intel_plane *plane);
bool intel_plane_needs_physical(struct intel_plane *plane);
void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */

View File

@ -1902,19 +1902,24 @@ void intel_color_post_update(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_post_update(crtc_state);
}
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->hw.active ||
intel_crtc_needs_modeset(crtc_state))
return;
if (!intel_crtc_needs_color_update(crtc_state))
return;
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
crtc_state->dsb = intel_dsb_prepare(crtc_state, INTEL_DSB_0, 1024);
crtc_state->dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
if (!crtc_state->dsb)
return;

View File

@ -19,7 +19,8 @@ int intel_color_init(struct drm_i915_private *i915);
void intel_color_crtc_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);

View File

@ -24,7 +24,6 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
@ -497,6 +496,19 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
if (state->base.legacy_cursor_update) {
struct intel_plane *plane;
struct intel_plane_state *old_plane_state, *new_plane_state;
int i;
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (old_plane_state->uapi.crtc == &crtc->base)
intel_plane_init_cursor_vblank_work(old_plane_state,
new_plane_state);
}
}
intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);
if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
@ -560,6 +572,23 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif
void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned long irqflags;
if (!crtc_state->uapi.event)
return;
drm_WARN_ON(crtc->base.dev, drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
drm_crtc_arm_vblank_event(&crtc->base, crtc_state->uapi.event);
spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);
crtc_state->uapi.event = NULL;
}
/**
* intel_pipe_update_end() - end update of a set of display registers
* @state: the atomic state
@ -601,16 +630,26 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
drm_vblank_work_schedule(&new_crtc_state->vblank_work,
drm_crtc_accurate_vblank_count(&crtc->base) + 1,
false);
} else if (new_crtc_state->uapi.event) {
drm_WARN_ON(&dev_priv->drm,
drm_crtc_vblank_get(&crtc->base) != 0);
} else {
intel_crtc_arm_vblank_event(new_crtc_state);
}
spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base,
new_crtc_state->uapi.event);
spin_unlock(&crtc->base.dev->event_lock);
if (state->base.legacy_cursor_update) {
struct intel_plane *plane;
struct intel_plane_state *old_plane_state;
int i;
new_crtc_state->uapi.event = NULL;
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (old_plane_state->uapi.crtc == &crtc->base &&
old_plane_state->unpin_work.vblank) {
drm_vblank_work_schedule(&old_plane_state->unpin_work,
drm_crtc_accurate_vblank_count(&crtc->base) + 1,
false);
/* Remove plane from atomic state, cleanup/free is done from vblank worker. */
memset(&state->base.planes[i], 0, sizeof(state->base.planes[i]));
}
}
}
/*

View File

@ -28,6 +28,7 @@ struct intel_crtc_state;
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

View File

@ -194,6 +194,13 @@ i845_cursor_max_stride(struct intel_plane *plane,
return 2048;
}
static unsigned int i845_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
return 32;
}
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 cntl = 0;
@ -344,6 +351,28 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
return plane->base.dev->mode_config.cursor_width * 4;
}
static unsigned int i830_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
/* "AlmadorM Errata Requires 32-bpp cursor data to be 16KB aligned." */
return 16 * 1024; /* physical */
}
static unsigned int i85x_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
return 256; /* physical */
}
static unsigned int i9xx_cursor_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
return 4 * 1024; /* physical for i915/i945 */
}
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@ -732,6 +761,17 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
return format == DRM_FORMAT_ARGB8888;
}
void intel_cursor_unpin_work(struct kthread_work *base)
{
struct drm_vblank_work *work = to_drm_vblank_work(base);
struct intel_plane_state *plane_state =
container_of(work, typeof(*plane_state), unpin_work);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
intel_plane_unpin_fb(plane_state);
intel_plane_destroy_state(&plane->base, &plane_state->uapi);
}
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
struct drm_crtc *_crtc,
@ -875,14 +915,25 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
intel_psr_unlock(crtc_state);
intel_plane_unpin_fb(old_plane_state);
if (old_plane_state->ggtt_vma != new_plane_state->ggtt_vma) {
drm_vblank_work_init(&old_plane_state->unpin_work, &crtc->base,
intel_cursor_unpin_work);
drm_vblank_work_schedule(&old_plane_state->unpin_work,
drm_crtc_accurate_vblank_count(&crtc->base) + 1,
false);
old_plane_state = NULL;
} else {
intel_plane_unpin_fb(old_plane_state);
}
out_free:
if (new_crtc_state)
intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
if (ret)
intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
else
else if (old_plane_state)
intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
return ret;
@ -942,12 +993,21 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->max_stride = i845_cursor_max_stride;
cursor->min_alignment = i845_cursor_min_alignment;
cursor->update_arm = i845_cursor_update_arm;
cursor->disable_arm = i845_cursor_disable_arm;
cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->max_stride = i9xx_cursor_max_stride;
if (IS_I830(dev_priv))
cursor->min_alignment = i830_cursor_min_alignment;
else if (IS_I85X(dev_priv))
cursor->min_alignment = i85x_cursor_min_alignment;
else
cursor->min_alignment = i9xx_cursor_min_alignment;
cursor->update_arm = i9xx_cursor_update_arm;
cursor->disable_arm = i9xx_cursor_disable_arm;
cursor->get_hw_state = i9xx_cursor_get_hw_state;

View File

@ -9,9 +9,12 @@
enum pipe;
struct drm_i915_private;
struct intel_plane;
struct kthread_work;
struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_cursor_unpin_work(struct kthread_work *base);
#endif

View File

@ -3279,6 +3279,10 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
{
pll_state->use_c10 = false;
pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
if (pll_state->tbt_mode)
return;
if (intel_encoder_is_c10phy(encoder)) {
intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
pll_state->use_c10 = true;
@ -3325,6 +3329,8 @@ static bool mtl_compare_hw_state_c20(const struct intel_c20pll_state *a,
bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b)
{
if (a->tbt_mode || b->tbt_mode)
return true;
if (a->use_c10 != b->use_c10)
return false;
@ -3420,12 +3426,11 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
return;
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
if (mpll_hw_state.tbt_mode)
return;
if (intel_encoder_is_c10phy(encoder))
intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
else

View File

@ -2096,6 +2096,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
u32 ln0, ln1, pin_assignment;
u8 width;
if (DISPLAY_VER(dev_priv) >= 14)
return;
if (!intel_encoder_is_tc(&dig_port->base) ||
intel_tc_port_in_tbt_alt_mode(dig_port))
return;
@ -4024,14 +4027,12 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
if (crtc_state->dpll_hw_state.cx0pll.tbt_mode)
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
} else {
intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
else
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
}
intel_ddi_get_config(encoder, crtc_state);
}

View File

@ -68,6 +68,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
@ -1160,8 +1161,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
* Apart from the async flip bit we want to
* preserve the old state for the plane.
*/
plane->async_flip(plane, old_crtc_state,
old_plane_state, false);
intel_plane_async_flip(plane, old_crtc_state,
old_plane_state, false);
need_vbl_wait = true;
}
}
@ -6718,7 +6719,7 @@ int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
struct intel_crtc_state *crtc_state;
struct intel_crtc_state __maybe_unused *crtc_state;
struct intel_crtc *crtc;
int i, ret;
@ -6726,10 +6727,8 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
if (ret < 0)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (intel_crtc_needs_color_update(crtc_state))
intel_color_prepare_commit(crtc_state);
}
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
intel_color_prepare_commit(state, crtc);
return 0;
}
@ -7022,6 +7021,8 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
continue;
intel_crtc_disable_planes(state, crtc);
drm_vblank_work_flush_all(&crtc->base);
}
/* Only disable port sync and MST slaves */

View File

@ -346,6 +346,7 @@ static void flip_done_handler(struct drm_i915_private *i915,
spin_lock(&i915->drm.event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
crtc->flip_done_event = NULL;
}
@ -836,14 +837,53 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
else if (DISPLAY_VER(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
else if (DISPLAY_VER(dev_priv) == 12)
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
GEN11_PIPE_PLANE6_FAULT |
GEN11_PIPE_PLANE5_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
else if (DISPLAY_VER(dev_priv) == 11)
return GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
GEN11_PIPE_PLANE6_FAULT |
GEN11_PIPE_PLANE5_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
else if (DISPLAY_VER(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
return GEN9_PIPE_CURSOR_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
else
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
return GEN8_PIPE_CURSOR_FAULT |
GEN8_PIPE_SPRITE_FAULT |
GEN8_PIPE_PRIMARY_FAULT;
}
static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)

View File

@ -54,6 +54,9 @@ intel_display_param_named_unsafe(enable_dc, int, 0400,
intel_display_param_named_unsafe(enable_dpt, bool, 0400,
"Enable display page table (DPT) (default: true)");
intel_display_param_named_unsafe(enable_dsb, bool, 0400,
"Enable display state buffer (DSB) (default: true)");
intel_display_param_named_unsafe(enable_sagv, bool, 0400,
"Enable system agent voltage/frequency scaling (SAGV) (default: true)");

View File

@ -31,6 +31,7 @@ struct drm_i915_private;
param(int, vbt_sdvo_panel_type, -1, 0400) \
param(int, enable_dc, -1, 0400) \
param(bool, enable_dpt, true, 0400) \
param(bool, enable_dsb, true, 0600) \
param(bool, enable_sagv, true, 0600) \
param(int, disable_power_well, -1, 0400) \
param(bool, enable_ips, true, 0600) \

View File

@ -78,6 +78,29 @@ TRACE_EVENT(intel_pipe_disable,
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);
TRACE_EVENT(intel_crtc_flip_done,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
__get_str(dev), pipe_name(__entry->pipe),
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_pipe_crc,
TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
TP_ARGS(crtc, crcs),
@ -308,6 +331,33 @@ TRACE_EVENT(vlv_fifo_size,
__entry->sprite0_start, __entry->sprite1_start, __entry->fifo_size)
);
TRACE_EVENT(intel_plane_async_flip,
TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc, bool async_flip),
TP_ARGS(plane, crtc, async_flip),
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(bool, async_flip)
__string(name, plane->base.name)
),
TP_fast_assign(
__assign_str(dev);
__assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->async_flip = async_flip;
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s",
__get_str(dev), pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline, str_yes_no(__entry->async_flip))
);
TRACE_EVENT(intel_plane_update_noarm,
TP_PROTO(struct intel_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),

View File

@ -146,6 +146,8 @@ struct intel_framebuffer {
};
struct i915_address_space *dpt_vm;
unsigned int min_alignment;
};
enum intel_hotplug_state {
@ -742,6 +744,9 @@ struct intel_plane_state {
struct intel_fb_view view;
u32 phys_dma_addr; /* for cursor_needs_physical */
/* for legacy cursor fb unpin */
struct drm_vblank_work unpin_work;
/* Plane pxp decryption state */
bool decrypt;
@ -1566,6 +1571,9 @@ struct intel_plane {
int (*max_height)(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation);
unsigned int (*min_alignment)(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane);
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);

View File

@ -265,6 +265,7 @@ struct intel_cx0pll_state {
};
bool ssc_enabled;
bool use_c10;
bool tbt_mode;
};
struct intel_dpll_hw_state {

View File

@ -85,10 +85,10 @@ struct intel_dsb {
static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_display *display = to_intel_display(crtc->base.dev);
/* each instruction is 2 dwords */
return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
"[CRTC:%d:%s] DSB %d buffer overflow\n",
crtc->base.base.id, crtc->base.name, dsb->id);
}
@ -96,25 +96,25 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_display *display = to_intel_display(crtc->base.dev);
int i;
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
crtc->base.base.id, crtc->base.name, dsb->id);
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(&i915->drm,
drm_dbg_kms(display->drm,
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
intel_dsb_buffer_read(&dsb->dsb_buf, i),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
drm_dbg_kms(&i915->drm, "}\n");
drm_dbg_kms(display->drm, "}\n");
}
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
enum intel_dsb_id dsb_id)
{
return intel_de_read_fw(i915, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
@ -343,27 +343,27 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
tail = dsb->free_pos * 4;
if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
return;
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n",
if (is_dsb_busy(display, pipe, dsb->id)) {
drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
crtc->base.base.id, crtc->base.name, dsb->id);
return;
}
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
dsb_chicken(crtc));
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
if (dewake_scanline >= 0) {
@ -371,7 +371,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
hw_dewake_scanline = intel_crtc_scanline_to_hw(crtc, dewake_scanline);
intel_de_write_fw(dev_priv, DSB_PMCTRL(pipe, dsb->id),
intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
DSB_ENABLE_DEWAKE |
DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
@ -380,12 +380,12 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
* or close to racing past the target scanline.
*/
diff = dewake_scanline - intel_get_crtc_scanline(crtc);
intel_de_write_fw(dev_priv, DSB_PMCTRL_2(pipe, dsb->id),
intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
(diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
DSB_BLOCK_DEWAKE_EXTENSION);
}
intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
@ -407,21 +407,21 @@ void intel_dsb_commit(struct intel_dsb *dsb,
void intel_dsb_wait(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
drm_err(&dev_priv->drm,
drm_err(display->drm,
"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
crtc->base.base.id, crtc->base.name, dsb->id,
intel_de_read_fw(dev_priv, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
intel_de_read_fw(dev_priv, DSB_HEAD(pipe, dsb->id)) - offset,
intel_de_read_fw(dev_priv, DSB_TAIL(pipe, dsb->id)) - offset);
intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);
intel_dsb_dump(dsb);
}
@ -429,12 +429,13 @@ void intel_dsb_wait(struct intel_dsb *dsb)
/* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
}
/**
* intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
* @crtc_state: the CRTC state
* @state: the atomic state
* @crtc: the CRTC
* @dsb_id: the DSB engine to use
* @max_cmds: number of commands we need to fit into command buffer
*
@ -444,12 +445,14 @@ void intel_dsb_wait(struct intel_dsb *dsb)
* Returns:
* DSB context, NULL on failure
*/
struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
unsigned int size;
@ -457,6 +460,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
if (!HAS_DSB(i915))
return NULL;
if (!i915->display.params.enable_dsb)
return NULL;
/* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
if (!IS_ENABLED(I915))
return NULL;
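
With the new signature, intel_dsb_prepare() looks up the crtc_state from the atomic state itself and also bails out when the new enable_dsb parameter is false. Its caller in this series (intel_color_prepare_commit) reduces to roughly the following; this is an illustrative condensation, not additional patch content.

static void example_prepare_color_dsb(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* New signature: atomic state + crtc instead of a crtc_state. */
	crtc_state->dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
	if (!crtc_state->dsb)
		return; /* no DSB (unsupported, or enable_dsb=0): MMIO fallback */
}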

View File

@ -10,6 +10,7 @@
#include "i915_reg_defs.h"
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dsb;
@ -22,7 +23,8 @@ enum intel_dsb_id {
I915_MAX_DSBS,
};
struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc,
enum intel_dsb_id dsb_id,
unsigned int max_cmds);
void intel_dsb_finish(struct intel_dsb *dsb);

View File

@ -45,18 +45,18 @@
#define DSB_TLBTRANS_SM_STATE_MASK REG_GENMASK(21, 20)
#define DSB_SAFE_WINDOW REG_BIT(19)
#define DSB_POINTERS_SM_STATE_MASK REG_GENMASK(18, 17)
#define DSB_BUSY_ON_DELAYED_VBLANK REG_BIT(16)
#define DSB_BUSY_DURING_DELAYED_VBLANK REG_BIT(16)
#define DSB_MMIO_ARB_SM_STATE_MASK REG_GENMASK(15, 13)
#define DSB_MMIO_INST_SM_STATE_MASK REG_GENMASK(11, 7)
#define DSB_RESET_SM_STATE_MASK REG_GENMASK(5, 4)
#define DSB_RUN_SM_STATE_MASK REG_GENMASK(2, 0)
#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
#define DSB_ATS_FAULT_INT_EN REG_BIT(20)
#define DSB_ATS_FAULT_INT_EN REG_BIT(20) /* mtl+ */
#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
#define DSB_POLL_ERR_INT_EN REG_BIT(17)
#define DSB_PROG_INT_EN REG_BIT(16)
#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4)
#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) /* mtl+ */
#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
#define DSB_POLL_ERR_INT_STATUS REG_BIT(1)

View File

@ -37,3 +37,47 @@ void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int dela
mod_delayed_work(i915->unordered_wq,
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
void intel_encoder_suspend_all(struct intel_display *display)
{
struct intel_encoder *encoder;
if (!HAS_DISPLAY(display))
return;
/*
* TODO: check and remove holding the modeset locks if none of
* the encoders depends on this.
*/
drm_modeset_lock_all(display->drm);
for_each_intel_encoder(display->drm, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
drm_modeset_unlock_all(display->drm);
for_each_intel_encoder(display->drm, encoder)
if (encoder->suspend_complete)
encoder->suspend_complete(encoder);
}
void intel_encoder_shutdown_all(struct intel_display *display)
{
struct intel_encoder *encoder;
if (!HAS_DISPLAY(display))
return;
/*
* TODO: check and remove holding the modeset locks if none of
* the encoders depends on this.
*/
drm_modeset_lock_all(display->drm);
for_each_intel_encoder(display->drm, encoder)
if (encoder->shutdown)
encoder->shutdown(encoder);
drm_modeset_unlock_all(display->drm);
for_each_intel_encoder(display->drm, encoder)
if (encoder->shutdown_complete)
encoder->shutdown_complete(encoder);
}
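
These two helpers centralise the encoder suspend/shutdown walks described in the comments above. A hypothetical driver-side call site would look roughly like this; the wrapper function names are illustrative, only the intel_encoder_*_all() calls come from the patch.

/* Suspend path: encoder->suspend() runs under the modeset locks,
 * then encoder->suspend_complete() runs without them. */
static void example_display_suspend(struct intel_display *display)
{
	intel_encoder_suspend_all(display);
}

/* Shutdown path mirrors the same two-phase pattern. */
static void example_display_shutdown(struct intel_display *display)
{
	intel_encoder_shutdown_all(display);
}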

View File

@ -6,6 +6,7 @@
#ifndef __INTEL_ENCODER_H__
#define __INTEL_ENCODER_H__
struct intel_display;
struct intel_encoder;
void intel_encoder_link_check_init(struct intel_encoder *encoder,
@ -13,4 +14,7 @@ void intel_encoder_link_check_init(struct intel_encoder *encoder,
void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms);
void intel_encoder_link_check_flush_work(struct intel_encoder *encoder);
void intel_encoder_suspend_all(struct intel_display *display);
void intel_encoder_shutdown_all(struct intel_display *display);
#endif /* __INTEL_ENCODER_H__ */

View File

@ -11,6 +11,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
@ -584,12 +585,6 @@ static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_pl
return intel_fb_rc_ccs_cc_plane(fb) == color_plane;
}
static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
{
return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
color_plane == 1;
}
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
@ -776,105 +771,6 @@ bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
{
if (IS_I830(i915))
return 16 * 1024;
else if (IS_I85X(i915))
return 256;
else if (IS_I845G(i915) || IS_I865G(i915))
return 32;
else
return 4 * 1024;
}
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 9)
return 256 * 1024;
else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (DISPLAY_VER(dev_priv) >= 4)
return 4 * 1024;
else
return 0;
}
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
if (intel_fb_uses_dpt(fb)) {
/* AUX_DIST needs only 4K alignment */
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 512 * 4096;
/*
* FIXME ADL sees GGTT/DMAR faults with async
* flips unless we align to 16k at least.
* Figure out what's going on here...
*/
if (IS_ALDERLAKE_P(dev_priv) &&
!intel_fb_is_ccs_modifier(fb->modifier) &&
HAS_ASYNC_FLIPS(dev_priv))
return 512 * 16 * 1024;
return 512 * 4096;
}
/* AUX_DIST needs only 4K alignment */
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 4096;
if (is_semiplanar_uv_plane(fb, color_plane)) {
/*
* TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
* alignment for linear UV planes on all platforms.
*/
if (DISPLAY_VER(dev_priv) >= 12) {
if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return intel_linear_alignment(dev_priv);
return intel_tile_row_size(fb, color_plane);
}
return 4096;
}
drm_WARN_ON(&dev_priv->drm, color_plane != 0);
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (HAS_ASYNC_FLIPS(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_4_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
return 16 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
int color_plane)
@ -1099,17 +995,12 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
struct intel_plane *plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int rotation = state->hw.rotation;
unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
unsigned int alignment;
if (intel_plane->id == PLANE_CURSOR)
alignment = intel_cursor_alignment(i915);
else
alignment = intel_surf_alignment(fb, color_plane);
unsigned int alignment = plane->min_alignment(plane, fb, color_plane);
return intel_compute_aligned_offset(i915, x, y, fb, color_plane,
pitch, rotation, alignment);
@ -1123,11 +1014,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
struct drm_i915_private *i915 = to_i915(fb->dev);
unsigned int height, alignment, unused;
if (DISPLAY_VER(i915) >= 12 &&
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
alignment = intel_tile_size(i915);
else
alignment = 0;
@ -1617,6 +1504,32 @@ bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
fb->base.modifier == I915_FORMAT_MOD_Yf_TILED;
}
static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb)
{
struct drm_i915_private *i915 = to_i915(fb->dev);
struct intel_plane *plane;
unsigned int min_alignment = 0;
for_each_intel_plane(&i915->drm, plane) {
unsigned int plane_min_alignment;
if (!drm_plane_has_format(&plane->base, fb->format->format, fb->modifier))
continue;
plane_min_alignment = plane->min_alignment(plane, fb, 0);
drm_WARN_ON(&i915->drm, plane_min_alignment &&
!is_power_of_2(plane_min_alignment));
if (intel_plane_needs_physical(plane))
continue;
min_alignment = max(min_alignment, plane_min_alignment);
}
return min_alignment;
}
int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb)
{
struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
@ -1699,6 +1612,8 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
return -EINVAL;
}
fb->min_alignment = intel_fb_min_alignment(&fb->base);
return 0;
}

View File

@ -60,9 +60,6 @@ unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915);
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane);
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,

View File

@ -103,8 +103,9 @@ err:
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
bool phys_cursor,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
bool uses_fence,
unsigned long *out_flags)
{
@ -113,7 +114,6 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
intel_wakeref_t wakeref;
struct i915_gem_ww_ctx ww;
unsigned int alignment;
struct i915_vma *vma;
unsigned int pinctl;
int ret;
@ -121,10 +121,6 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
if (phys_cursor)
alignment = intel_cursor_alignment(dev_priv);
else
alignment = intel_surf_alignment(fb, 0);
if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
@ -162,8 +158,8 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
i915_gem_ww_ctx_init(&ww, true);
retry:
ret = i915_gem_object_lock(obj, &ww);
if (!ret && phys_cursor)
ret = i915_gem_object_attach_phys(obj, alignment);
if (!ret && phys_alignment)
ret = i915_gem_object_attach_phys(obj, phys_alignment);
else if (!ret && HAS_LMEM(dev_priv))
ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
if (!ret)
@ -234,6 +230,26 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
i915_vma_put(vma);
}
static unsigned int
intel_plane_fb_min_alignment(const struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb);
return fb->min_alignment;
}
static unsigned int
intel_plane_fb_min_phys_alignment(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!intel_plane_needs_physical(plane))
return 0;
return plane->min_alignment(plane, fb, 0);
}
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@ -242,8 +258,9 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (!intel_fb_uses_dpt(&fb->base)) {
vma = intel_fb_pin_to_ggtt(&fb->base, intel_plane_needs_physical(plane),
&plane_state->view.gtt,
vma = intel_fb_pin_to_ggtt(&fb->base, &plane_state->view.gtt,
intel_plane_fb_min_alignment(plane_state),
intel_plane_fb_min_phys_alignment(plane_state),
intel_plane_uses_fence(plane_state),
&plane_state->flags);
if (IS_ERR(vma))
@ -261,7 +278,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
plane_state->phys_dma_addr =
i915_gem_object_get_dma_address(intel_fb_obj(&fb->base), 0);
} else {
unsigned int alignment = intel_surf_alignment(&fb->base, 0);
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
vma = intel_dpt_pin_to_ggtt(fb->dpt_vm, alignment / 512);
if (IS_ERR(vma))

View File

@ -15,8 +15,9 @@ struct i915_gtt_view;
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
bool phys_cursor,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
bool uses_fence,
unsigned long *out_flags);

View File

@ -228,8 +228,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
vma = intel_fb_pin_to_ggtt(&fb->base, false,
&view, false, &flags);
vma = intel_fb_pin_to_ggtt(&fb->base, &view,
fb->min_alignment, 0,
false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;

View File

@ -571,6 +571,20 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
drm_dbg_kms(&i915->drm,
"Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
return;
}
if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
drm_dbg_kms(&i915->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
}
}
intel_dp->psr.sink_panel_replay_support = true;
if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
@ -706,6 +720,7 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
if (crtc_state->has_sel_update)
val |= DP_PANEL_REPLAY_SU_ENABLE;
@ -713,7 +728,14 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_su_region_et)
val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
if (crtc_state->req_psr2_sdp_prior_scanline)
panel_replay_config2 |=
DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
panel_replay_config2);
}
static void _psr_enable_sink(struct intel_dp *intel_dp,
@ -732,6 +754,9 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
val |= DP_PSR_CRC_VERIFICATION;
}
if (crtc_state->req_psr2_sdp_prior_scanline)
val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
if (crtc_state->enable_psr2_su_region_et)
val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
@ -741,15 +766,31 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
u8 val;
/*
* eDP Panel Replay uses always ALPM
* PSR2 uses ALPM but PSR1 doesn't
*/
if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
!crtc_state->has_sel_update))
return;
val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
if (crtc_state->has_panel_replay)
val |= DP_ALPM_MODE_AUX_LESS;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
/* Enable ALPM at sink for psr2 */
if (!crtc_state->has_panel_replay && crtc_state->has_sel_update)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
intel_psr_enable_sink_alpm(intel_dp, crtc_state);
crtc_state->has_panel_replay ?
_panel_replay_enable_sink(intel_dp, crtc_state) :
@ -910,6 +951,19 @@ static u8 frames_before_su_entry(struct intel_dp *intel_dp)
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
u32 val = psr->su_region_et_enabled ?
LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
val);
}
intel_de_rmw(dev_priv,
PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
@ -1265,14 +1319,15 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
}
static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
crtc_state->hw.adjusted_mode.crtc_vblank_start;
int wake_lines;
if (crtc_state->has_panel_replay)
if (aux_less)
wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
else
wake_lines = DISPLAY_VER(i915) < 20 ?
@ -1289,6 +1344,27 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
return true;
}
static bool alpm_config_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
drm_dbg_kms(&i915->drm,
"PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
return false;
}
if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
drm_dbg_kms(&i915->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
}
return true;
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@ -1366,24 +1442,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
if (!alpm_config_valid(intel_dp, crtc_state, false))
return false;
}
if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, Unable to use long enough wake times\n");
return false;
}
/* Vblank >= PSR2_CTL Block Count Number maximum line count */
if (!wake_lines_fit_into_vblank(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, too short vblank time\n");
return false;
}
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
@ -1396,9 +1456,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
if (psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay))
crtc_state->enable_psr2_su_region_et = true;
return true;
}
@ -1423,6 +1480,12 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
goto unsupported;
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"Selective update not enabled, SDP indication do not fit in hblank\n");
goto unsupported;
}
if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
!intel_dp->psr.sink_panel_replay_su_support))
goto unsupported;
@ -1439,6 +1502,9 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
crtc_state->enable_psr2_su_region_et =
psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
return true;
unsupported:
@ -1476,9 +1542,15 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
return true;
}
static bool _panel_replay_compute_config(struct intel_dp *intel_dp)
static bool
_panel_replay_compute_config(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
if (!CAN_PANEL_REPLAY(intel_dp))
return false;
@ -1488,6 +1560,32 @@ static bool _panel_replay_compute_config(struct intel_dp *intel_dp)
return false;
}
if (!intel_dp_is_edp(intel_dp))
return true;
/* Remaining checks are for eDP only */
/* 128b/132b Panel Replay is not supported on eDP */
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm,
"Panel Replay is not supported with 128b/132b\n");
return false;
}
/* HW will not allow Panel Replay on eDP when HDCP enabled */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED ||
(conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
drm_dbg_kms(&i915->drm,
"Panel Replay is not supported with HDCP\n");
return false;
}
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
return true;
}
@ -1526,7 +1624,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp);
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
crtc_state->has_psr = crtc_state->has_panel_replay ? true :
_psr_compute_config(intel_dp, crtc_state);
@ -3105,9 +3205,11 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
DISPLAY_VER(dev_priv) >= 20)
intel_dp->psr.source_panel_replay_support = true;
else
if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */

View File

@ -253,6 +253,21 @@ int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
static unsigned int vlv_sprite_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
switch (fb->modifier) {
case I915_FORMAT_MOD_X_TILED:
return 4 * 1024;
case DRM_FORMAT_MOD_LINEAR:
return 128 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@ -964,6 +979,13 @@ hsw_sprite_max_stride(struct intel_plane *plane,
return min(8192 * cpp, 16 * 1024);
}
static unsigned int g4x_sprite_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
return 4 * 1024;
}
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 dvscntr = 0;
@ -1570,6 +1592,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
plane->max_stride = i965_plane_max_stride;
plane->min_alignment = vlv_sprite_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@ -1596,6 +1619,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->min_cdclk = ivb_sprite_min_cdclk;
}
plane->min_alignment = g4x_sprite_min_alignment;
formats = snb_sprite_formats;
num_formats = ARRAY_SIZE(snb_sprite_formats);
@ -1607,6 +1632,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
plane->max_stride = g4x_sprite_max_stride;
plane->min_alignment = g4x_sprite_min_alignment;
plane->min_cdclk = g4x_sprite_min_cdclk;
if (IS_SANDYBRIDGE(dev_priv)) {

View File

@ -557,6 +557,16 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
int intel_mode_vdisplay(const struct drm_display_mode *mode)
{
int vdisplay = mode->crtc_vdisplay;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vdisplay = DIV_ROUND_UP(vdisplay, 2);
return vdisplay;
}
int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
int vblank_start = mode->crtc_vblank_start;

View File

@ -20,6 +20,7 @@ struct intel_vblank_evade_ctx {
bool need_vlv_dsi_wa;
};
int intel_mode_vdisplay(const struct drm_display_mode *mode);
int intel_mode_vblank_start(const struct drm_display_mode *mode);
int intel_mode_vblank_end(const struct drm_display_mode *mode);
int intel_mode_vtotal(const struct drm_display_mode *mode);

View File

@ -137,7 +137,7 @@ static unsigned int
cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
{
int multiplier_m = 1, multiplier_n = 1, vtotal, desired_refresh_rate;
long long adjusted_pixel_rate;
u64 adjusted_pixel_rate;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
desired_refresh_rate = drm_mode_vrefresh(adjusted_mode);

View File

@ -503,6 +503,77 @@ skl_plane_max_stride(struct intel_plane *plane,
max_pixels, max_bytes);
}
static u32 tgl_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
/* PLANE_SURF GGTT -> DPT alignment */
int mult = intel_fb_uses_dpt(fb) ? 512 : 1;
/* AUX_DIST needs only 4K alignment */
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return mult * 4 * 1024;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_4_TILED:
/*
* FIXME ADL sees GGTT/DMAR faults with async
* flips unless we align to 16k at least.
* Figure out what's going on here...
*/
if (IS_ALDERLAKE_P(i915) && HAS_ASYNC_FLIPS(i915))
return mult * 16 * 1024;
return mult * 4 * 1024;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS:
case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
/*
* Align to at least 4x1 main surface
* tiles (16K) to match 64B of AUX.
*/
return max(mult * 4 * 1024, 16 * 1024);
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
static u32 skl_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane)
{
/*
* AUX_DIST needs only 4K alignment,
* as does ICL UV PLANE_SURF.
*/
if (color_plane != 0)
return 4 * 1024;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
return 256 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
MISSING_CASE(fb->modifier);
return 0;
}
}
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
#define PREOFF_YUV_TO_RGB_ME 0x0000
@ -1680,11 +1751,12 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int main_x, int main_y, u32 main_offset,
int ccs_plane)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_x = plane_state->view.color_plane[ccs_plane].x;
int aux_y = plane_state->view.color_plane[ccs_plane].y;
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
unsigned int alignment = intel_surf_alignment(fb, ccs_plane);
unsigned int alignment = plane->min_alignment(plane, fb, ccs_plane);
int hsub;
int vsub;
@ -1728,7 +1800,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, 0);
u32 aux_offset = plane_state->view.color_plane[aux_plane].offset;
unsigned int alignment = intel_surf_alignment(fb, 0);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
intel_add_fb_offsets(x, y, plane_state, 0);
@ -1784,7 +1856,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int min_width = intel_plane_min_width(plane, fb, 0, rotation);
int max_width = intel_plane_max_width(plane, fb, 0, rotation);
int max_height = intel_plane_max_height(plane, fb, 0, rotation);
unsigned int alignment = intel_surf_alignment(fb, 0);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
int aux_plane = skl_main_to_aux_plane(fb, 0);
u32 offset;
int ret;
@ -1873,7 +1945,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
if (ccs_plane) {
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
unsigned int alignment = intel_surf_alignment(fb, uv_plane);
unsigned int alignment = plane->min_alignment(plane, fb, uv_plane);
if (offset > aux_offset)
offset = intel_plane_adjust_aligned_offset(&x, &y,
@ -2430,6 +2502,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane->max_stride = skl_plane_max_stride;
if (DISPLAY_VER(dev_priv) >= 12)
plane->min_alignment = tgl_plane_min_alignment;
else
plane->min_alignment = skl_plane_min_alignment;
if (DISPLAY_VER(dev_priv) >= 11) {
plane->update_noarm = icl_plane_update_noarm;
plane->update_arm = icl_plane_update_arm;

View File

@ -53,6 +53,7 @@
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
@ -933,50 +934,6 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
i915_gem_flush_free_objects(to_i915(dev));
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
if (!HAS_DISPLAY(dev_priv))
return;
/*
* TODO: check and remove holding the modeset locks if none of
* the encoders depends on this.
*/
drm_modeset_lock_all(&dev_priv->drm);
for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
drm_modeset_unlock_all(&dev_priv->drm);
for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->suspend_complete)
encoder->suspend_complete(encoder);
}
static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
if (!HAS_DISPLAY(dev_priv))
return;
/*
* TODO: check and remove holding the modeset locks if none of
* the encoders depends on this.
*/
drm_modeset_lock_all(&dev_priv->drm);
for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->shutdown)
encoder->shutdown(encoder);
drm_modeset_unlock_all(&dev_priv->drm);
for_each_intel_encoder(&dev_priv->drm, encoder)
if (encoder->shutdown_complete)
encoder->shutdown_complete(encoder);
}
void i915_driver_shutdown(struct drm_i915_private *i915)
{
disable_rpm_wakeref_asserts(&i915->runtime_pm);
@ -999,8 +956,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(i915);
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
intel_encoder_suspend_all(&i915->display);
intel_encoder_shutdown_all(&i915->display);
intel_dmc_suspend(i915);
@ -1083,7 +1040,7 @@ static int i915_drm_suspend(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(dev_priv);
intel_suspend_encoders(dev_priv);
intel_encoder_suspend_all(&dev_priv->display);
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);

View File

@ -2499,51 +2499,41 @@
#define GEN8_DE_PIPE_IMR(pipe) _MMIO(0x44404 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IIR(pipe) _MMIO(0x44408 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IER(pipe) _MMIO(0x4440c + (0x10 * (pipe)))
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
#define GEN12_PIPE_VBLANK_UNMOD (1 << 19)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
#define GEN11_PIPE_PLANE7_FAULT (1 << 22)
#define GEN11_PIPE_PLANE6_FAULT (1 << 21)
#define GEN11_PIPE_PLANE5_FAULT (1 << 20)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
#define GEN9_PIPE_PLANE4_FLIP_DONE (1 << 6)
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p)))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_PIPE_CURSOR_FAULT | \
GEN9_PIPE_PLANE4_FAULT | \
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
GEN11_PIPE_PLANE7_FAULT | \
GEN11_PIPE_PLANE6_FAULT | \
GEN11_PIPE_PLANE5_FAULT)
#define RKL_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
GEN11_PIPE_PLANE5_FAULT)
#define GEN8_PIPE_FIFO_UNDERRUN REG_BIT(31)
#define GEN8_PIPE_CDCLK_CRC_ERROR REG_BIT(29)
#define GEN8_PIPE_CDCLK_CRC_DONE REG_BIT(28)
#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */
#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */
#define XELPD_PIPE_SOFT_UNDERRUN REG_BIT(22) /* adl/dg2+ */
#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
#define XELPD_PIPE_HARD_UNDERRUN REG_BIT(21) /* adl/dg2+ */
#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
#define MTL_PLANE_ATS_FAULT REG_BIT(18) /* mtl+ */
#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
#define GEN9_PIPE_PLANE3_FAULT REG_BIT(9) /* skl+ */
#define GEN8_PIPE_SPRITE_FAULT REG_BIT(9) /* bdw */
#define GEN9_PIPE_PLANE2_FAULT REG_BIT(8) /* skl+ */
#define GEN8_PIPE_PRIMARY_FAULT REG_BIT(8) /* bdw */
#define GEN9_PIPE_PLANE1_FAULT REG_BIT(7) /* skl+ */
#define GEN9_PIPE_PLANE4_FLIP_DONE REG_BIT(6) /* skl+ */
#define GEN9_PIPE_PLANE3_FLIP_DONE REG_BIT(5) /* skl+ */
#define GEN8_PIPE_SPRITE_FLIP_DONE REG_BIT(5) /* bdw */
#define GEN9_PIPE_PLANE2_FLIP_DONE REG_BIT(4) /* skl+ */
#define GEN8_PIPE_PRIMARY_FLIP_DONE REG_BIT(4) /* bdw */
#define GEN9_PIPE_PLANE1_FLIP_DONE REG_BIT(3) /* skl+ */
#define GEN9_PIPE_PLANE_FLIP_DONE(plane_id) \
REG_BIT(((plane_id) >= PLANE_5 ? 16 - PLANE_5 : 3 - PLANE_1) + (plane_id)) /* skl+ */
#define GEN8_PIPE_SCAN_LINE_EVENT REG_BIT(2)
#define GEN8_PIPE_VSYNC REG_BIT(1)
#define GEN8_PIPE_VBLANK REG_BIT(0)
#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)

View File

@ -96,10 +96,10 @@ xe-y += xe_bb.o \
xe_mocs.o \
xe_module.o \
xe_oa.o \
xe_observation.o \
xe_pat.o \
xe_pci.o \
xe_pcode.o \
xe_perf.o \
xe_pm.o \
xe_preempt_fence.o \
xe_pt.o \

View File

@ -23,6 +23,7 @@
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@ -270,21 +271,6 @@ void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
gen11_de_irq_postinstall(xe);
}
static void intel_suspend_encoders(struct xe_device *xe)
{
struct drm_device *dev = &xe->drm;
struct intel_encoder *encoder;
if (has_display(xe))
return;
drm_modeset_lock_all(dev);
for_each_intel_encoder(dev, encoder)
if (encoder->suspend)
encoder->suspend(encoder);
drm_modeset_unlock_all(dev);
}
static bool suspend_to_idle(void)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
@ -315,7 +301,7 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_hpd_cancel_work(xe);
intel_suspend_encoders(xe);
intel_encoder_suspend_all(&xe->display);
intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);

View File

@ -334,8 +334,9 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
bool phys_cursor,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
bool uses_fence,
unsigned long *out_flags)
{

View File

@ -210,8 +210,8 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
intel_fb_fill_view(to_intel_framebuffer(fb),
plane_state->uapi.rotation, &plane_state->view);
vma = intel_fb_pin_to_ggtt(fb, false, &plane_state->view.gtt,
false, &plane_state->flags);
vma = intel_fb_pin_to_ggtt(fb, &plane_state->view.gtt,
0, 0, false, &plane_state->flags);
if (IS_ERR(vma))
goto nofb;

View File

@ -42,9 +42,9 @@
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_perf.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
@ -142,7 +142,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_PERF, xe_perf_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

View File

@ -463,7 +463,7 @@ struct xe_device {
/** @heci_gsc: graphics security controller */
struct xe_heci_gsc heci_gsc;
/** @oa: oa perf counter subsystem */
/** @oa: oa observation subsystem */
struct xe_oa oa;
/** @needs_flr_on_fini: requests function-reset on fini */

View File

@ -389,7 +389,7 @@ struct xe_gt {
u8 instances_per_class[XE_ENGINE_CLASS_MAX];
} user_engines;
/** @oa: oa perf counter subsystem per gt info */
/** @oa: oa observation subsystem per gt info */
struct xe_oa_gt oa;
};

View File

@ -187,7 +187,6 @@ static void xe_hw_fence_release(struct dma_fence *dma_fence)
{
struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
trace_xe_hw_fence_free(fence);
XE_WARN_ON(!list_empty(&fence->irq_link));
call_rcu(&dma_fence->rcu, fence_free);
}

View File

@ -11,7 +11,7 @@
#include "xe_drv.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_perf.h"
#include "xe_observation.h"
#include "xe_sched_job.h"
struct xe_modparam xe_modparam = {
@ -80,8 +80,8 @@ static const struct init_funcs init_funcs[] = {
.exit = xe_unregister_pci_driver,
},
{
.init = xe_perf_sysctl_register,
.exit = xe_perf_sysctl_unregister,
.init = xe_observation_sysctl_register,
.exit = xe_observation_sysctl_unregister,
},
};

View File

@ -32,7 +32,7 @@
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_perf.h"
#include "xe_observation.h"
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
@ -481,7 +481,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf,
OASTATUS_RELEVANT_BITS, 0);
/*
* Signal to userspace that there is non-zero OA status to read via
* @DRM_XE_PERF_IOCTL_STATUS perf fd ioctl
* @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl
*/
if (stream->oa_status & OASTATUS_RELEVANT_BITS)
return -EIO;
@ -1158,15 +1158,15 @@ static long xe_oa_ioctl_locked(struct xe_oa_stream *stream,
unsigned long arg)
{
switch (cmd) {
case DRM_XE_PERF_IOCTL_ENABLE:
case DRM_XE_OBSERVATION_IOCTL_ENABLE:
return xe_oa_enable_locked(stream);
case DRM_XE_PERF_IOCTL_DISABLE:
case DRM_XE_OBSERVATION_IOCTL_DISABLE:
return xe_oa_disable_locked(stream);
case DRM_XE_PERF_IOCTL_CONFIG:
case DRM_XE_OBSERVATION_IOCTL_CONFIG:
return xe_oa_config_locked(stream, arg);
case DRM_XE_PERF_IOCTL_STATUS:
case DRM_XE_OBSERVATION_IOCTL_STATUS:
return xe_oa_status_locked(stream, arg);
case DRM_XE_PERF_IOCTL_INFO:
case DRM_XE_OBSERVATION_IOCTL_INFO:
return xe_oa_info_locked(stream, arg);
}
@ -1209,7 +1209,7 @@ static int xe_oa_release(struct inode *inode, struct file *file)
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
/* Release the reference the perf stream kept on the driver */
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);
return 0;
@ -1222,7 +1222,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long start = vma->vm_start;
int i, ret;
if (xe_perf_stream_paranoid && !perfmon_capable()) {
if (xe_observation_paranoid && !perfmon_capable()) {
drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
return -EACCES;
}
@ -1789,8 +1789,8 @@ static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number
* @file: @drm_file
*
* The function opens an OA stream. An OA stream, opened with specified
* properties, enables perf counter samples to be collected, either
* periodically (time based sampling), or on request (using perf queries)
* properties, enables OA counter samples to be collected, either
* periodically (time based sampling), or on request (using OA queries)
*/
int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file)
{
@ -1836,8 +1836,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
privileged_op = true;
}
if (privileged_op && xe_perf_stream_paranoid && !perfmon_capable()) {
drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe perf stream\n");
if (privileged_op && xe_observation_paranoid && !perfmon_capable()) {
drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe OA stream\n");
ret = -EACCES;
goto err_exec_q;
}
@ -2097,7 +2097,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -ENODEV;
}
if (xe_perf_stream_paranoid && !perfmon_capable()) {
if (xe_observation_paranoid && !perfmon_capable()) {
drm_dbg(&oa->xe->drm, "Insufficient privileges to add xe OA config\n");
return -EACCES;
}
@ -2181,7 +2181,7 @@ reg_err:
/**
* xe_oa_remove_config_ioctl - Removes one OA config
* @dev: @drm_device
* @data: pointer to struct @drm_xe_perf_param
* @data: pointer to struct @drm_xe_observation_param
* @file: @drm_file
*/
int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file)
@ -2197,7 +2197,7 @@ int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file
return -ENODEV;
}
if (xe_perf_stream_paranoid && !perfmon_capable()) {
if (xe_observation_paranoid && !perfmon_capable()) {
drm_dbg(&oa->xe->drm, "Insufficient privileges to remove xe OA config\n");
return -EACCES;
}
@ -2381,7 +2381,7 @@ static int xe_oa_init_gt(struct xe_gt *gt)
/*
* Fused off engines can result in oa_unit's with num_engines == 0. These units
* will appear in OA unit query, but no perf streams can be opened on them.
* will appear in OA unit query, but no OA streams can be opened on them.
*/
gt->oa.num_oa_units = num_oa_units;
gt->oa.oa_unit = u;

View File

@ -0,0 +1,93 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023-2024 Intel Corporation
*/
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_observation.h"
u32 xe_observation_paranoid = true;
static struct ctl_table_header *sysctl_header;
static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_observation_param *arg,
struct drm_file *file)
{
switch (arg->observation_op) {
case DRM_XE_OBSERVATION_OP_STREAM_OPEN:
return xe_oa_stream_open_ioctl(dev, arg->param, file);
case DRM_XE_OBSERVATION_OP_ADD_CONFIG:
return xe_oa_add_config_ioctl(dev, arg->param, file);
case DRM_XE_OBSERVATION_OP_REMOVE_CONFIG:
return xe_oa_remove_config_ioctl(dev, arg->param, file);
default:
return -EINVAL;
}
}
/**
* xe_observation_ioctl - The top level observation layer ioctl
* @dev: @drm_device
* @data: pointer to struct @drm_xe_observation_param
* @file: @drm_file
*
* The function is called for different observation stream types and
* allows execution of different operations supported by those stream
* types.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_xe_observation_param *arg = data;
if (arg->extensions)
return -EINVAL;
switch (arg->observation_type) {
case DRM_XE_OBSERVATION_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
default:
return -EINVAL;
}
}
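/*
 * Illustrative userspace sketch (an assumption for this write-up, not part
 * of the patch): the single entry point above is reached by filling a
 * struct drm_xe_observation_param and issuing DRM_IOCTL_XE_OBSERVATION on
 * an open xe render node. Only uapi names visible in this series are used;
 * the fd and error handling are placeholders.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Route one observation op through DRM_IOCTL_XE_OBSERVATION, mirroring the
 * xe_observation_ioctl() -> xe_oa_ioctl() dispatch above. */
static int xe_observation_call(int fd, uint64_t type, uint64_t op, void *param)
{
	struct drm_xe_observation_param arg = {
		.extensions = 0,		/* no extension chain on the top-level struct */
		.observation_type = type,	/* e.g. DRM_XE_OBSERVATION_TYPE_OA */
		.observation_op = op,		/* STREAM_OPEN / ADD_CONFIG / REMOVE_CONFIG */
		.param = (uintptr_t)param,	/* op-specific payload */
	};

	/* For DRM_XE_OBSERVATION_OP_STREAM_OPEN a new stream fd is returned */
	return ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
}
/* End of illustrative sketch. */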
static struct ctl_table observation_ctl_table[] = {
{
.procname = "observation_paranoid",
.data = &xe_observation_paranoid,
.maxlen = sizeof(xe_observation_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{}
};
/**
* xe_observation_sysctl_register - Register xe_observation_paranoid sysctl
*
* Normally only superuser/root can access observation stream
* data. However, superuser can set xe_observation_paranoid sysctl to 0 to
* allow non-privileged users to also access observation data.
*
* Return: always returns 0
*/
int xe_observation_sysctl_register(void)
{
sysctl_header = register_sysctl("dev/xe", observation_ctl_table);
return 0;
}
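/*
 * Illustrative userspace sketch (assumption, not part of the patch): with
 * the table above registered under "dev/xe", the knob is visible at
 * /proc/sys/dev/xe/observation_paranoid. An unprivileged tool might check
 * it before attempting to open an observation stream without CAP_PERFMON.
 */
#include <stdio.h>

static int xe_observation_paranoid_enabled(void)
{
	FILE *f = fopen("/proc/sys/dev/xe/observation_paranoid", "r");
	int val = 1;	/* assume the restrictive default if the file is unreadable */

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = 1;
		fclose(f);
	}
	return val;
}
/* End of illustrative sketch. */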
/**
* xe_observation_sysctl_unregister - Unregister xe_observation_paranoid sysctl
*/
void xe_observation_sysctl_unregister(void)
{
unregister_sysctl_table(sysctl_header);
}

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023-2024 Intel Corporation
*/
#ifndef _XE_OBSERVATION_H_
#define _XE_OBSERVATION_H_
#include <linux/types.h>
struct drm_device;
struct drm_file;
extern u32 xe_observation_paranoid;
int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int xe_observation_sysctl_register(void);
void xe_observation_sysctl_unregister(void);
#endif

View File

@ -1,92 +0,0 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023-2024 Intel Corporation
*/
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_perf.h"
u32 xe_perf_stream_paranoid = true;
static struct ctl_table_header *sysctl_header;
static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_perf_param *arg,
struct drm_file *file)
{
switch (arg->perf_op) {
case DRM_XE_PERF_OP_STREAM_OPEN:
return xe_oa_stream_open_ioctl(dev, arg->param, file);
case DRM_XE_PERF_OP_ADD_CONFIG:
return xe_oa_add_config_ioctl(dev, arg->param, file);
case DRM_XE_PERF_OP_REMOVE_CONFIG:
return xe_oa_remove_config_ioctl(dev, arg->param, file);
default:
return -EINVAL;
}
}
/**
* xe_perf_ioctl - The top level perf layer ioctl
* @dev: @drm_device
* @data: pointer to struct @drm_xe_perf_param
* @file: @drm_file
*
* The function is called for different perf streams types and allows execution
* of different operations supported by those perf stream types.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_xe_perf_param *arg = data;
if (arg->extensions)
return -EINVAL;
switch (arg->perf_type) {
case DRM_XE_PERF_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
default:
return -EINVAL;
}
}
static struct ctl_table perf_ctl_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &xe_perf_stream_paranoid,
.maxlen = sizeof(xe_perf_stream_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{}
};
/**
* xe_perf_sysctl_register - Register "perf_stream_paranoid" sysctl
*
* Normally only superuser/root can access perf counter data. However,
* superuser can set perf_stream_paranoid sysctl to 0 to allow non-privileged
* users to also access perf data.
*
* Return: always returns 0
*/
int xe_perf_sysctl_register(void)
{
sysctl_header = register_sysctl("dev/xe", perf_ctl_table);
return 0;
}
/**
* xe_perf_sysctl_unregister - Unregister "perf_stream_paranoid" sysctl
*/
void xe_perf_sysctl_unregister(void)
{
unregister_sysctl_table(sysctl_header);
}

View File

@ -1,20 +0,0 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023-2024 Intel Corporation
*/
#ifndef _XE_PERF_H_
#define _XE_PERF_H_
#include <linux/types.h>
struct drm_device;
struct drm_file;
extern u32 xe_perf_stream_paranoid;
int xe_perf_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int xe_perf_sysctl_register(void);
void xe_perf_sysctl_unregister(void);
#endif

View File

@ -341,11 +341,6 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
TP_ARGS(fence)
);
DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
TP_PROTO(struct xe_hw_fence *fence),
TP_ARGS(fence)
);
TRACE_EVENT(xe_reg_rw,
TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),

View File

@ -972,6 +972,8 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
bool drm_plane_has_format(struct drm_plane *plane,
u32 format, u64 modifier);
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier);

View File

@ -17,6 +17,7 @@ struct drm_crtc;
* drm_vblank_work_init()
* drm_vblank_work_cancel_sync()
* drm_vblank_work_flush()
* drm_vblank_work_flush_all()
*/
struct drm_vblank_work {
/**
@ -67,5 +68,6 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
void (*func)(struct kthread_work *work));
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
void drm_vblank_work_flush(struct drm_vblank_work *work);
void drm_vblank_work_flush_all(struct drm_crtc *crtc);
#endif /* !_DRM_VBLANK_WORK_H_ */

View File

@ -80,7 +80,7 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_PERF
* - &DRM_IOCTL_XE_OBSERVATION
*/
/*
@ -101,7 +101,7 @@ extern "C" {
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_PERF 0x0b
#define DRM_XE_OBSERVATION 0x0b
/* Must be kept compact -- no holes */
@ -116,7 +116,7 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_PERF DRM_IOW(DRM_COMMAND_BASE + DRM_XE_PERF, struct drm_xe_perf_param)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
/**
* DOC: Xe IOCTL Extensions
@ -1376,66 +1376,67 @@ struct drm_xe_wait_user_fence {
};
/**
* enum drm_xe_perf_type - Perf stream types
* enum drm_xe_observation_type - Observation stream types
*/
enum drm_xe_perf_type {
/** @DRM_XE_PERF_TYPE_OA: OA perf stream type */
DRM_XE_PERF_TYPE_OA,
enum drm_xe_observation_type {
/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
DRM_XE_OBSERVATION_TYPE_OA,
};
/**
* enum drm_xe_perf_op - Perf stream ops
* enum drm_xe_observation_op - Observation stream ops
*/
enum drm_xe_perf_op {
/** @DRM_XE_PERF_OP_STREAM_OPEN: Open a perf counter stream */
DRM_XE_PERF_OP_STREAM_OPEN,
enum drm_xe_observation_op {
/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
DRM_XE_OBSERVATION_OP_STREAM_OPEN,
/** @DRM_XE_PERF_OP_ADD_CONFIG: Add perf stream config */
DRM_XE_PERF_OP_ADD_CONFIG,
/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
DRM_XE_OBSERVATION_OP_ADD_CONFIG,
/** @DRM_XE_PERF_OP_REMOVE_CONFIG: Remove perf stream config */
DRM_XE_PERF_OP_REMOVE_CONFIG,
/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};
/**
* struct drm_xe_perf_param - Input of &DRM_XE_PERF
* struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
*
* The perf layer enables multiplexing perf counter streams of multiple
* types. The actual params for a particular stream operation are supplied
* via the @param pointer (use __copy_from_user to get these params).
* The observation layer enables multiplexing observation streams of
* multiple types. The actual params for a particular stream operation are
* supplied via the @param pointer (use __copy_from_user to get these
* params).
*/
struct drm_xe_perf_param {
struct drm_xe_observation_param {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @perf_type: Perf stream type, of enum @drm_xe_perf_type */
__u64 perf_type;
/** @perf_op: Perf op, of enum @drm_xe_perf_op */
__u64 perf_op;
/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
__u64 observation_type;
/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
__u64 observation_op;
/** @param: Pointer to actual stream params */
__u64 param;
};
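/*
 * Illustrative userspace sketch (assumption, not from this patch): opening
 * an OA stream through this struct. The op-specific payload behind @param
 * is a chain of drm_xe_ext_set_property structs with
 * base.name == DRM_XE_OA_EXTENSION_SET_PROPERTY (see the OA property
 * documentation below). The metric-set id and OA format value passed in
 * by the caller are placeholders.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int xe_oa_stream_open(int fd, uint64_t metric_set, uint64_t oa_format)
{
	/* Second link of the chain: OA report format */
	struct drm_xe_ext_set_property format_prop = {
		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
		.base.next_extension = 0,		/* end of chain */
		.property = DRM_XE_OA_PROPERTY_OA_FORMAT,
		.value = oa_format,
	};
	/* First link of the chain: OA config added earlier via ADD_CONFIG */
	struct drm_xe_ext_set_property metric_prop = {
		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
		.base.next_extension = (uintptr_t)&format_prop,
		.property = DRM_XE_OA_PROPERTY_OA_METRIC_SET,
		.value = metric_set,
	};
	struct drm_xe_observation_param arg = {
		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
		.param = (uintptr_t)&metric_prop,
	};

	/* On success the ioctl returns the new OA stream fd */
	return ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
}
/* End of illustrative sketch. */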
/**
* enum drm_xe_perf_ioctls - Perf fd ioctl's
* enum drm_xe_observation_ioctls - Observation stream fd ioctls
*
* Information exchanged between userspace and kernel for perf fd ioctl's
* is stream type specific
* Information exchanged between userspace and kernel for observation fd
* ioctls is stream type specific
*/
enum drm_xe_perf_ioctls {
/** @DRM_XE_PERF_IOCTL_ENABLE: Enable data capture for a stream */
DRM_XE_PERF_IOCTL_ENABLE = _IO('i', 0x0),
enum drm_xe_observation_ioctls {
/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),
/** @DRM_XE_PERF_IOCTL_DISABLE: Disable data capture for a stream */
DRM_XE_PERF_IOCTL_DISABLE = _IO('i', 0x1),
/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),
/** @DRM_XE_PERF_IOCTL_CONFIG: Change stream configuration */
DRM_XE_PERF_IOCTL_CONFIG = _IO('i', 0x2),
/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),
/** @DRM_XE_PERF_IOCTL_STATUS: Return stream status */
DRM_XE_PERF_IOCTL_STATUS = _IO('i', 0x3),
/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),
/** @DRM_XE_PERF_IOCTL_INFO: Return stream info */
DRM_XE_PERF_IOCTL_INFO = _IO('i', 0x4),
/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};
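/*
 * Illustrative userspace sketch (assumption, not from this patch): one
 * capture cycle on a stream fd returned by DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 * assuming the stream was opened with DRM_XE_OA_PROPERTY_OA_DISABLED set.
 * Buffer handling and error paths are placeholders.
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int xe_oa_capture_once(int stream_fd, void *buf, size_t len)
{
	ssize_t n;
	int ret;

	if (ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0))
		return -errno;

	n = read(stream_fd, buf, len);
	ret = n < 0 ? -errno : (int)n;

	if (ret == -EIO) {
		/* Non-zero OA status: query it via the STATUS fd ioctl */
		struct drm_xe_oa_stream_status status = { .extensions = 0 };

		ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_STATUS, &status);
		/* the returned struct carries the OA error/overflow bits */
	}

	ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_DISABLE, 0);
	return ret;
}
/* End of illustrative sketch. */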
/**
@ -1546,12 +1547,12 @@ enum drm_xe_oa_format_type {
* Stream params are specified as a chain of @drm_xe_ext_set_property
* struct's, with @property values from enum @drm_xe_oa_property_id and
* @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
* @param field in struct @drm_xe_perf_param points to the first
* @param field in struct @drm_xe_observation_param points to the first
* @drm_xe_ext_set_property struct.
*
* Exactly the same mechanism is also used for stream reconfiguration using
* the @DRM_XE_PERF_IOCTL_CONFIG perf fd ioctl, though only a subset of
* properties below can be specified for stream reconfiguration.
* Exactly the same mechanism is also used for stream reconfiguration using the
* @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
* subset of properties below can be specified for stream reconfiguration.
*/
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY 0
@ -1571,11 +1572,11 @@ enum drm_xe_oa_property_id {
/**
* @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
* reports, previously added via @DRM_XE_PERF_OP_ADD_CONFIG.
* reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
*/
DRM_XE_OA_PROPERTY_OA_METRIC_SET,
/** @DRM_XE_OA_PROPERTY_OA_FORMAT: Perf counter report format */
/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
DRM_XE_OA_PROPERTY_OA_FORMAT,
/*
* OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
@ -1596,13 +1597,13 @@ enum drm_xe_oa_property_id {
/**
* @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
* stream in a DISABLED state (see @DRM_XE_PERF_IOCTL_ENABLE).
* stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
*/
DRM_XE_OA_PROPERTY_OA_DISABLED,
/**
* @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
* @exec_queue_id. Perf queries can be executed on this exec queue.
* @exec_queue_id. OA queries can be executed on this exec queue.
*/
DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,
@ -1622,7 +1623,7 @@ enum drm_xe_oa_property_id {
/**
* struct drm_xe_oa_config - OA metric configuration
*
* Multiple OA configs can be added using @DRM_XE_PERF_OP_ADD_CONFIG. A
* Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
* particular config can be specified when opening an OA stream using
* @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
*/
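/*
 * Illustrative userspace sketch (assumption, not from this patch): adding an
 * OA config. The drm_xe_oa_config field names used here (uuid, n_regs,
 * regs_ptr) come from the full xe_drm.h definition rather than the hunk
 * above and should be read as assumptions, as should the register list and
 * the 36-character UUID string supplied by the caller.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Returns the new metric set id on success; that id is later passed as
 * DRM_XE_OA_PROPERTY_OA_METRIC_SET when opening a stream. */
static int xe_oa_add_config(int fd, const char uuid[36],
			    const uint32_t *regs, uint32_t n_regs)
{
	struct drm_xe_oa_config config = {
		.n_regs = n_regs,		/* number of (address, value) pairs */
		.regs_ptr = (uintptr_t)regs,	/* pointer to the register pairs */
	};
	struct drm_xe_observation_param arg = {
		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
		.observation_op = DRM_XE_OBSERVATION_OP_ADD_CONFIG,
		.param = (uintptr_t)&config,
	};

	memcpy(config.uuid, uuid, sizeof(config.uuid));

	return ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
}
/* End of illustrative sketch. */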
@ -1645,8 +1646,9 @@ struct drm_xe_oa_config {
/**
* struct drm_xe_oa_stream_status - OA stream status returned from
* @DRM_XE_PERF_IOCTL_STATUS perf fd ioctl. Userspace can call the ioctl to
* query stream status in response to EIO errno from perf fd read().
* @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
* call the ioctl to query stream status in response to EIO errno from
* observation fd read().
*/
struct drm_xe_oa_stream_status {
/** @extensions: Pointer to the first extension struct, if any */
@ -1665,7 +1667,7 @@ struct drm_xe_oa_stream_status {
/**
* struct drm_xe_oa_stream_info - OA stream info returned from
* @DRM_XE_PERF_IOCTL_INFO perf fd ioctl
* @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
*/
struct drm_xe_oa_stream_info {
/** @extensions: Pointer to the first extension struct, if any */