linux/drivers/gpu/drm/i915/gt/intel_context.h

/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>
#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"
#define CE_TRACE(ce, fmt, ...) do { \
const struct intel_context *ce__ = (ce); \
ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
ce__->timeline->fence_context, \
##__VA_ARGS__); \
} while (0)
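/*
 * CE_TRACE() takes a printf-style format and routes it through the engine
 * tracepoints, prefixed with the context's timeline fence_context. An
 * illustrative call (a sketch, not quoted from the driver):
 *
 *	CE_TRACE(ce, "pin, pin_count:%d\n", atomic_read(&ce->pin_count));
 */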
#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)
struct i915_gem_ww_ctx;
void intel_context_init(struct intel_context *ce,
struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);
void i915_context_module_exit(void);
int i915_context_module_init(void);
struct intel_context *
intel_context_create(struct intel_engine_cs *engine);
int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
int intel_context_reconfigure_sseu(struct intel_context *ce,
const struct intel_sseu sseu);
#define PARENT_SCRATCH_SIZE PAGE_SIZE
static inline bool intel_context_is_child(struct intel_context *ce)
{
return !!ce->parallel.parent;
}
static inline bool intel_context_is_parent(struct intel_context *ce)
{
return !!ce->parallel.number_children;
}
/* Forward declaration of the definition below, needed by intel_context_to_parent(). */
static inline bool intel_context_is_pinned(struct intel_context *ce);
static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
if (intel_context_is_child(ce)) {
/*
* The parent holds a reference to the child, so it is always safe
* for the parent to access the child, but the child has a
* pointer to the parent without holding a reference. To ensure
* this is safe, the child should only access the parent pointer
* while the parent is pinned.
*/
GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
return ce->parallel.parent;
} else {
return ce;
}
}
static inline bool intel_context_is_parallel(struct intel_context *ce)
{
return intel_context_is_child(ce) || intel_context_is_parent(ce);
}
void intel_context_bind_parent_child(struct intel_context *parent,
struct intel_context *child);
#define for_each_child(parent, ce)\
list_for_each_entry(ce, &(parent)->parallel.child_list,\
parallel.child_link)
#define for_each_child_safe(parent, ce, cn)\
list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
parallel.child_link)
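/*
 * Illustrative iteration over a parallel context's children (a sketch, not
 * taken from this file); use for_each_child_safe() instead if @ce may be
 * unlinked from the child_list inside the loop body:
 *
 *	struct intel_context *child;
 *
 *	for_each_child(parent, child)
 *		CE_TRACE(child, "child %u\n", child->parallel.child_index);
 */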
/**
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce: the context
*
* Acquire a lock on the pinned status of the HW context, such that the context
* can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
* intel_context_is_pinned() remains stable.
*/
static inline int intel_context_lock_pinned(struct intel_context *ce)
__acquires(ce->pin_mutex)
{
return mutex_lock_interruptible(&ce->pin_mutex);
}
/**
* intel_context_is_pinned - Reports the 'pinned' status
* @ce: the context
*
* While in use by the GPU, the context, along with its ring and page
* tables, is pinned into memory and the GTT.
*
* Returns: true if the context is currently pinned for use by the GPU.
*/
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
return atomic_read(&ce->pin_count);
}
static inline void intel_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
GEM_BUG_ON(!ce->ops->cancel_request);
return ce->ops->cancel_request(ce, rq);
}
/**
* intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
* @ce: the context
*
* Releases the lock earlier acquired by intel_context_lock_pinned().
*/
static inline void intel_context_unlock_pinned(struct intel_context *ce)
__releases(ce->pin_mutex)
{
mutex_unlock(&ce->pin_mutex);
}
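/*
 * Typical pairing of the pin-status lock, as a hedged sketch (real users,
 * e.g. SSEU reconfiguration, carry more error handling). A non-zero return
 * means the wait for the mutex was interrupted by a signal:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (!intel_context_is_pinned(ce))
 *		... safe to modify state that must not change while pinned ...
 *
 *	intel_context_unlock_pinned(ce);
 */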
int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
struct i915_gem_ww_ctx *ww);
static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
return atomic_inc_not_zero(&ce->pin_count);
}
static inline int intel_context_pin(struct intel_context *ce)
{
if (likely(intel_context_pin_if_active(ce)))
return 0;
return __intel_context_do_pin(ce);
}
static inline int intel_context_pin_ww(struct intel_context *ce,
struct i915_gem_ww_ctx *ww)
{
if (likely(intel_context_pin_if_active(ce)))
return 0;
return __intel_context_do_pin_ww(ce, ww);
}
static inline void __intel_context_pin(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_pinned(ce));
atomic_inc(&ce->pin_count);
}
void __intel_context_do_unpin(struct intel_context *ce, int sub);
static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
__intel_context_do_unpin(ce, 2);
}
static inline void intel_context_unpin(struct intel_context *ce)
{
if (!ce->ops->sched_disable) {
__intel_context_do_unpin(ce, 1);
} else {
/*
* Move ownership of this pin to the scheduling disable, which is
* an async operation. When that operation completes,
* intel_context_sched_disable_unpin() above is called,
* potentially unpinning the context.
*/
while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
ce->ops->sched_disable(ce);
break;
}
}
}
}
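/*
 * Pin/unpin lifecycle, sketched (an assumed caller, not part of this
 * header): the first pin takes the slow path through
 * __intel_context_do_pin(), later pins are a plain atomic increment, and
 * every successful pin must be balanced by an unpin:
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... context, ring and page tables are now resident; submit work ...
 *
 *	intel_context_unpin(ce);
 */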
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
if (!ce->active_count++)
ce->ops->enter(ce);
}
static inline void intel_context_mark_active(struct intel_context *ce)
{
lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
test_bit(CONTEXT_IS_PARKING, &ce->flags));
++ce->active_count;
}
static inline void intel_context_exit(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
return ce;
}
static inline void intel_context_put(struct intel_context *ce)
{
kref_put(&ce->ref, ce->ops->destroy);
}
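/*
 * A reference (get/put) keeps the struct intel_context alive; a pin keeps
 * it resident for the GPU. Sketch of holding a reference across
 * asynchronous use (illustrative only):
 *
 *	struct intel_context *ref = intel_context_get(ce);
 *	... hand ref to a worker; the final put may invoke ce->ops->destroy ...
 *	intel_context_put(ref);
 */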
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
__acquires(&ce->timeline->mutex)
{
struct intel_timeline *tl = ce->timeline;
int err;
if (intel_context_is_parent(ce))
err = mutex_lock_interruptible_nested(&tl->mutex, 0);
else if (intel_context_is_child(ce))
err = mutex_lock_interruptible_nested(&tl->mutex,
ce->parallel.child_index + 1);
else
err = mutex_lock_interruptible(&tl->mutex);
if (err)
return ERR_PTR(err);
return tl;
}
static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
__releases(&tl->mutex)
{
mutex_unlock(&tl->mutex);
}
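/*
 * Timeline lock pairing, as an illustrative sketch (request construction
 * typically goes through intel_context_create_request() and friends, which
 * take this lock internally):
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit requests on ce ...
 *	intel_context_timeline_unlock(tl);
 */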
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq);
struct i915_request *intel_context_create_request(struct intel_context *ce);
struct i915_request *
intel_context_find_active_request(struct intel_context *ce);
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}
static inline bool intel_context_is_closed(const struct intel_context *ce)
{
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}
static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
static inline bool intel_context_is_banned(const struct intel_context *ce)
{
return test_bit(CONTEXT_BANNED, &ce->flags);
}
static inline bool intel_context_set_banned(struct intel_context *ce)
{
return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
return !test_bit(CONTEXT_EXITING, &ce->flags) &&
!test_bit(CONTEXT_BANNED, &ce->flags);
}
static inline bool intel_context_is_exiting(const struct intel_context *ce)
{
return test_bit(CONTEXT_EXITING, &ce->flags);
}
static inline bool intel_context_set_exiting(struct intel_context *ce)
{
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
bool intel_context_exit_nonpersistent(struct intel_context *ce,
struct i915_request *rq);
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}
static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}
static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
static inline u64 intel_context_clock(void)
{
/* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
return ktime_get_raw_fast_ns();
}
#endif /* __INTEL_CONTEXT_H__ */