drm/i915: Make i915_spin_request() static
No users now outside of i915_wait_request(), so we can make it private
to i915_gem_request.c, and assume the caller knows the seqno. In the
process, also remove i915_gem_request_started() as that was only ever
used by i915_spin_request().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922120333.25535-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 7fd0b1a259
commit b2f2f0fc69
drivers/gpu/drm/i915/i915_gem_request.c

@@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
         return this_cpu != cpu;
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
-                         u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                                u32 seqno, int state, unsigned long timeout_us)
 {
         struct intel_engine_cs *engine = req->engine;
         unsigned int irq, cpu;
 
+        GEM_BUG_ON(!seqno);
+
+        /*
+         * Only wait for the request if we know it is likely to complete.
+         *
+         * We don't track the timestamps around requests, nor the average
+         * request length, so we do not have a good indicator that this
+         * request will complete within the timeout. What we do know is the
+         * order in which requests are executed by the engine and so we can
+         * tell if the request has started. If the request hasn't started yet,
+         * it is a fair assumption that it will not complete within our
+         * relatively short timeout.
+         */
+        if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+                return false;
+
         /* When waiting for high frequency requests, e.g. during synchronous
          * rendering split between the CPU and GPU, the finite amount of time
          * required to set up the irq and wait upon it limits the response
@@ -1040,8 +1056,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
         irq = atomic_read(&engine->irq_count);
         timeout_us += local_clock_us(&cpu);
         do {
-                if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                      seqno))
+                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                         return seqno == i915_gem_request_global_seqno(req);
 
                 /* Seqno are meant to be ordered *before* the interrupt. If
@@ -1153,7 +1168,7 @@ restart:
         GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
 
         /* Optimistic short spin before touching IRQs */
-        if (i915_spin_request(req, state, 5))
+        if (__i915_spin_request(req, wait.seqno, state, 5))
                 goto complete;
 
         set_current_state(state);
@@ -1210,7 +1225,7 @@ wakeup:
                         continue;
 
                 /* Only spin if we know the GPU is processing this request */
-                if (i915_spin_request(req, state, 2))
+                if (__i915_spin_request(req, wait.seqno, state, 2))
                         break;
 
                 if (!intel_wait_check_request(&wait, req)) {
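Note on the heuristic added in the first hunk above: it folds the old __i915_gem_request_started() test (removed from the header below) directly into the spinner, treating a request as started once the engine's current seqno has passed seqno - 1, i.e. the previous request. That relies on the wrap-safe comparison implemented by i915_seqno_passed(), whose body is visible in the header diff. The following is a minimal standalone illustration of that comparison, not kernel code; seqno_passed() and the sample values are invented for this sketch.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same test as i915_seqno_passed(): has seq1 advanced to or past seq2? */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        /* Engine currently at seqno 9: request 10 has started (9 >= 10 - 1)... */
        assert(seqno_passed(9, 10 - 1));
        /* ...but request 12 has not (9 < 12 - 1), so spinning on it would be futile. */
        assert(!seqno_passed(9, 12 - 1));
        /* The signed-difference trick keeps working across u32 wrap-around. */
        assert(seqno_passed(0x00000002u, 0xfffffffeu));
        return 0;
}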
drivers/gpu/drm/i915/i915_gem_request.h

@@ -312,26 +312,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
         return (s32)(seq1 - seq2) >= 0;
 }
 
-static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
-{
-        GEM_BUG_ON(!seqno);
-        return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-                                 seqno - 1);
-}
-
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-        u32 seqno;
-
-        seqno = i915_gem_request_global_seqno(req);
-        if (!seqno)
-                return false;
-
-        return __i915_gem_request_started(req, seqno);
-}
-
 static inline bool
 __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
 {
@@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
         return __i915_gem_request_completed(req, seqno);
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-                         u32 seqno, int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-                                     int state, unsigned long timeout_us)
-{
-        u32 seqno;
-
-        seqno = i915_gem_request_global_seqno(request);
-        if (!seqno)
-                return 0;
-
-        return (__i915_gem_request_started(request, seqno) &&
-                __i915_spin_request(request, seqno, state, timeout_us));
-}
-
 /* We treat requests as fences. This is not be to confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
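For context on the two call sites changed in i915_wait_request(): the wait path first does an "optimistic short spin" on the seqno for a few microseconds and only arms the interrupt-driven wait if that fails; the timeout_us/local_clock_us() bookkeeping and busywait_stop() visible in the hunks above provide the time bound. Below is a rough userspace analogue of that bounded busy-wait, as an illustration only; spin_for_completion() and clock_us() are invented names and an atomic flag stands in for the hardware seqno.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Monotonic clock in microseconds. */
static uint64_t clock_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000ull + (uint64_t)ts.tv_nsec / 1000;
}

/*
 * Poll a completion flag for at most timeout_us microseconds; return true
 * if it became set. On false, the caller falls back to a sleeping wait,
 * the analogue of setting up the irq-driven wait in i915_wait_request().
 */
static bool spin_for_completion(atomic_bool *done, uint64_t timeout_us)
{
        uint64_t deadline = clock_us() + timeout_us;

        do {
                if (atomic_load_explicit(done, memory_order_acquire))
                        return true;
        } while (clock_us() < deadline);

        return false;
}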