drm/i915: Convert 'i915_seqno_passed' calls into 'i915_gem_request_completed'
Almost everywhere that called i915_seqno_passed() was really asking 'has the
given seqno popped out of the hardware yet?'. Thus it had to query the current
hardware seqno and then do a signed delta comparison (which copes with wrapping
around zero but not with seqno values more than 2GB apart, although the latter
is unlikely!).

Now that the majority of seqno instances have been replaced with request
structures, it is possible to convert this test to be request based as well.
There is now an 'i915_gem_request_completed()' function which takes a request
and returns true or false as appropriate. Note that this currently just wraps
up the original _passed() test but a later patch in the series will reduce this
to simply returning a cached internal value, i.e.:
  _completed(req) { return req->completed; }

This checkin converts almost all _seqno_passed() calls. The only one left is in
the semaphore code, which still requires seqnos not request structures.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
[danvet: Drop hunk touching the trace_irq code since I've dropped the patch
which converts that, and resolve resulting conflict.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 1b5a433a4d
parent ff79e85702
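To illustrate the signed-delta comparison described in the commit message, here
is a small standalone userspace sketch (not part of the patch; the
seqno_passed() helper simply mirrors the kernel's i915_seqno_passed() for
illustration). It shows the test surviving a wrap of the 32-bit seqno counter,
and the failure mode once two values drift more than 2^31 apart.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same signed-delta test as the kernel's i915_seqno_passed(): true when
 * seq1 has reached or passed seq2, even across a 32-bit wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Normal case: hardware seqno 105 has passed request seqno 100. */
	printf("%d\n", seqno_passed(105, 100));          /* prints 1 */

	/* Wrap-around: hardware is at 3, request was 0xfffffffe; the
	 * unsigned subtraction gives a small positive signed delta. */
	printf("%d\n", seqno_passed(3, 0xfffffffeu));    /* prints 1 */

	/* Failure mode: values more than 2^31 apart look 'not passed'. */
	printf("%d\n", seqno_passed(0x80000001u, 0));    /* prints 0 */

	return 0;
}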
@@ -547,8 +547,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			   i915_gem_request_get_seqno(work->flip_queued_req),
 			   dev_priv->next_seqno,
 			   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-			   i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-					     i915_gem_request_get_seqno(work->flip_queued_req)));
+			   i915_gem_request_completed(work->flip_queued_req, true));
 		} else
 			seq_printf(m, "Flip not associated with any ring\n");
 		seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -2062,6 +2062,12 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 	*pdst = src;
 }
 
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
 struct drm_i915_file_private {
 	struct drm_i915_private *dev_priv;
 	struct drm_file *file;
@@ -2563,6 +2569,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+					      bool lazy_coherency)
+{
+	u32 seqno;
+
+	BUG_ON(req == NULL);
+
+	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+	return i915_seqno_passed(seqno, req->seqno);
+}
+
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@ -1223,8 +1223,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(req)))
+	if (i915_gem_request_completed(req, true))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1260,8 +1259,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (i915_seqno_passed(ring->get_seqno(ring, false),
-				      i915_gem_request_get_seqno(req))) {
+		if (i915_gem_request_completed(req, false)) {
 			ret = 0;
 			break;
 		}
@@ -2333,8 +2331,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
 	if (ring == NULL)
 		return;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(obj->last_read_req)))
+	if (i915_gem_request_completed(obj->last_read_req, true))
 		i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2601,12 +2598,9 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 completed_seqno;
-
-	completed_seqno = ring->get_seqno(ring, false);
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (i915_seqno_passed(completed_seqno, request->seqno))
+		if (i915_gem_request_completed(request, false))
 			continue;
 
 		return request;
@@ -2734,15 +2728,11 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-	uint32_t seqno;
-
 	if (list_empty(&ring->request_list))
 		return;
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring, true);
-
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
@@ -2754,8 +2744,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		if (!i915_seqno_passed(seqno,
-				       i915_gem_request_get_seqno(obj->last_read_req)))
+		if (!i915_gem_request_completed(obj->last_read_req, true))
 			break;
 
 		i915_gem_object_move_to_inactive(obj);
@@ -2770,7 +2759,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				       struct drm_i915_gem_request,
 				       list);
 
-		if (!i915_seqno_passed(seqno, request->seqno))
+		if (!i915_gem_request_completed(request, true))
 			break;
 
 		trace_i915_gem_request_retire(request);
@@ -2797,7 +2786,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
-		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+		     i915_seqno_passed(ring->get_seqno(ring, true),
+				       ring->trace_irq_seqno))) {
 		ring->irq_put(ring);
 		ring->trace_irq_seqno = 0;
 	}
@@ -2760,8 +2760,7 @@ static bool
 ring_idle(struct intel_engine_cs *ring)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(ring->get_seqno(ring, false),
-				  i915_gem_request_get_seqno(ring_last_request(ring))));
+		i915_gem_request_completed(ring_last_request(ring), false));
 }
 
 static bool
@@ -9742,11 +9742,7 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
 
 	if (work->flip_ready_vblank == 0) {
 		if (work->flip_queued_ring) {
-			uint32_t s1 = work->flip_queued_ring->get_seqno(
-					work->flip_queued_ring, true);
-			uint32_t s2 = i915_gem_request_get_seqno(
-					work->flip_queued_req);
-			if (!i915_seqno_passed(s1, s2))
+			if (!i915_gem_request_completed(work->flip_queued_req, true))
 				return false;
 		}
 